10d160211SJeremy Fitzhardinge /* 20d160211SJeremy Fitzhardinge * Virtual network driver for conversing with remote driver backends. 30d160211SJeremy Fitzhardinge * 40d160211SJeremy Fitzhardinge * Copyright (c) 2002-2005, K A Fraser 50d160211SJeremy Fitzhardinge * Copyright (c) 2005, XenSource Ltd 60d160211SJeremy Fitzhardinge * 70d160211SJeremy Fitzhardinge * This program is free software; you can redistribute it and/or 80d160211SJeremy Fitzhardinge * modify it under the terms of the GNU General Public License version 2 90d160211SJeremy Fitzhardinge * as published by the Free Software Foundation; or, when distributed 100d160211SJeremy Fitzhardinge * separately from the Linux kernel or incorporated into other 110d160211SJeremy Fitzhardinge * software packages, subject to the following license: 120d160211SJeremy Fitzhardinge * 130d160211SJeremy Fitzhardinge * Permission is hereby granted, free of charge, to any person obtaining a copy 140d160211SJeremy Fitzhardinge * of this source file (the "Software"), to deal in the Software without 150d160211SJeremy Fitzhardinge * restriction, including without limitation the rights to use, copy, modify, 160d160211SJeremy Fitzhardinge * merge, publish, distribute, sublicense, and/or sell copies of the Software, 170d160211SJeremy Fitzhardinge * and to permit persons to whom the Software is furnished to do so, subject to 180d160211SJeremy Fitzhardinge * the following conditions: 190d160211SJeremy Fitzhardinge * 200d160211SJeremy Fitzhardinge * The above copyright notice and this permission notice shall be included in 210d160211SJeremy Fitzhardinge * all copies or substantial portions of the Software. 220d160211SJeremy Fitzhardinge * 230d160211SJeremy Fitzhardinge * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 240d160211SJeremy Fitzhardinge * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 250d160211SJeremy Fitzhardinge * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE 260d160211SJeremy Fitzhardinge * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 270d160211SJeremy Fitzhardinge * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 280d160211SJeremy Fitzhardinge * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS 290d160211SJeremy Fitzhardinge * IN THE SOFTWARE. 300d160211SJeremy Fitzhardinge */ 310d160211SJeremy Fitzhardinge 32383eda32SJoe Perches #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 33383eda32SJoe Perches 340d160211SJeremy Fitzhardinge #include <linux/module.h> 350d160211SJeremy Fitzhardinge #include <linux/kernel.h> 360d160211SJeremy Fitzhardinge #include <linux/netdevice.h> 370d160211SJeremy Fitzhardinge #include <linux/etherdevice.h> 380d160211SJeremy Fitzhardinge #include <linux/skbuff.h> 390d160211SJeremy Fitzhardinge #include <linux/ethtool.h> 400d160211SJeremy Fitzhardinge #include <linux/if_ether.h> 419ecd1a75SWei Liu #include <net/tcp.h> 420d160211SJeremy Fitzhardinge #include <linux/udp.h> 430d160211SJeremy Fitzhardinge #include <linux/moduleparam.h> 440d160211SJeremy Fitzhardinge #include <linux/mm.h> 455a0e3ad6STejun Heo #include <linux/slab.h> 460d160211SJeremy Fitzhardinge #include <net/ip.h> 476c5aa6fcSDenis Kirjanov #include <linux/bpf.h> 486c5aa6fcSDenis Kirjanov #include <net/page_pool.h> 496c5aa6fcSDenis Kirjanov #include <linux/bpf_trace.h> 500d160211SJeremy Fitzhardinge 511ccbf534SJeremy Fitzhardinge #include <xen/xen.h> 520d160211SJeremy Fitzhardinge #include <xen/xenbus.h> 530d160211SJeremy Fitzhardinge #include <xen/events.h> 540d160211SJeremy Fitzhardinge #include <xen/page.h> 55b9136d20SIgor Mammedov #include <xen/platform_pci.h> 560d160211SJeremy Fitzhardinge #include <xen/grant_table.h> 570d160211SJeremy Fitzhardinge 580d160211SJeremy Fitzhardinge #include <xen/interface/io/netif.h> 590d160211SJeremy Fitzhardinge #include <xen/interface/memory.h> 600d160211SJeremy Fitzhardinge #include 
<xen/interface/grant_table.h> 610d160211SJeremy Fitzhardinge 6250ee6061SAndrew J. Bennieston /* Module parameters */ 63034702a6SJuergen Gross #define MAX_QUEUES_DEFAULT 8 6450ee6061SAndrew J. Bennieston static unsigned int xennet_max_queues; 6550ee6061SAndrew J. Bennieston module_param_named(max_queues, xennet_max_queues, uint, 0644); 6650ee6061SAndrew J. Bennieston MODULE_PARM_DESC(max_queues, 6750ee6061SAndrew J. Bennieston "Maximum number of queues per virtual interface"); 6850ee6061SAndrew J. Bennieston 69c2c63310SAndrea Righi #define XENNET_TIMEOUT (5 * HZ) 70c2c63310SAndrea Righi 710fc0b732SStephen Hemminger static const struct ethtool_ops xennet_ethtool_ops; 720d160211SJeremy Fitzhardinge 730d160211SJeremy Fitzhardinge struct netfront_cb { 743683243bSIan Campbell int pull_to; 750d160211SJeremy Fitzhardinge }; 760d160211SJeremy Fitzhardinge 770d160211SJeremy Fitzhardinge #define NETFRONT_SKB_CB(skb) ((struct netfront_cb *)((skb)->cb)) 780d160211SJeremy Fitzhardinge 790d160211SJeremy Fitzhardinge #define RX_COPY_THRESHOLD 256 800d160211SJeremy Fitzhardinge 8130c5d7f0SJulien Grall #define NET_TX_RING_SIZE __CONST_RING_SIZE(xen_netif_tx, XEN_PAGE_SIZE) 8230c5d7f0SJulien Grall #define NET_RX_RING_SIZE __CONST_RING_SIZE(xen_netif_rx, XEN_PAGE_SIZE) 831f3c2ebaSDavid Vrabel 841f3c2ebaSDavid Vrabel /* Minimum number of Rx slots (includes slot for GSO metadata). */ 851f3c2ebaSDavid Vrabel #define NET_RX_SLOTS_MIN (XEN_NETIF_NR_SLOTS_MIN + 1) 860d160211SJeremy Fitzhardinge 872688fcb7SAndrew J. Bennieston /* Queue name is interface name with "-qNNN" appended */ 882688fcb7SAndrew J. Bennieston #define QUEUE_NAME_SIZE (IFNAMSIZ + 6) 892688fcb7SAndrew J. Bennieston 902688fcb7SAndrew J. Bennieston /* IRQ name is queue name with "-tx" or "-rx" appended */ 912688fcb7SAndrew J. Bennieston #define IRQ_NAME_SIZE (QUEUE_NAME_SIZE + 3) 922688fcb7SAndrew J. 

/* Woken when the xenbus state machine makes progress (module init/exit). */
static DECLARE_WAIT_QUEUE_HEAD(module_wq);

/* Per-CPU packet/byte counters, one instance each for RX and TX. */
struct netfront_stats {
	u64			packets;
	u64			bytes;
	struct u64_stats_sync	syncp;	/* protects reader snapshots of the two counters */
};

struct netfront_info;

/* State for one TX/RX ring pair (one queue of a multi-queue interface). */
struct netfront_queue {
	unsigned int id; /* Queue ID, 0-based */
	char name[QUEUE_NAME_SIZE]; /* DEVNAME-qN */
	struct netfront_info *info;	/* back-pointer to owning device */

	struct bpf_prog __rcu *xdp_prog;	/* attached XDP program, if any */

	struct napi_struct napi;

	/* Split event channels support, tx_* == rx_* when using
	 * single event channel.
	 */
	unsigned int tx_evtchn, rx_evtchn;
	unsigned int tx_irq, rx_irq;
	/* Only used when split event channels support is enabled */
	char tx_irq_name[IRQ_NAME_SIZE]; /* DEVNAME-qN-tx */
	char rx_irq_name[IRQ_NAME_SIZE]; /* DEVNAME-qN-rx */

	spinlock_t tx_lock;	/* serializes TX ring producer side */
	struct xen_netif_tx_front_ring tx;
	int tx_ring_ref;	/* grant reference of the shared TX ring page */

	/*
	 * {tx,rx}_skbs store outstanding skbuffs. Free tx_skb entries
	 * are linked from tx_skb_freelist through tx_link.
	 */
	struct sk_buff *tx_skbs[NET_TX_RING_SIZE];
	unsigned short tx_link[NET_TX_RING_SIZE];
/* Sentinel values stored in tx_link[]: */
#define TX_LINK_NONE 0xffff	/* slot not linked into any list */
#define TX_PENDING   0xfffe	/* request visible to backend, awaiting response */
	grant_ref_t gref_tx_head;	/* head of pre-allocated TX grant-ref pool */
	grant_ref_t grant_tx_ref[NET_TX_RING_SIZE];
	struct page *grant_tx_page[NET_TX_RING_SIZE];
	unsigned tx_skb_freelist;	/* head index of the free-slot list in tx_link[] */
	unsigned int tx_pend_queue;	/* head index of slots queued but not yet pushed */

	spinlock_t rx_lock ____cacheline_aligned_in_smp;
	struct xen_netif_rx_front_ring rx;
	int rx_ring_ref;	/* grant reference of the shared RX ring page */

	struct timer_list rx_refill_timer;	/* retries RX buffer refill (see xennet_alloc_rx_buffers) */

	struct sk_buff *rx_skbs[NET_RX_RING_SIZE];
	grant_ref_t gref_rx_head;	/* head of pre-allocated RX grant-ref pool */
	grant_ref_t grant_rx_ref[NET_RX_RING_SIZE];

	/* NOTE(review): appears to count RX responses not yet consumed;
	 * guarded by rx_cons_lock — confirm against users outside this chunk.
	 */
	unsigned int rx_rsp_unconsumed;
	spinlock_t rx_cons_lock;

	struct page_pool *page_pool;	/* backing pages for RX buffers */
	struct xdp_rxq_info xdp_rxq;
};

/* Per-device state shared by all queues. */
struct netfront_info {
	struct list_head list;
	struct net_device *netdev;

	struct xenbus_device *xbdev;

	/* Multi-queue support */
	struct netfront_queue *queues;

	/* Statistics */
	struct netfront_stats __percpu *rx_stats;
	struct netfront_stats __percpu *tx_stats;

	/* XDP state */
	bool netback_has_xdp_headroom;
	bool netfront_xdp_enabled;

	/* Is device behaving sane? */
	bool broken;

	atomic_t rx_gso_checksum_fixup;	/* count of GSO packets needing checksum fixup */
};

/* One RX response plus any extra-info slots that accompanied it. */
struct netfront_rx_info {
	struct xen_netif_rx_response rx;
	struct xen_netif_extra_info extras[XEN_NETIF_EXTRA_TYPE_MAX - 1];
};

/*
 * Access macros for acquiring freeing slots in tx_skbs[].
 */

/* Push slot @id onto the front of the singly-linked list threaded
 * through @list (e.g. the tx_link[] free list), updating *@head.
 */
static void add_id_to_list(unsigned *head, unsigned short *list,
			   unsigned short id)
{
	list[id] = *head;
	*head = id;
}

/* Pop and return the slot at the front of the list, or TX_LINK_NONE if
 * the list is empty.  The popped entry is unlinked (set to TX_LINK_NONE).
 */
static unsigned short get_id_from_list(unsigned *head, unsigned short *list)
{
	unsigned int id = *head;

	if (id != TX_LINK_NONE) {
		*head = list[id];
		list[id] = TX_LINK_NONE;
	}
	return id;
}

/* Map an RX ring index to a slot in the rx_skbs[]/grant_rx_ref[] arrays.
 * Valid because NET_RX_RING_SIZE is a power of two.
 */
static int xennet_rxidx(RING_IDX idx)
{
	return idx & (NET_RX_RING_SIZE - 1);
}

/* Take ownership of the skb stored for ring index @ri; the slot is
 * cleared so it cannot be handed out twice.
 */
static struct sk_buff *xennet_get_rx_skb(struct netfront_queue *queue,
					 RING_IDX ri)
{
	int i = xennet_rxidx(ri);
	struct sk_buff *skb = queue->rx_skbs[i];
	queue->rx_skbs[i] = NULL;
	return skb;
}

/* Take ownership of the grant reference stored for ring index @ri,
 * invalidating the slot.
 */
static grant_ref_t xennet_get_rx_ref(struct netfront_queue *queue,
				     RING_IDX ri)
{
	int i = xennet_rxidx(ri);
	grant_ref_t ref = queue->grant_rx_ref[i];
	queue->grant_rx_ref[i] = INVALID_GRANT_REF;
	return ref;
}

#ifdef CONFIG_SYSFS
static const struct attribute_group xennet_dev_group;
#endif

/* Whether scatter-gather is currently enabled on the device. */
static bool xennet_can_sg(struct net_device *dev)
{
	return dev->features & NETIF_F_SG;
}


/* Timer callback armed by xennet_alloc_rx_buffers() when an RX refill
 * attempt could not post enough buffers; NAPI will retry the refill.
 */
static void rx_refill_timeout(struct timer_list *t)
{
	struct netfront_queue *queue = from_timer(queue, t, rx_refill_timer);
	napi_schedule(&queue->napi);
}

/* True if enough free TX ring slots remain to accept another packet
 * (worst case XEN_NETIF_NR_SLOTS_MIN slots, plus one for slack).
 */
static int netfront_tx_slot_available(struct netfront_queue *queue)
{
	return (queue->tx.req_prod_pvt - queue->tx.rsp_cons) <
		(NET_TX_RING_SIZE - XEN_NETIF_NR_SLOTS_MIN - 1);
}

/* Restart this queue's netdev TX queue if it was stopped and ring
 * space has become available again.
 */
static void xennet_maybe_wake_tx(struct netfront_queue *queue)
{
	struct net_device *dev = queue->info->netdev;
	struct netdev_queue *dev_queue = netdev_get_tx_queue(dev, queue->id);

	if (unlikely(netif_tx_queue_stopped(dev_queue)) &&
	    netfront_tx_slot_available(queue) &&
	    likely(netif_running(dev)))
		netif_tx_wake_queue(netdev_get_tx_queue(dev, queue->id));
}


/* Allocate one RX skb backed by a page from the queue's page pool.
 * Returns NULL on allocation failure (caller retries via timer).
 */
static struct sk_buff *xennet_alloc_one_rx_buffer(struct netfront_queue *queue)
{
	struct sk_buff *skb;
	struct page *page;

	skb = __netdev_alloc_skb(queue->info->netdev,
				 RX_COPY_THRESHOLD + NET_IP_ALIGN,
				 GFP_ATOMIC | __GFP_NOWARN);
	if (unlikely(!skb))
		return NULL;

	page = page_pool_dev_alloc_pages(queue->page_pool);
	if (unlikely(!page)) {
		kfree_skb(skb);
		return NULL;
	}
	/* Whole page attached as frag 0; data lands here via the grant. */
	skb_add_rx_frag(skb, 0, page, 0, 0, PAGE_SIZE);

	/* Align ip header to a 16 bytes boundary */
	skb_reserve(skb, NET_IP_ALIGN);
	skb->dev = queue->info->netdev;

	return skb;
}


/* Refill the RX ring with granted buffers up to NET_RX_RING_SIZE
 * outstanding slots, then notify the backend if it is waiting.
 * On skb allocation failure or insufficient slots, arms
 * rx_refill_timer to retry in ~HZ/10.
 */
static void xennet_alloc_rx_buffers(struct netfront_queue *queue)
{
	RING_IDX req_prod = queue->rx.req_prod_pvt;
	int notify;
	int err = 0;

	if (unlikely(!netif_carrier_ok(queue->info->netdev)))
		return;

	for (req_prod = queue->rx.req_prod_pvt;
	     req_prod - queue->rx.rsp_cons < NET_RX_RING_SIZE;
	     req_prod++) {
		struct sk_buff *skb;
		unsigned short id;
		grant_ref_t ref;
		struct page *page;
		struct xen_netif_rx_request *req;

		skb = xennet_alloc_one_rx_buffer(queue);
		if (!skb) {
			err = -ENOMEM;
			break;
		}

		id = xennet_rxidx(req_prod);

		BUG_ON(queue->rx_skbs[id]);
		queue->rx_skbs[id] = skb;

		/* Pool was sized up front, so a claim failure is a bug. */
		ref = gnttab_claim_grant_reference(&queue->gref_rx_head);
		WARN_ON_ONCE(IS_ERR_VALUE((unsigned long)(int)ref));
		queue->grant_rx_ref[id] = ref;

		page = skb_frag_page(&skb_shinfo(skb)->frags[0]);

		/* Grant the backend access to the buffer page and publish
		 * the request on the shared ring.
		 */
		req = RING_GET_REQUEST(&queue->rx, req_prod);
		gnttab_page_grant_foreign_access_ref_one(ref,
							 queue->info->xbdev->otherend_id,
							 page,
							 0);
		req->id = id;
		req->gref = ref;
	}

	queue->rx.req_prod_pvt = req_prod;

	/* Try again later if there are not enough requests or skb allocation
	 * failed.
	 * Enough requests is quantified as the sum of newly created slots and
	 * the unconsumed slots at the backend.
	 */
	if (req_prod - queue->rx.rsp_cons < NET_RX_SLOTS_MIN ||
	    unlikely(err)) {
		mod_timer(&queue->rx_refill_timer, jiffies + (HZ/10));
		return;
	}

	RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&queue->rx, notify);
	if (notify)
		notify_remote_via_irq(queue->rx_irq);
}

/* ndo_open: enable NAPI on every queue, prime the RX rings, and start
 * the TX queues.  Fails with -ENODEV if the queues were never set up
 * or the device has been marked broken.
 */
static int xennet_open(struct net_device *dev)
{
	struct netfront_info *np = netdev_priv(dev);
	unsigned int num_queues = dev->real_num_tx_queues;
	unsigned int i = 0;
	struct netfront_queue *queue = NULL;

	if (!np->queues || np->broken)
		return -ENODEV;

	for (i = 0; i < num_queues; ++i) {
		queue = &np->queues[i];
		napi_enable(&queue->napi);

		spin_lock_bh(&queue->rx_lock);
		if (netif_carrier_ok(dev)) {
			xennet_alloc_rx_buffers(queue);
			queue->rx.sring->rsp_event = queue->rx.rsp_cons + 1;
			/* Catch responses that arrived before we enabled NAPI. */
			if (RING_HAS_UNCONSUMED_RESPONSES(&queue->rx))
				napi_schedule(&queue->napi);
		}
		spin_unlock_bh(&queue->rx_lock);
	}

	netif_tx_start_all_queues(dev);

	return 0;
}

/* Garbage-collect completed TX requests: consume backend responses,
 * end the foreign access on each finished grant, free the skb, and
 * return the slot to the free list.  Returns true if any response was
 * processed.  Backend responses are validated (id range, pending
 * state, grant still held); any inconsistency marks the whole device
 * broken and disables it.  Called with tx_lock held.
 */
static bool xennet_tx_buf_gc(struct netfront_queue *queue)
{
	RING_IDX cons, prod;
	unsigned short id;
	struct sk_buff *skb;
	bool more_to_do;
	bool work_done = false;
	const struct device *dev = &queue->info->netdev->dev;

	BUG_ON(!netif_carrier_ok(queue->info->netdev));

	do {
		prod = queue->tx.sring->rsp_prod;
		/* Don't trust the backend's producer index blindly. */
		if (RING_RESPONSE_PROD_OVERFLOW(&queue->tx, prod)) {
			dev_alert(dev, "Illegal number of responses %u\n",
				  prod - queue->tx.rsp_cons);
			goto err;
		}
		rmb(); /* Ensure we see responses up to 'rp'. */

		for (cons = queue->tx.rsp_cons; cons != prod; cons++) {
			struct xen_netif_tx_response txrsp;

			work_done = true;

			/* Copy the response out of the shared page so the
			 * backend cannot change it under us after checking.
			 */
			RING_COPY_RESPONSE(&queue->tx, cons, &txrsp);
			if (txrsp.status == XEN_NETIF_RSP_NULL)
				continue;

			id = txrsp.id;
			if (id >= RING_SIZE(&queue->tx)) {
				dev_alert(dev,
					  "Response has incorrect id (%u)\n",
					  id);
				goto err;
			}
			if (queue->tx_link[id] != TX_PENDING) {
				dev_alert(dev,
					  "Response for inactive request\n");
				goto err;
			}

			queue->tx_link[id] = TX_LINK_NONE;
			skb = queue->tx_skbs[id];
			queue->tx_skbs[id] = NULL;
			if (unlikely(!gnttab_end_foreign_access_ref(
				queue->grant_tx_ref[id]))) {
				dev_alert(dev,
					  "Grant still in use by backend domain\n");
				goto err;
			}
			gnttab_release_grant_reference(
				&queue->gref_tx_head, queue->grant_tx_ref[id]);
			queue->grant_tx_ref[id] = INVALID_GRANT_REF;
			queue->grant_tx_page[id] = NULL;
			add_id_to_list(&queue->tx_skb_freelist, queue->tx_link, id);
			dev_kfree_skb_irq(skb);
		}

		queue->tx.rsp_cons = prod;

		RING_FINAL_CHECK_FOR_RESPONSES(&queue->tx, more_to_do);
	} while (more_to_do);

	xennet_maybe_wake_tx(queue);

	return work_done;

 err:
	queue->info->broken = true;
	dev_alert(dev, "Disabled for further use\n");

	return work_done;
}

/* Context passed through the gnttab_for*_grant iterators while
 * building TX requests for one packet.
 */
struct xennet_gnttab_make_txreq {
	struct netfront_queue *queue;
	struct sk_buff *skb;
	struct page *page;
	struct xen_netif_tx_request *tx;      /* Last request on ring page */
	struct xen_netif_tx_request tx_local; /* Last request local copy*/
	unsigned int size;
};

/* Iterator callback: claim a TX slot and grant reference for one
 * grant-sized chunk (@gfn/@offset/@len), fill the ring request, and
 * queue the slot id on tx_pend_queue.
 */
static void xennet_tx_setup_grant(unsigned long gfn, unsigned int offset,
				  unsigned int len, void *data)
{
	struct xennet_gnttab_make_txreq *info = data;
	unsigned int id;
	struct xen_netif_tx_request *tx;
	grant_ref_t ref;
	/* convenient aliases */
	struct page *page = info->page;
	struct netfront_queue *queue = info->queue;
	struct sk_buff *skb = info->skb;

	/* Caller guarantees free slots/refs, so neither claim can fail. */
	id = get_id_from_list(&queue->tx_skb_freelist, queue->tx_link);
	tx = RING_GET_REQUEST(&queue->tx, queue->tx.req_prod_pvt++);
	ref = gnttab_claim_grant_reference(&queue->gref_tx_head);
	WARN_ON_ONCE(IS_ERR_VALUE((unsigned long)(int)ref));

	gnttab_grant_foreign_access_ref(ref, queue->info->xbdev->otherend_id,
					gfn, GNTMAP_readonly);

	queue->tx_skbs[id] = skb;
	queue->grant_tx_page[id] = page;
	queue->grant_tx_ref[id] = ref;

	/* Build the request locally first, then copy it onto the shared
	 * ring page in one go.
	 */
	info->tx_local.id = id;
	info->tx_local.gref = ref;
	info->tx_local.offset = offset;
	info->tx_local.size = len;
	info->tx_local.flags = 0;

	*tx = info->tx_local;

	/*
	 * Put the request in the pending queue, it will be set to be pending
	 * when the producer index is about to be raised.
	 */
	add_id_to_list(&queue->tx_pend_queue, queue->tx_link, id);

	info->tx = tx;
	info->size += info->tx_local.size;
}

/* Emit the first TX request of a packet (at most one grant's worth,
 * starting at @offset in info->page).  Returns a pointer to the ring
 * request so the caller can set packet-level flags on it.
 */
static struct xen_netif_tx_request *xennet_make_first_txreq(
	struct xennet_gnttab_make_txreq *info,
	unsigned int offset, unsigned int len)
{
	info->size = 0;

	gnttab_for_one_grant(info->page, offset, len, xennet_tx_setup_grant, info);

	return info->tx;
}

/* Iterator callback for follow-on chunks: mark the previous request
 * "more data follows", take an extra skb reference (one per slot,
 * dropped as each response is processed), then emit the request.
 */
static void xennet_make_one_txreq(unsigned long gfn, unsigned int offset,
				  unsigned int len, void *data)
{
	struct xennet_gnttab_make_txreq *info = data;

	info->tx->flags |= XEN_NETTXF_more_data;
	skb_get(info->skb);
	xennet_tx_setup_grant(gfn, offset, len, data);
}

/* Emit TX requests covering @len bytes starting at @page + @offset,
 * walking page by page and grant by grant within each page.
 */
static void xennet_make_txreqs(
	struct xennet_gnttab_make_txreq *info,
	struct page *page,
	unsigned int offset, unsigned int len)
{
	/* Skip unused frames from start of page */
	page += offset >> PAGE_SHIFT;
	offset &= ~PAGE_MASK;

	while (len) {
		info->page = page;
		info->size = 0;

		gnttab_foreach_grant_in_range(page, offset, len,
					      xennet_make_one_txreq,
					      info);

		page++;
		offset = 0;
		len -= info->size;
	}
}

/*
 * Count how many ring slots are required to send this skb. Each frag
 * might be a compound page.
 */
static int xennet_count_skb_slots(struct sk_buff *skb)
{
	int i, frags = skb_shinfo(skb)->nr_frags;
	int slots;

	slots = gnttab_count_grant(offset_in_page(skb->data),
				   skb_headlen(skb));

	for (i = 0; i < frags; i++) {
		skb_frag_t *frag = skb_shinfo(skb)->frags + i;
		unsigned long size = skb_frag_size(frag);
		unsigned long offset = skb_frag_off(frag);

		/* Skip unused frames from start of page */
		offset &= ~PAGE_MASK;

		slots += gnttab_count_grant(offset, size);
	}

	return slots;
}

/* ndo_select_queue: hash-based spreading across the TX queues; queue 0
 * when the device has only one.
 */
static u16 xennet_select_queue(struct net_device *dev, struct sk_buff *skb,
			       struct net_device *sb_dev)
{
	unsigned int num_queues = dev->real_num_tx_queues;
	u32 hash;
	u16 queue_idx;

	/* First, check if there is only one queue */
	if (num_queues == 1) {
		queue_idx = 0;
	} else {
		hash = skb_get_hash(skb);
		queue_idx = hash % num_queues;
	}

	return queue_idx;
}

/* Flip every slot queued on tx_pend_queue to TX_PENDING.  Done just
 * before raising the producer index so xennet_tx_buf_gc() only accepts
 * responses for requests that were actually published.
 */
static void xennet_mark_tx_pending(struct netfront_queue *queue)
{
	unsigned int i;

	while ((i = get_id_from_list(&queue->tx_pend_queue, queue->tx_link)) !=
	       TX_LINK_NONE)
		queue->tx_link[i] = TX_PENDING;
}

/* Transmit a single XDP frame on @queue: build one TX request for the
 * frame's page, push it, update TX stats, and reap completions.
 * Returns 0.  Caller holds tx_lock.
 */
static int xennet_xdp_xmit_one(struct net_device *dev,
			       struct netfront_queue *queue,
			       struct xdp_frame *xdpf)
{
	struct netfront_info *np = netdev_priv(dev);
	struct netfront_stats *tx_stats = this_cpu_ptr(np->tx_stats);
	struct xennet_gnttab_make_txreq info = {
		.queue = queue,
		.skb = NULL,	/* no skb backs an XDP frame */
		.page = virt_to_page(xdpf->data),
	};
	int notify;

	xennet_make_first_txreq(&info,
				offset_in_page(xdpf->data),
				xdpf->len);

	xennet_mark_tx_pending(queue);

	RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&queue->tx, notify);
	if (notify)
		notify_remote_via_irq(queue->tx_irq);

	u64_stats_update_begin(&tx_stats->syncp);
	tx_stats->bytes += xdpf->len;
	tx_stats->packets++;
	u64_stats_update_end(&tx_stats->syncp);

	xennet_tx_buf_gc(queue);

	return 0;
}

/* ndo_xdp_xmit: transmit up to @n XDP frames, stopping at the first
 * failure.  Returns the number of frames accepted, or a negative errno
 * if the device is broken or @flags are invalid.  The queue is picked
 * by current CPU modulo queue count.
 */
static int xennet_xdp_xmit(struct net_device *dev, int n,
			   struct xdp_frame **frames, u32 flags)
{
	unsigned int num_queues = dev->real_num_tx_queues;
	struct netfront_info *np = netdev_priv(dev);
	struct netfront_queue *queue = NULL;
	unsigned long irq_flags;
	int nxmit = 0;
	int i;

	if (unlikely(np->broken))
		return -ENODEV;
	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
		return -EINVAL;

	queue = &np->queues[smp_processor_id() % num_queues];

	spin_lock_irqsave(&queue->tx_lock, irq_flags);
	for (i = 0; i < n; i++) {
		struct xdp_frame *xdpf = frames[i];

		if (!xdpf)
			continue;
		if (xennet_xdp_xmit_one(dev, queue, xdpf))
			break;
		nxmit++;
	}
/* Worst-case slots for a 64KiB GSO packet split into Xen-page grants. */
#define MAX_XEN_SKB_FRAGS (65536 / XEN_PAGE_SIZE + 1)

/*
 * ndo_start_xmit hook: translate @skb into a chain of tx ring requests
 * (first request for the linear area, optional GSO extra-info slot, then
 * requests for the remaining linear data and each frag), push them to the
 * backend and kick it if needed.  Always returns NETDEV_TX_OK; packets
 * that cannot be sent are dropped and counted in dev->stats.tx_dropped.
 */
static netdev_tx_t xennet_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct netfront_info *np = netdev_priv(dev);
	struct netfront_stats *tx_stats = this_cpu_ptr(np->tx_stats);
	struct xen_netif_tx_request *first_tx;
	unsigned int i;
	int notify;
	int slots;
	struct page *page;
	unsigned int offset;
	unsigned int len;
	unsigned long flags;
	struct netfront_queue *queue = NULL;
	struct xennet_gnttab_make_txreq info = { };
	unsigned int num_queues = dev->real_num_tx_queues;
	u16 queue_index;
	struct sk_buff *nskb;

	/* Drop the packet if no queues are set up */
	if (num_queues < 1)
		goto drop;
	if (unlikely(np->broken))
		goto drop;
	/* Determine which queue to transmit this SKB on */
	queue_index = skb_get_queue_mapping(skb);
	queue = &np->queues[queue_index];

	/* If skb->len is too big for wire format, drop skb and alert
	 * user about misconfiguration.
	 */
	if (unlikely(skb->len > XEN_NETIF_MAX_TX_SIZE)) {
		net_alert_ratelimited(
			"xennet: skb->len = %u, too big for wire format\n",
			skb->len);
		goto drop;
	}

	slots = xennet_count_skb_slots(skb);
	if (unlikely(slots > MAX_XEN_SKB_FRAGS + 1)) {
		net_dbg_ratelimited("xennet: skb rides the rocket: %d slots, %d bytes\n",
				    slots, skb->len);
		/* Try to squeeze the data into fewer slots. */
		if (skb_linearize(skb))
			goto drop;
	}

	page = virt_to_page(skb->data);
	offset = offset_in_page(skb->data);

	/* The first req should be at least ETH_HLEN size or the packet will be
	 * dropped by netback.
	 */
	if (unlikely(PAGE_SIZE - offset < ETH_HLEN)) {
		nskb = skb_copy(skb, GFP_ATOMIC);
		if (!nskb)
			goto drop;
		dev_consume_skb_any(skb);
		skb = nskb;
		page = virt_to_page(skb->data);
		offset = offset_in_page(skb->data);
	}

	len = skb_headlen(skb);

	spin_lock_irqsave(&queue->tx_lock, flags);

	if (unlikely(!netif_carrier_ok(dev) ||
		     (slots > 1 && !xennet_can_sg(dev)) ||
		     netif_needs_gso(skb, netif_skb_features(skb)))) {
		spin_unlock_irqrestore(&queue->tx_lock, flags);
		goto drop;
	}

	/* First request for the linear area. */
	info.queue = queue;
	info.skb = skb;
	info.page = page;
	first_tx = xennet_make_first_txreq(&info, offset, len);
	offset += info.tx_local.size;
	if (offset == PAGE_SIZE) {
		page++;
		offset = 0;
	}
	len -= info.tx_local.size;

	if (skb->ip_summed == CHECKSUM_PARTIAL)
		/* local packet? */
		first_tx->flags |= XEN_NETTXF_csum_blank |
				   XEN_NETTXF_data_validated;
	else if (skb->ip_summed == CHECKSUM_UNNECESSARY)
		/* remote but checksummed. */
		first_tx->flags |= XEN_NETTXF_data_validated;

	/* Optional extra info after the first request. */
	if (skb_shinfo(skb)->gso_size) {
		struct xen_netif_extra_info *gso;

		/* The GSO descriptor occupies its own ring slot. */
		gso = (struct xen_netif_extra_info *)
			RING_GET_REQUEST(&queue->tx, queue->tx.req_prod_pvt++);

		first_tx->flags |= XEN_NETTXF_extra_info;

		gso->u.gso.size = skb_shinfo(skb)->gso_size;
		gso->u.gso.type = (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) ?
			XEN_NETIF_GSO_TYPE_TCPV6 :
			XEN_NETIF_GSO_TYPE_TCPV4;
		gso->u.gso.pad = 0;
		gso->u.gso.features = 0;

		gso->type = XEN_NETIF_EXTRA_TYPE_GSO;
		gso->flags = 0;
	}

	/* Requests for the rest of the linear area. */
	xennet_make_txreqs(&info, page, offset, len);

	/* Requests for all the frags. */
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		xennet_make_txreqs(&info, skb_frag_page(frag),
					skb_frag_off(frag),
					skb_frag_size(frag));
	}

	/* First request has the packet length. */
	first_tx->size = skb->len;

	/* timestamp packet in software */
	skb_tx_timestamp(skb);

	/* Must precede the push so tx-gc can validate backend responses. */
	xennet_mark_tx_pending(queue);

	RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&queue->tx, notify);
	if (notify)
		notify_remote_via_irq(queue->tx_irq);

	u64_stats_update_begin(&tx_stats->syncp);
	tx_stats->bytes += skb->len;
	tx_stats->packets++;
	u64_stats_update_end(&tx_stats->syncp);

	/* Note: It is not safe to access skb after xennet_tx_buf_gc()! */
	xennet_tx_buf_gc(queue);

	if (!netfront_tx_slot_available(queue))
		netif_tx_stop_queue(netdev_get_tx_queue(dev, queue->id));

	spin_unlock_irqrestore(&queue->tx_lock, flags);

	return NETDEV_TX_OK;

 drop:
	dev->stats.tx_dropped++;
	dev_kfree_skb_any(skb);
	return NETDEV_TX_OK;
}

/*
 * ndo_stop hook: stop all tx queues and quiesce every queue's NAPI
 * context.  Always returns 0.
 */
static int xennet_close(struct net_device *dev)
{
	struct netfront_info *np = netdev_priv(dev);
	unsigned int num_queues = dev->real_num_tx_queues;
	unsigned int i;
	struct netfront_queue *queue;
	netif_tx_stop_all_queues(np->netdev);
	for (i = 0; i < num_queues; ++i) {
		queue = &np->queues[i];
		napi_disable(&queue->napi);
	}
	return 0;
}
Bennieston struct netfront_queue *queue; 8332688fcb7SAndrew J. Bennieston netif_tx_stop_all_queues(np->netdev); 8342688fcb7SAndrew J. Bennieston for (i = 0; i < num_queues; ++i) { 8352688fcb7SAndrew J. Bennieston queue = &np->queues[i]; 8362688fcb7SAndrew J. Bennieston napi_disable(&queue->napi); 8372688fcb7SAndrew J. Bennieston } 8380d160211SJeremy Fitzhardinge return 0; 8390d160211SJeremy Fitzhardinge } 8400d160211SJeremy Fitzhardinge 841dcf4ff7aSMarek Marczykowski-Górecki static void xennet_destroy_queues(struct netfront_info *info) 842dcf4ff7aSMarek Marczykowski-Górecki { 843dcf4ff7aSMarek Marczykowski-Górecki unsigned int i; 844dcf4ff7aSMarek Marczykowski-Górecki 845dcf4ff7aSMarek Marczykowski-Górecki for (i = 0; i < info->netdev->real_num_tx_queues; i++) { 846dcf4ff7aSMarek Marczykowski-Górecki struct netfront_queue *queue = &info->queues[i]; 847dcf4ff7aSMarek Marczykowski-Górecki 848dcf4ff7aSMarek Marczykowski-Górecki if (netif_running(info->netdev)) 849dcf4ff7aSMarek Marczykowski-Górecki napi_disable(&queue->napi); 850dcf4ff7aSMarek Marczykowski-Górecki netif_napi_del(&queue->napi); 851dcf4ff7aSMarek Marczykowski-Górecki } 852dcf4ff7aSMarek Marczykowski-Górecki 853dcf4ff7aSMarek Marczykowski-Górecki kfree(info->queues); 854dcf4ff7aSMarek Marczykowski-Górecki info->queues = NULL; 855dcf4ff7aSMarek Marczykowski-Górecki } 856dcf4ff7aSMarek Marczykowski-Górecki 857dcf4ff7aSMarek Marczykowski-Górecki static void xennet_uninit(struct net_device *dev) 858dcf4ff7aSMarek Marczykowski-Górecki { 859dcf4ff7aSMarek Marczykowski-Górecki struct netfront_info *np = netdev_priv(dev); 860dcf4ff7aSMarek Marczykowski-Górecki xennet_destroy_queues(np); 861dcf4ff7aSMarek Marczykowski-Górecki } 862dcf4ff7aSMarek Marczykowski-Górecki 863b27d4795SJuergen Gross static void xennet_set_rx_rsp_cons(struct netfront_queue *queue, RING_IDX val) 864b27d4795SJuergen Gross { 865b27d4795SJuergen Gross unsigned long flags; 866b27d4795SJuergen Gross 867b27d4795SJuergen Gross 
/*
 * Re-queue an rx buffer (skb + grant ref) onto the next free rx ring
 * request slot so the backend can fill it again.
 */
static void xennet_move_rx_slot(struct netfront_queue *queue, struct sk_buff *skb,
				grant_ref_t ref)
{
	int new = xennet_rxidx(queue->rx.req_prod_pvt);

	BUG_ON(queue->rx_skbs[new]);
	queue->rx_skbs[new] = skb;
	queue->grant_rx_ref[new] = ref;
	RING_GET_REQUEST(&queue->rx, queue->rx.req_prod_pvt)->id = new;
	RING_GET_REQUEST(&queue->rx, queue->rx.req_prod_pvt)->gref = ref;
	queue->rx.req_prod_pvt++;
}

/*
 * Consume the chain of extra-info slots following an rx response that
 * had XEN_NETRXF_extra_info set, storing each (keyed by type) into
 * @extras.  @rp bounds the responses known to be available.  The rx
 * buffers occupying those slots are recycled via xennet_move_rx_slot().
 *
 * Returns 0, -EBADR if the chain ran past @rp, or -EINVAL for an
 * out-of-range extra type.  Responses are copied to a local struct
 * (RING_COPY_RESPONSE) so the backend cannot change them after we
 * validate them.
 */
static int xennet_get_extras(struct netfront_queue *queue,
			     struct xen_netif_extra_info *extras,
			     RING_IDX rp)

{
	struct xen_netif_extra_info extra;
	struct device *dev = &queue->info->netdev->dev;
	RING_IDX cons = queue->rx.rsp_cons;
	int err = 0;

	do {
		struct sk_buff *skb;
		grant_ref_t ref;

		if (unlikely(cons + 1 == rp)) {
			if (net_ratelimit())
				dev_warn(dev, "Missing extra info\n");
			err = -EBADR;
			break;
		}

		RING_COPY_RESPONSE(&queue->rx, ++cons, &extra);

		if (unlikely(!extra.type ||
			     extra.type >= XEN_NETIF_EXTRA_TYPE_MAX)) {
			if (net_ratelimit())
				dev_warn(dev, "Invalid extra type: %d\n",
					 extra.type);
			err = -EINVAL;
		} else {
			extras[extra.type - 1] = extra;
		}

		/* Return the buffer in this slot to the rx ring. */
		skb = xennet_get_rx_skb(queue, cons);
		ref = xennet_get_rx_ref(queue, cons);
		xennet_move_rx_slot(queue, skb, ref);
	} while (extra.flags & XEN_NETIF_EXTRA_FLAG_MORE);

	xennet_set_rx_rsp_cons(queue, cons);
	return err;
}

/*
 * Run the attached XDP program on a received page and act on its
 * verdict (XDP_TX retransmits, XDP_REDIRECT forwards, XDP_PASS/DROP
 * fall through to the caller).  Returns the verdict.
 */
static u32 xennet_run_xdp(struct netfront_queue *queue, struct page *pdata,
			  struct xen_netif_rx_response *rx, struct bpf_prog *prog,
			  struct xdp_buff *xdp, bool *need_xdp_flush)
{
	struct xdp_frame *xdpf;
	u32 len = rx->status;
	u32 act;
	int err;

	xdp_init_buff(xdp, XEN_PAGE_SIZE - XDP_PACKET_HEADROOM,
		      &queue->xdp_rxq);
	xdp_prepare_buff(xdp, page_address(pdata), XDP_PACKET_HEADROOM,
			 len, false);

	act = bpf_prog_run_xdp(prog, xdp);
	switch (act) {
	case XDP_TX:
		get_page(pdata);
		xdpf = xdp_convert_buff_to_frame(xdp);
		/* xennet_xdp_xmit() returns the count sent (0 = not sent)
		 * or a negative errno. */
		err = xennet_xdp_xmit(queue->info->netdev, 1, &xdpf, 0);
		if (unlikely(!err))
			xdp_return_frame_rx_napi(xdpf);
		else if (unlikely(err < 0))
			trace_xdp_exception(queue->info->netdev, prog, act);
		break;
	case XDP_REDIRECT:
		get_page(pdata);
		err = xdp_do_redirect(queue->info->netdev, xdp, prog);
		/* Caller must xdp_do_flush() once the poll loop ends. */
		*need_xdp_flush = true;
		if (unlikely(err))
			trace_xdp_exception(queue->info->netdev, prog, act);
		break;
	case XDP_PASS:
	case XDP_DROP:
		break;

	case XDP_ABORTED:
		trace_xdp_exception(queue->info->netdev, prog, act);
		break;

	default:
		bpf_warn_invalid_xdp_action(queue->info->netdev, prog, act);
	}

	return act;
}
/*
 * Gather the full chain of rx responses (first slot plus any
 * XEN_NETRXF_more_data continuation slots) for one incoming packet,
 * validating each slot's offset/size, reclaiming its grant reference
 * and queuing its skb onto @list for later frag assembly.
 *
 * Responses are copied out of the shared ring before validation so a
 * malicious backend cannot modify them afterwards.  If the backend
 * refuses to release a grant, the device is marked broken and disabled.
 *
 * Returns 0 on success or a negative errno; on error rsp_cons is
 * advanced past the bad chain so the ring stays consistent.
 */
static int xennet_get_responses(struct netfront_queue *queue,
				struct netfront_rx_info *rinfo, RING_IDX rp,
				struct sk_buff_head *list,
				bool *need_xdp_flush)
{
	struct xen_netif_rx_response *rx = &rinfo->rx, rx_local;
	int max = XEN_NETIF_NR_SLOTS_MIN + (rx->status <= RX_COPY_THRESHOLD);
	RING_IDX cons = queue->rx.rsp_cons;
	struct sk_buff *skb = xennet_get_rx_skb(queue, cons);
	struct xen_netif_extra_info *extras = rinfo->extras;
	grant_ref_t ref = xennet_get_rx_ref(queue, cons);
	struct device *dev = &queue->info->netdev->dev;
	struct bpf_prog *xdp_prog;
	struct xdp_buff xdp;
	int slots = 1;
	int err = 0;
	u32 verdict;

	if (rx->flags & XEN_NETRXF_extra_info) {
		err = xennet_get_extras(queue, extras, rp);
		if (!err) {
			/* An XDP extra carries the headroom the backend used. */
			if (extras[XEN_NETIF_EXTRA_TYPE_XDP - 1].type) {
				struct xen_netif_extra_info *xdp;

				xdp = &extras[XEN_NETIF_EXTRA_TYPE_XDP - 1];
				rx->offset = xdp->u.xdp.headroom;
			}
		}
		/* xennet_get_extras() advanced rsp_cons; resync. */
		cons = queue->rx.rsp_cons;
	}

	for (;;) {
		/* Reject responses that would overrun a Xen page. */
		if (unlikely(rx->status < 0 ||
			     rx->offset + rx->status > XEN_PAGE_SIZE)) {
			if (net_ratelimit())
				dev_warn(dev, "rx->offset: %u, size: %d\n",
					 rx->offset, rx->status);
			xennet_move_rx_slot(queue, skb, ref);
			err = -EINVAL;
			goto next;
		}

		/*
		 * This definitely indicates a bug, either in this driver or in
		 * the backend driver. In future this should flag the bad
		 * situation to the system controller to reboot the backend.
		 */
		if (ref == INVALID_GRANT_REF) {
			if (net_ratelimit())
				dev_warn(dev, "Bad rx response id %d.\n",
					 rx->id);
			err = -EINVAL;
			goto next;
		}

		if (!gnttab_end_foreign_access_ref(ref)) {
			dev_alert(dev,
				  "Grant still in use by backend domain\n");
			queue->info->broken = true;
			dev_alert(dev, "Disabled for further use\n");
			return -EINVAL;
		}

		gnttab_release_grant_reference(&queue->gref_rx_head, ref);

		rcu_read_lock();
		xdp_prog = rcu_dereference(queue->xdp_prog);
		if (xdp_prog) {
			if (!(rx->flags & XEN_NETRXF_more_data)) {
				/* currently only a single page contains data */
				verdict = xennet_run_xdp(queue,
							 skb_frag_page(&skb_shinfo(skb)->frags[0]),
							 rx, xdp_prog, &xdp, need_xdp_flush);
				if (verdict != XDP_PASS)
					err = -EINVAL;
			} else {
				/* drop the frame */
				err = -EINVAL;
			}
		}
		rcu_read_unlock();
next:
		__skb_queue_tail(list, skb);
		if (!(rx->flags & XEN_NETRXF_more_data))
			break;

		if (cons + slots == rp) {
			if (net_ratelimit())
				dev_warn(dev, "Need more slots\n");
			err = -ENOENT;
			break;
		}

		/* Copy the continuation response out of the shared ring. */
		RING_COPY_RESPONSE(&queue->rx, cons + slots, &rx_local);
		rx = &rx_local;
		skb = xennet_get_rx_skb(queue, cons + slots);
		ref = xennet_get_rx_ref(queue, cons + slots);
		slots++;
	}

	if (unlikely(slots > max)) {
		if (net_ratelimit())
			dev_warn(dev, "Too many slots\n");
		err = -E2BIG;
	}

	if (unlikely(err))
		xennet_set_rx_rsp_cons(queue, cons + slots);

	return err;
}

/*
 * Apply a received GSO extra-info descriptor to @skb, validating the
 * size and type first.  Returns 0 or -EINVAL for a bad descriptor.
 */
static int xennet_set_skb_gso(struct sk_buff *skb,
			      struct xen_netif_extra_info *gso)
{
	if (!gso->u.gso.size) {
		if (net_ratelimit())
			pr_warn("GSO size must not be zero\n");
		return -EINVAL;
	}

	if (gso->u.gso.type != XEN_NETIF_GSO_TYPE_TCPV4 &&
	    gso->u.gso.type != XEN_NETIF_GSO_TYPE_TCPV6) {
		if (net_ratelimit())
			pr_warn("Bad GSO type %d\n", gso->u.gso.type);
		return -EINVAL;
	}

	skb_shinfo(skb)->gso_size = gso->u.gso.size;
	skb_shinfo(skb)->gso_type =
		(gso->u.gso.type == XEN_NETIF_GSO_TYPE_TCPV4) ?
		SKB_GSO_TCPV4 :
		SKB_GSO_TCPV6;

	/* Header must be checked, and gso_segs computed. */
	skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY;
	skb_shinfo(skb)->gso_segs = 0;

	return 0;
}
/*
 * Attach the continuation buffers queued on @list as frags of @skb,
 * consuming one rx response per buffer.  If the frag array fills up,
 * head data is pulled out first to free a slot; if that still is not
 * enough the packet is abandoned with -ENOENT (rsp_cons is advanced
 * past the remaining list entries so the ring stays consistent).
 */
static int xennet_fill_frags(struct netfront_queue *queue,
			     struct sk_buff *skb,
			     struct sk_buff_head *list)
{
	RING_IDX cons = queue->rx.rsp_cons;
	struct sk_buff *nskb;

	while ((nskb = __skb_dequeue(list))) {
		struct xen_netif_rx_response rx;
		skb_frag_t *nfrag = &skb_shinfo(nskb)->frags[0];

		/* Local copy: don't trust the shared ring after validation. */
		RING_COPY_RESPONSE(&queue->rx, ++cons, &rx);

		if (skb_shinfo(skb)->nr_frags == MAX_SKB_FRAGS) {
			unsigned int pull_to = NETFRONT_SKB_CB(skb)->pull_to;

			BUG_ON(pull_to < skb_headlen(skb));
			__pskb_pull_tail(skb, pull_to - skb_headlen(skb));
		}
		if (unlikely(skb_shinfo(skb)->nr_frags >= MAX_SKB_FRAGS)) {
			/* Skip this response and all still-queued ones. */
			xennet_set_rx_rsp_cons(queue,
					       ++cons + skb_queue_len(list));
			kfree_skb(nskb);
			return -ENOENT;
		}

		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
				skb_frag_page(nfrag),
				rx.offset, rx.status, PAGE_SIZE);

		/* The page now belongs to @skb; don't free it with nskb. */
		skb_shinfo(nskb)->nr_frags = 0;
		kfree_skb(nskb);
	}

	xennet_set_rx_rsp_cons(queue, cons);

	return 0;
}

/*
 * Ensure a received skb has a usable checksum state, fixing up buggy
 * peers that send GSO frames without NETRXF_csum_blank.  Returns 0 or
 * a negative errno from skb_checksum_setup().
 */
static int checksum_setup(struct net_device *dev, struct sk_buff *skb)
{
	bool recalculate_partial_csum = false;

	/*
	 * A GSO SKB must be CHECKSUM_PARTIAL. However some buggy
	 * peers can fail to set NETRXF_csum_blank when sending a GSO
	 * frame. In this case force the SKB to CHECKSUM_PARTIAL and
	 * recalculate the partial checksum.
	 */
	if (skb->ip_summed != CHECKSUM_PARTIAL && skb_is_gso(skb)) {
		struct netfront_info *np = netdev_priv(dev);
		atomic_inc(&np->rx_gso_checksum_fixup);
		skb->ip_summed = CHECKSUM_PARTIAL;
		recalculate_partial_csum = true;
	}

	/* A non-CHECKSUM_PARTIAL SKB does not require setup. */
	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return 0;

	return skb_checksum_setup(skb, recalculate_partial_csum);
}
/*
 * Deliver the skbs accumulated on @rxq to the network stack.
 *
 * For each skb: pull up to pull_to bytes into the linear area, set the
 * protocol, run checksum_setup() to fix up packets the backend sent
 * with a blank or unvalidated checksum, bump the per-CPU RX counters
 * and hand the packet to GRO.
 *
 * Returns the number of packets dropped (checksum setup failure); the
 * caller subtracts this from its work_done accounting.
 */
static int handle_incoming_queue(struct netfront_queue *queue,
				 struct sk_buff_head *rxq)
{
	struct netfront_stats *rx_stats = this_cpu_ptr(queue->info->rx_stats);
	int packets_dropped = 0;
	struct sk_buff *skb;

	while ((skb = __skb_dequeue(rxq)) != NULL) {
		int pull_to = NETFRONT_SKB_CB(skb)->pull_to;

		/* Make sure the header bytes are in the linear area. */
		if (pull_to > skb_headlen(skb))
			__pskb_pull_tail(skb, pull_to - skb_headlen(skb));

		/* Ethernet work: Delayed to here as it peeks the header. */
		skb->protocol = eth_type_trans(skb, queue->info->netdev);
		skb_reset_network_header(skb);

		if (checksum_setup(queue->info->netdev, skb)) {
			kfree_skb(skb);
			packets_dropped++;
			queue->info->netdev->stats.rx_errors++;
			continue;
		}

		u64_stats_update_begin(&rx_stats->syncp);
		rx_stats->packets++;
		rx_stats->bytes += skb->len;
		u64_stats_update_end(&rx_stats->syncp);

		/* Pass it up. */
		napi_gro_receive(&queue->napi, skb);
	}

	return packets_dropped;
}

/*
 * NAPI poll handler for one RX queue: consume up to @budget responses
 * from the shared RX ring, reassemble fragmented/GSO packets via the
 * temporary queue, and push completed packets up the stack through
 * handle_incoming_queue().  Runs under queue->rx_lock.
 */
static int xennet_poll(struct napi_struct *napi, int budget)
{
	struct netfront_queue *queue = container_of(napi, struct netfront_queue, napi);
	struct net_device *dev = queue->info->netdev;
	struct sk_buff *skb;
	struct netfront_rx_info rinfo;
	struct xen_netif_rx_response *rx = &rinfo.rx;
	struct xen_netif_extra_info *extras = rinfo.extras;
	RING_IDX i, rp;
	int work_done;
	struct sk_buff_head rxq;	/* packets ready for the stack */
	struct sk_buff_head errq;	/* packets to be discarded */
	struct sk_buff_head tmpq;	/* in-progress multi-slot packets */
	int err;
	bool need_xdp_flush = false;

	spin_lock(&queue->rx_lock);

	skb_queue_head_init(&rxq);
	skb_queue_head_init(&errq);
	skb_queue_head_init(&tmpq);

	rp = queue->rx.sring->rsp_prod;
	/*
	 * An untrusted backend advertising more responses than fit in the
	 * ring is misbehaving: mark the device broken and stop using it.
	 */
	if (RING_RESPONSE_PROD_OVERFLOW(&queue->rx, rp)) {
		dev_alert(&dev->dev, "Illegal number of responses %u\n",
			  rp - queue->rx.rsp_cons);
		queue->info->broken = true;
		spin_unlock(&queue->rx_lock);
		return 0;
	}
	rmb(); /* Ensure we see queued responses up to 'rp'. */

	i = queue->rx.rsp_cons;
	work_done = 0;
	while ((i != rp) && (work_done < budget)) {
		RING_COPY_RESPONSE(&queue->rx, i, rx);
		memset(extras, 0, sizeof(rinfo.extras));

		err = xennet_get_responses(queue, &rinfo, rp, &tmpq,
					   &need_xdp_flush);

		if (unlikely(err)) {
			if (queue->info->broken) {
				spin_unlock(&queue->rx_lock);
				return 0;
			}
err:
			/* Move everything gathered so far to the error queue. */
			while ((skb = __skb_dequeue(&tmpq)))
				__skb_queue_tail(&errq, skb);
			dev->stats.rx_errors++;
			i = queue->rx.rsp_cons;
			continue;
		}

		skb = __skb_dequeue(&tmpq);

		if (extras[XEN_NETIF_EXTRA_TYPE_GSO - 1].type) {
			struct xen_netif_extra_info *gso;
			gso = &extras[XEN_NETIF_EXTRA_TYPE_GSO - 1];

			if (unlikely(xennet_set_skb_gso(skb, gso))) {
				/* Put the skb back and discard the whole set. */
				__skb_queue_head(&tmpq, skb);
				xennet_set_rx_rsp_cons(queue,
						       queue->rx.rsp_cons +
						       skb_queue_len(&tmpq));
				goto err;
			}
		}

		/* Amount to pull into the linear area later (capped). */
		NETFRONT_SKB_CB(skb)->pull_to = rx->status;
		if (NETFRONT_SKB_CB(skb)->pull_to > RX_COPY_THRESHOLD)
			NETFRONT_SKB_CB(skb)->pull_to = RX_COPY_THRESHOLD;

		skb_frag_off_set(&skb_shinfo(skb)->frags[0], rx->offset);
		skb_frag_size_set(&skb_shinfo(skb)->frags[0], rx->status);
		skb->data_len = rx->status;
		skb->len += rx->status;

		if (unlikely(xennet_fill_frags(queue, skb, &tmpq)))
			goto err;

		if (rx->flags & XEN_NETRXF_csum_blank)
			skb->ip_summed = CHECKSUM_PARTIAL;
		else if (rx->flags & XEN_NETRXF_data_validated)
			skb->ip_summed = CHECKSUM_UNNECESSARY;

		__skb_queue_tail(&rxq, skb);

		i = queue->rx.rsp_cons + 1;
		xennet_set_rx_rsp_cons(queue, i);
		work_done++;
	}
	if (need_xdp_flush)
		xdp_do_flush();

	__skb_queue_purge(&errq);

	work_done -= handle_incoming_queue(queue, &rxq);

	xennet_alloc_rx_buffers(queue);

	if (work_done < budget) {
		int more_to_do = 0;

		napi_complete_done(napi, work_done);

		/* Re-schedule if more responses arrived meanwhile. */
		RING_FINAL_CHECK_FOR_RESPONSES(&queue->rx, more_to_do);
		if (more_to_do)
			napi_schedule(napi);
	}

	spin_unlock(&queue->rx_lock);

	return work_done;
}

/* Validate a new MTU; the upper bound depends on SG support. */
static int xennet_change_mtu(struct net_device *dev, int mtu)
{
	int max = xennet_can_sg(dev) ?
		XEN_NETIF_MAX_TX_SIZE : ETH_DATA_LEN;

	if (mtu > max)
		return -EINVAL;
	dev->mtu = mtu;
	return 0;
}

/*
 * ndo_get_stats64: aggregate the per-CPU RX/TX counters into @tot,
 * using the u64_stats seqcount to obtain consistent 64-bit snapshots
 * (needed on 32-bit architectures).
 */
static void xennet_get_stats64(struct net_device *dev,
			       struct rtnl_link_stats64 *tot)
{
	struct netfront_info *np = netdev_priv(dev);
	int cpu;

	for_each_possible_cpu(cpu) {
		struct netfront_stats *rx_stats = per_cpu_ptr(np->rx_stats, cpu);
		struct netfront_stats *tx_stats = per_cpu_ptr(np->tx_stats, cpu);
		u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
		unsigned int start;

		do {
			start = u64_stats_fetch_begin_irq(&tx_stats->syncp);
			tx_packets = tx_stats->packets;
			tx_bytes = tx_stats->bytes;
		} while (u64_stats_fetch_retry_irq(&tx_stats->syncp, start));

		do {
			start = u64_stats_fetch_begin_irq(&rx_stats->syncp);
			rx_packets = rx_stats->packets;
			rx_bytes = rx_stats->bytes;
		} while (u64_stats_fetch_retry_irq(&rx_stats->syncp, start));

		tot->rx_packets += rx_packets;
		tot->tx_packets += tx_packets;
		tot->rx_bytes += rx_bytes;
		tot->tx_bytes += tx_bytes;
	}

	tot->rx_errors  = dev->stats.rx_errors;
	tot->tx_dropped = dev->stats.tx_dropped;
}

/*
 * Release all skbs and grant references still held on the TX ring,
 * returning their ids to the free list.  Used when disconnecting from
 * the backend.
 */
static void xennet_release_tx_bufs(struct netfront_queue *queue)
{
	struct sk_buff *skb;
	int i;

	for (i = 0; i < NET_TX_RING_SIZE; i++) {
		/* Skip over entries which are actually freelist references */
		if (!queue->tx_skbs[i])
			continue;

		skb = queue->tx_skbs[i];
		queue->tx_skbs[i] = NULL;
		/* Extra page ref so the page survives until access is ended. */
		get_page(queue->grant_tx_page[i]);
		gnttab_end_foreign_access(queue->grant_tx_ref[i],
					  (unsigned long)page_address(queue->grant_tx_page[i]));
		queue->grant_tx_page[i] = NULL;
		queue->grant_tx_ref[i] = INVALID_GRANT_REF;
		add_id_to_list(&queue->tx_skb_freelist, queue->tx_link, i);
		dev_kfree_skb_irq(skb);
	}
}

/* Release all skbs and grant references still held on the RX ring. */
static void xennet_release_rx_bufs(struct netfront_queue *queue)
{
	int id, ref;

	spin_lock_bh(&queue->rx_lock);

	for (id = 0; id < NET_RX_RING_SIZE; id++) {
		struct sk_buff *skb;
		struct page *page;

		skb = queue->rx_skbs[id];
		if (!skb)
			continue;

		ref = queue->grant_rx_ref[id];
		if (ref == INVALID_GRANT_REF)
			continue;

		page = skb_frag_page(&skb_shinfo(skb)->frags[0]);

		/* gnttab_end_foreign_access() needs a page ref until
		 * foreign access is ended (which may be deferred).
		 */
		get_page(page);
		gnttab_end_foreign_access(ref,
					  (unsigned long)page_address(page));
		queue->grant_rx_ref[id] = INVALID_GRANT_REF;

		kfree_skb(skb);
	}

	spin_unlock_bh(&queue->rx_lock);
}

/*
 * ndo_fix_features: drop any offload feature the backend does not
 * advertise in its xenstore directory.
 */
static netdev_features_t xennet_fix_features(struct net_device *dev,
					     netdev_features_t features)
{
	struct netfront_info *np = netdev_priv(dev);

	if (features & NETIF_F_SG &&
	    !xenbus_read_unsigned(np->xbdev->otherend, "feature-sg", 0))
		features &= ~NETIF_F_SG;

	if (features & NETIF_F_IPV6_CSUM &&
	    !xenbus_read_unsigned(np->xbdev->otherend,
				  "feature-ipv6-csum-offload", 0))
		features &= ~NETIF_F_IPV6_CSUM;

	if (features & NETIF_F_TSO &&
	    !xenbus_read_unsigned(np->xbdev->otherend, "feature-gso-tcpv4", 0))
		features &= ~NETIF_F_TSO;

	if (features & NETIF_F_TSO6 &&
	    !xenbus_read_unsigned(np->xbdev->otherend, "feature-gso-tcpv6", 0))
		features &= ~NETIF_F_TSO6;

	return features;
}

/*
 * ndo_set_features: without SG the device cannot handle frames larger
 * than a standard Ethernet frame, so clamp the MTU accordingly.
 */
static int xennet_set_features(struct net_device *dev,
			       netdev_features_t features)
{
	if (!(features & NETIF_F_SG) && dev->mtu > ETH_DATA_LEN) {
		netdev_info(dev, "Reducing MTU because no SG offload");
		dev->mtu = ETH_DATA_LEN;
	}

	return 0;
}
/*
 * Process TX completions for @queue.  Returns false if the device is
 * marked broken; when xennet_tx_buf_gc() made progress, *eoi is
 * cleared so the caller does not signal a spurious event.
 */
static bool xennet_handle_tx(struct netfront_queue *queue, unsigned int *eoi)
{
	unsigned long flags;

	if (unlikely(queue->info->broken))
		return false;

	spin_lock_irqsave(&queue->tx_lock, flags);
	if (xennet_tx_buf_gc(queue))
		*eoi = 0;
	spin_unlock_irqrestore(&queue->tx_lock, flags);

	return true;
}

/* TX event channel interrupt handler (split event channel mode). */
static irqreturn_t xennet_tx_interrupt(int irq, void *dev_id)
{
	unsigned int eoiflag = XEN_EOI_FLAG_SPURIOUS;

	if (likely(xennet_handle_tx(dev_id, &eoiflag)))
		xen_irq_lateeoi(irq, eoiflag);

	return IRQ_HANDLED;
}

/*
 * Check for new RX responses and schedule NAPI when there is work.
 * Returns false (marking the device broken) if the backend moved the
 * RX producer index backwards.
 */
static bool xennet_handle_rx(struct netfront_queue *queue, unsigned int *eoi)
{
	unsigned int work_queued;
	unsigned long flags;

	if (unlikely(queue->info->broken))
		return false;

	spin_lock_irqsave(&queue->rx_cons_lock, flags);
	work_queued = XEN_RING_NR_UNCONSUMED_RESPONSES(&queue->rx);
	if (work_queued > queue->rx_rsp_unconsumed) {
		queue->rx_rsp_unconsumed = work_queued;
		*eoi = 0;
	} else if (unlikely(work_queued < queue->rx_rsp_unconsumed)) {
		const struct device *dev = &queue->info->netdev->dev;

		spin_unlock_irqrestore(&queue->rx_cons_lock, flags);
		dev_alert(dev, "RX producer index going backwards\n");
		dev_alert(dev, "Disabled for further use\n");
		queue->info->broken = true;
		return false;
	}
	spin_unlock_irqrestore(&queue->rx_cons_lock, flags);

	if (likely(netif_carrier_ok(queue->info->netdev) && work_queued))
		napi_schedule(&queue->napi);

	return true;
}

/* RX event channel interrupt handler (split event channel mode). */
static irqreturn_t xennet_rx_interrupt(int irq, void *dev_id)
{
	unsigned int eoiflag = XEN_EOI_FLAG_SPURIOUS;

	if (likely(xennet_handle_rx(dev_id, &eoiflag)))
		xen_irq_lateeoi(irq, eoiflag);

	return IRQ_HANDLED;
}

/* Combined TX/RX interrupt handler (single event channel mode). */
static irqreturn_t xennet_interrupt(int irq, void *dev_id)
{
	unsigned int eoiflag = XEN_EOI_FLAG_SPURIOUS;

	if (xennet_handle_tx(dev_id, &eoiflag) &&
	    xennet_handle_rx(dev_id, &eoiflag))
		xen_irq_lateeoi(irq, eoiflag);

	return IRQ_HANDLED;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void xennet_poll_controller(struct net_device *dev)
{
	/* Poll each queue */
	struct netfront_info *info = netdev_priv(dev);
	unsigned int num_queues = dev->real_num_tx_queues;
	unsigned int i;

	if (info->broken)
		return;

	for (i = 0; i < num_queues; ++i)
		xennet_interrupt(0, &info->queues[i]);
}
#endif

#define NETBACK_XDP_HEADROOM_DISABLE	0
#define NETBACK_XDP_HEADROOM_ENABLE	1

/*
 * Tell the backend via xenstore whether received packets should be
 * delivered with XDP headroom.
 */
static int talk_to_netback_xdp(struct netfront_info *np, int xdp)
{
	int err;
	unsigned short headroom;

	headroom = xdp ? XDP_PACKET_HEADROOM : 0;
	err = xenbus_printf(XBT_NIL, np->xbdev->nodename,
			    "xdp-headroom", "%hu",
			    headroom);
	if (err)
		pr_warn("Error writing xdp-headroom\n");

	return err;
}

/*
 * Install or remove an XDP program.  Requires the MTU to fit in one
 * page minus XDP headroom, and renegotiates the headroom with the
 * backend before swapping the program on every queue.
 */
static int xennet_xdp_set(struct net_device *dev, struct bpf_prog *prog,
			  struct netlink_ext_ack *extack)
{
	unsigned long max_mtu = XEN_PAGE_SIZE - XDP_PACKET_HEADROOM;
	struct netfront_info *np = netdev_priv(dev);
	struct bpf_prog *old_prog;
	unsigned int i, err;

	if (dev->mtu > max_mtu) {
		netdev_warn(dev, "XDP requires MTU less than %lu\n", max_mtu);
		return -EINVAL;
	}

	if (!np->netback_has_xdp_headroom)
		return 0;

	xenbus_switch_state(np->xbdev, XenbusStateReconfiguring);

	err = talk_to_netback_xdp(np, prog ?
				  NETBACK_XDP_HEADROOM_ENABLE :
				  NETBACK_XDP_HEADROOM_DISABLE);
	if (err)
		return err;

	/* avoid the race with XDP headroom adjustment */
	wait_event(module_wq,
		   xenbus_read_driver_state(np->xbdev->otherend) ==
		   XenbusStateReconfigured);
	np->netfront_xdp_enabled = true;

	old_prog = rtnl_dereference(np->queues[0].xdp_prog);

	/* One reference per queue that will hold the program. */
	if (prog)
		bpf_prog_add(prog, dev->real_num_tx_queues);

	for (i = 0; i < dev->real_num_tx_queues; ++i)
		rcu_assign_pointer(np->queues[i].xdp_prog, prog);

	if (old_prog)
		for (i = 0; i < dev->real_num_tx_queues; ++i)
			bpf_prog_put(old_prog);

	xenbus_switch_state(np->xbdev, XenbusStateConnected);

	return 0;
}

/* ndo_bpf entry point: dispatch XDP commands. */
static int xennet_xdp(struct net_device *dev, struct netdev_bpf *xdp)
{
	struct netfront_info *np = netdev_priv(dev);

	if (np->broken)
		return -ENODEV;

	switch (xdp->command) {
	case XDP_SETUP_PROG:
		return xennet_xdp_set(dev, xdp->prog, xdp->extack);
	default:
		return -EINVAL;
	}
}

static const struct net_device_ops xennet_netdev_ops = {
	.ndo_uninit          = xennet_uninit,
	.ndo_open            = xennet_open,
	.ndo_stop            = xennet_close,
	.ndo_start_xmit      = xennet_start_xmit,
	.ndo_change_mtu	     = xennet_change_mtu,
	.ndo_get_stats64     = xennet_get_stats64,
	.ndo_set_mac_address = eth_mac_addr,
	.ndo_validate_addr   = eth_validate_addr,
	.ndo_fix_features    = xennet_fix_features,
	.ndo_set_features    = xennet_set_features,
	.ndo_select_queue    = xennet_select_queue,
	.ndo_bpf            = xennet_xdp,
	.ndo_xdp_xmit	    = xennet_xdp_xmit,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = xennet_poll_controller,
#endif
};

/* Free a netdev together with its per-CPU statistics. */
static void xennet_free_netdev(struct net_device *netdev)
{
	struct netfront_info *np = netdev_priv(netdev);

	free_percpu(np->rx_stats);
	free_percpu(np->tx_stats);
	free_netdev(netdev);
}

/*
 * Allocate and initialise a multiqueue netdev for a new xenbus device,
 * then wait for the backend to leave the Closed/Unknown states.
 * Returns the netdev or an ERR_PTR on allocation failure.
 */
static struct net_device *xennet_create_dev(struct xenbus_device *dev)
{
	int err;
	struct net_device *netdev;
	struct netfront_info *np;

	netdev = alloc_etherdev_mq(sizeof(struct netfront_info), xennet_max_queues);
	if (!netdev)
		return ERR_PTR(-ENOMEM);

	np                   = netdev_priv(netdev);
	np->xbdev            = dev;

	np->queues = NULL;

	err = -ENOMEM;
	np->rx_stats = netdev_alloc_pcpu_stats(struct netfront_stats);
	if (np->rx_stats == NULL)
		goto exit;
	np->tx_stats = netdev_alloc_pcpu_stats(struct netfront_stats);
	if (np->tx_stats == NULL)
		goto exit;

	netdev->netdev_ops	= &xennet_netdev_ops;

	netdev->features        = NETIF_F_IP_CSUM | NETIF_F_RXCSUM |
				  NETIF_F_GSO_ROBUST;
	netdev->hw_features	= NETIF_F_SG |
				  NETIF_F_IPV6_CSUM |
				  NETIF_F_TSO | NETIF_F_TSO6;

	/*
         * Assume that all hw features are available for now. This set
         * will be adjusted by the call to netdev_update_features() in
         * xennet_connect() which is the earliest point where we can
         * negotiate with the backend regarding supported features.
         */
	netdev->features |= netdev->hw_features;

	netdev->ethtool_ops = &xennet_ethtool_ops;
	netdev->min_mtu = ETH_MIN_MTU;
	netdev->max_mtu = XEN_NETIF_MAX_TX_SIZE;
	SET_NETDEV_DEV(netdev, &dev->dev);

	np->netdev = netdev;
	np->netfront_xdp_enabled = false;

	netif_carrier_off(netdev);

	/* Keep announcing Initialising until the backend shows up. */
	do {
		xenbus_switch_state(dev, XenbusStateInitialising);
		err = wait_event_timeout(module_wq,
				 xenbus_read_driver_state(dev->otherend) !=
				 XenbusStateClosed &&
				 xenbus_read_driver_state(dev->otherend) !=
				 XenbusStateUnknown, XENNET_TIMEOUT);
	} while (!err);

	return netdev;

 exit:
	xennet_free_netdev(netdev);
	return ERR_PTR(err);
}

/*
 * Entry point to this code when a new device is created.  Allocate the basic
 * structures and the ring buffers for communication with the backend, and
 * inform the backend of the appropriate details for those.
 */
static int netfront_probe(struct xenbus_device *dev,
			  const struct xenbus_device_id *id)
{
	int err;
	struct net_device *netdev;
	struct netfront_info *info;

	netdev = xennet_create_dev(dev);
	if (IS_ERR(netdev)) {
		err = PTR_ERR(netdev);
		xenbus_dev_fatal(dev, err, "creating netdev");
		return err;
	}

	info = netdev_priv(netdev);
	dev_set_drvdata(&dev->dev, info);
#ifdef CONFIG_SYSFS
	info->netdev->sysfs_groups[0] = &xennet_dev_group;
#endif

	return 0;
}

static void xennet_end_access(int ref, void *page)
{
	/* This frees the page as a side-effect */
	if (ref != INVALID_GRANT_REF)
		gnttab_end_foreign_access(ref, (unsigned long)page);
}

/*
 * Tear down the connection to the backend: unbind interrupts, release
 * all buffered skbs and grant references, and end access to the shared
 * rings (freeing their pages).
 */
static void xennet_disconnect_backend(struct netfront_info *info)
{
	unsigned int i = 0;
	unsigned int num_queues = info->netdev->real_num_tx_queues;

	netif_carrier_off(info->netdev);

	for (i = 0; i < num_queues && info->queues; ++i) {
		struct netfront_queue *queue = &info->queues[i];

		del_timer_sync(&queue->rx_refill_timer);

		/* Single vs split event channel: one or two irq handlers. */
		if (queue->tx_irq && (queue->tx_irq == queue->rx_irq))
			unbind_from_irqhandler(queue->tx_irq, queue);
		if (queue->tx_irq && (queue->tx_irq != queue->rx_irq)) {
			unbind_from_irqhandler(queue->tx_irq, queue);
			unbind_from_irqhandler(queue->rx_irq, queue);
		}
		queue->tx_evtchn = queue->rx_evtchn = 0;
		queue->tx_irq = queue->rx_irq = 0;

		if (netif_running(info->netdev))
			napi_synchronize(&queue->napi);

		xennet_release_tx_bufs(queue);
		xennet_release_rx_bufs(queue);
		gnttab_free_grant_references(queue->gref_tx_head);
		gnttab_free_grant_references(queue->gref_rx_head);

		/* End access and free the pages */
		xennet_end_access(queue->tx_ring_ref, queue->tx.sring);
		xennet_end_access(queue->rx_ring_ref, queue->rx.sring);

		queue->tx_ring_ref = INVALID_GRANT_REF;
		queue->rx_ring_ref = INVALID_GRANT_REF;
		queue->tx.sring = NULL;
		queue->rx.sring = NULL;

		page_pool_destroy(queue->page_pool);
	}
}

/*
 * We are reconnecting to the backend, due to a suspend/resume, or a backend
 * driver restart.  We tear down our netif structure and recreate it, but
 * leave the device-layer structures intact so that this is transparent to the
 * rest of the kernel.
 */
static int netfront_resume(struct xenbus_device *dev)
{
	struct netfront_info *info = dev_get_drvdata(&dev->dev);

	dev_dbg(&dev->dev, "%s\n", dev->nodename);

	netif_tx_lock_bh(info->netdev);
	netif_device_detach(info->netdev);
	netif_tx_unlock_bh(info->netdev);

	xennet_disconnect_backend(info);
	return 0;
}

/*
 * Parse the colon-separated MAC address published in xenstore into
 * @mac.  Returns 0 on success, -ENOENT on a malformed address, or the
 * xenbus_read() error.
 */
static int xen_net_read_mac(struct xenbus_device *dev, u8 mac[])
{
	char *s, *e, *macstr;
	int i;

	macstr = s = xenbus_read(XBT_NIL, dev->nodename, "mac", NULL);
	if (IS_ERR(macstr))
		return PTR_ERR(macstr);

for (i = 0; i < ETH_ALEN; i++) { 18380d160211SJeremy Fitzhardinge mac[i] = simple_strtoul(s, &e, 16); 18390d160211SJeremy Fitzhardinge if ((s == e) || (*e != ((i == ETH_ALEN-1) ? '\0' : ':'))) { 18400d160211SJeremy Fitzhardinge kfree(macstr); 18410d160211SJeremy Fitzhardinge return -ENOENT; 18420d160211SJeremy Fitzhardinge } 18430d160211SJeremy Fitzhardinge s = e+1; 18440d160211SJeremy Fitzhardinge } 18450d160211SJeremy Fitzhardinge 18460d160211SJeremy Fitzhardinge kfree(macstr); 18470d160211SJeremy Fitzhardinge return 0; 18480d160211SJeremy Fitzhardinge } 18490d160211SJeremy Fitzhardinge 18502688fcb7SAndrew J. Bennieston static int setup_netfront_single(struct netfront_queue *queue) 1851d634bf2cSWei Liu { 1852d634bf2cSWei Liu int err; 1853d634bf2cSWei Liu 18542688fcb7SAndrew J. Bennieston err = xenbus_alloc_evtchn(queue->info->xbdev, &queue->tx_evtchn); 1855d634bf2cSWei Liu if (err < 0) 1856d634bf2cSWei Liu goto fail; 1857d634bf2cSWei Liu 1858b27d4795SJuergen Gross err = bind_evtchn_to_irqhandler_lateeoi(queue->tx_evtchn, 1859b27d4795SJuergen Gross xennet_interrupt, 0, 1860b27d4795SJuergen Gross queue->info->netdev->name, 1861b27d4795SJuergen Gross queue); 1862d634bf2cSWei Liu if (err < 0) 1863d634bf2cSWei Liu goto bind_fail; 18642688fcb7SAndrew J. Bennieston queue->rx_evtchn = queue->tx_evtchn; 18652688fcb7SAndrew J. Bennieston queue->rx_irq = queue->tx_irq = err; 1866d634bf2cSWei Liu 1867d634bf2cSWei Liu return 0; 1868d634bf2cSWei Liu 1869d634bf2cSWei Liu bind_fail: 18702688fcb7SAndrew J. Bennieston xenbus_free_evtchn(queue->info->xbdev, queue->tx_evtchn); 18712688fcb7SAndrew J. Bennieston queue->tx_evtchn = 0; 1872d634bf2cSWei Liu fail: 1873d634bf2cSWei Liu return err; 1874d634bf2cSWei Liu } 1875d634bf2cSWei Liu 18762688fcb7SAndrew J. Bennieston static int setup_netfront_split(struct netfront_queue *queue) 1877d634bf2cSWei Liu { 1878d634bf2cSWei Liu int err; 1879d634bf2cSWei Liu 18802688fcb7SAndrew J. 
Bennieston err = xenbus_alloc_evtchn(queue->info->xbdev, &queue->tx_evtchn); 1881d634bf2cSWei Liu if (err < 0) 1882d634bf2cSWei Liu goto fail; 18832688fcb7SAndrew J. Bennieston err = xenbus_alloc_evtchn(queue->info->xbdev, &queue->rx_evtchn); 1884d634bf2cSWei Liu if (err < 0) 1885d634bf2cSWei Liu goto alloc_rx_evtchn_fail; 1886d634bf2cSWei Liu 18872688fcb7SAndrew J. Bennieston snprintf(queue->tx_irq_name, sizeof(queue->tx_irq_name), 18882688fcb7SAndrew J. Bennieston "%s-tx", queue->name); 1889b27d4795SJuergen Gross err = bind_evtchn_to_irqhandler_lateeoi(queue->tx_evtchn, 1890b27d4795SJuergen Gross xennet_tx_interrupt, 0, 1891b27d4795SJuergen Gross queue->tx_irq_name, queue); 1892d634bf2cSWei Liu if (err < 0) 1893d634bf2cSWei Liu goto bind_tx_fail; 18942688fcb7SAndrew J. Bennieston queue->tx_irq = err; 1895d634bf2cSWei Liu 18962688fcb7SAndrew J. Bennieston snprintf(queue->rx_irq_name, sizeof(queue->rx_irq_name), 18972688fcb7SAndrew J. Bennieston "%s-rx", queue->name); 1898b27d4795SJuergen Gross err = bind_evtchn_to_irqhandler_lateeoi(queue->rx_evtchn, 1899b27d4795SJuergen Gross xennet_rx_interrupt, 0, 1900b27d4795SJuergen Gross queue->rx_irq_name, queue); 1901d634bf2cSWei Liu if (err < 0) 1902d634bf2cSWei Liu goto bind_rx_fail; 19032688fcb7SAndrew J. Bennieston queue->rx_irq = err; 1904d634bf2cSWei Liu 1905d634bf2cSWei Liu return 0; 1906d634bf2cSWei Liu 1907d634bf2cSWei Liu bind_rx_fail: 19082688fcb7SAndrew J. Bennieston unbind_from_irqhandler(queue->tx_irq, queue); 19092688fcb7SAndrew J. Bennieston queue->tx_irq = 0; 1910d634bf2cSWei Liu bind_tx_fail: 19112688fcb7SAndrew J. Bennieston xenbus_free_evtchn(queue->info->xbdev, queue->rx_evtchn); 19122688fcb7SAndrew J. Bennieston queue->rx_evtchn = 0; 1913d634bf2cSWei Liu alloc_rx_evtchn_fail: 19142688fcb7SAndrew J. Bennieston xenbus_free_evtchn(queue->info->xbdev, queue->tx_evtchn); 19152688fcb7SAndrew J. 
Bennieston queue->tx_evtchn = 0; 1916d634bf2cSWei Liu fail: 1917d634bf2cSWei Liu return err; 1918d634bf2cSWei Liu } 1919d634bf2cSWei Liu 19202688fcb7SAndrew J. Bennieston static int setup_netfront(struct xenbus_device *dev, 19212688fcb7SAndrew J. Bennieston struct netfront_queue *queue, unsigned int feature_split_evtchn) 19220d160211SJeremy Fitzhardinge { 19230d160211SJeremy Fitzhardinge struct xen_netif_tx_sring *txs; 1924*46e20d43SJuergen Gross struct xen_netif_rx_sring *rxs; 19250d160211SJeremy Fitzhardinge int err; 19260d160211SJeremy Fitzhardinge 1927145daab2SJuergen Gross queue->tx_ring_ref = INVALID_GRANT_REF; 1928145daab2SJuergen Gross queue->rx_ring_ref = INVALID_GRANT_REF; 19292688fcb7SAndrew J. Bennieston queue->rx.sring = NULL; 19302688fcb7SAndrew J. Bennieston queue->tx.sring = NULL; 19310d160211SJeremy Fitzhardinge 1932*46e20d43SJuergen Gross err = xenbus_setup_ring(dev, GFP_NOIO | __GFP_HIGH, (void **)&txs, 1933*46e20d43SJuergen Gross 1, &queue->tx_ring_ref); 1934*46e20d43SJuergen Gross if (err) 19350d160211SJeremy Fitzhardinge goto fail; 19360d160211SJeremy Fitzhardinge 1937*46e20d43SJuergen Gross XEN_FRONT_RING_INIT(&queue->tx, txs, XEN_PAGE_SIZE); 19380d160211SJeremy Fitzhardinge 1939*46e20d43SJuergen Gross err = xenbus_setup_ring(dev, GFP_NOIO | __GFP_HIGH, (void **)&rxs, 1940*46e20d43SJuergen Gross 1, &queue->rx_ring_ref); 1941*46e20d43SJuergen Gross if (err) 194266e3531bSJuergen Gross goto fail; 19430d160211SJeremy Fitzhardinge 1944*46e20d43SJuergen Gross XEN_FRONT_RING_INIT(&queue->rx, rxs, XEN_PAGE_SIZE); 19450d160211SJeremy Fitzhardinge 1946d634bf2cSWei Liu if (feature_split_evtchn) 19472688fcb7SAndrew J. 
Bennieston err = setup_netfront_split(queue); 1948d634bf2cSWei Liu /* setup single event channel if 1949d634bf2cSWei Liu * a) feature-split-event-channels == 0 1950d634bf2cSWei Liu * b) feature-split-event-channels == 1 but failed to setup 1951d634bf2cSWei Liu */ 1952e93fac3bSJiapeng Chong if (!feature_split_evtchn || err) 19532688fcb7SAndrew J. Bennieston err = setup_netfront_single(queue); 1954d634bf2cSWei Liu 19550d160211SJeremy Fitzhardinge if (err) 195666e3531bSJuergen Gross goto fail; 19570d160211SJeremy Fitzhardinge 19580d160211SJeremy Fitzhardinge return 0; 19590d160211SJeremy Fitzhardinge 19600d160211SJeremy Fitzhardinge fail: 1961*46e20d43SJuergen Gross xenbus_teardown_ring((void **)&queue->rx.sring, 1, &queue->rx_ring_ref); 1962*46e20d43SJuergen Gross xenbus_teardown_ring((void **)&queue->tx.sring, 1, &queue->tx_ring_ref); 1963*46e20d43SJuergen Gross 19640d160211SJeremy Fitzhardinge return err; 19650d160211SJeremy Fitzhardinge } 19660d160211SJeremy Fitzhardinge 19672688fcb7SAndrew J. Bennieston /* Queue-specific initialisation 19682688fcb7SAndrew J. Bennieston * This used to be done in xennet_create_dev() but must now 19692688fcb7SAndrew J. Bennieston * be run per-queue. 19702688fcb7SAndrew J. Bennieston */ 19712688fcb7SAndrew J. Bennieston static int xennet_init_queue(struct netfront_queue *queue) 19722688fcb7SAndrew J. Bennieston { 19732688fcb7SAndrew J. Bennieston unsigned short i; 19742688fcb7SAndrew J. Bennieston int err = 0; 197521f2706bSXiao Liang char *devid; 19762688fcb7SAndrew J. Bennieston 19772688fcb7SAndrew J. Bennieston spin_lock_init(&queue->tx_lock); 19782688fcb7SAndrew J. Bennieston spin_lock_init(&queue->rx_lock); 1979b27d4795SJuergen Gross spin_lock_init(&queue->rx_cons_lock); 19802688fcb7SAndrew J. Bennieston 1981e99e88a9SKees Cook timer_setup(&queue->rx_refill_timer, rx_refill_timeout, 0); 19822688fcb7SAndrew J. 
Bennieston 198321f2706bSXiao Liang devid = strrchr(queue->info->xbdev->nodename, '/') + 1; 198421f2706bSXiao Liang snprintf(queue->name, sizeof(queue->name), "vif%s-q%u", 198521f2706bSXiao Liang devid, queue->id); 19868b715010SWei Liu 198721631d2dSJuergen Gross /* Initialise tx_skb_freelist as a free chain containing every entry. */ 19882688fcb7SAndrew J. Bennieston queue->tx_skb_freelist = 0; 1989a884daa6SJuergen Gross queue->tx_pend_queue = TX_LINK_NONE; 19902688fcb7SAndrew J. Bennieston for (i = 0; i < NET_TX_RING_SIZE; i++) { 199121631d2dSJuergen Gross queue->tx_link[i] = i + 1; 1992145daab2SJuergen Gross queue->grant_tx_ref[i] = INVALID_GRANT_REF; 19932688fcb7SAndrew J. Bennieston queue->grant_tx_page[i] = NULL; 19942688fcb7SAndrew J. Bennieston } 199521631d2dSJuergen Gross queue->tx_link[NET_TX_RING_SIZE - 1] = TX_LINK_NONE; 19962688fcb7SAndrew J. Bennieston 19972688fcb7SAndrew J. Bennieston /* Clear out rx_skbs */ 19982688fcb7SAndrew J. Bennieston for (i = 0; i < NET_RX_RING_SIZE; i++) { 19992688fcb7SAndrew J. Bennieston queue->rx_skbs[i] = NULL; 2000145daab2SJuergen Gross queue->grant_rx_ref[i] = INVALID_GRANT_REF; 20012688fcb7SAndrew J. Bennieston } 20022688fcb7SAndrew J. Bennieston 20032688fcb7SAndrew J. Bennieston /* A grant for every tx ring slot */ 20041f3c2ebaSDavid Vrabel if (gnttab_alloc_grant_references(NET_TX_RING_SIZE, 20052688fcb7SAndrew J. Bennieston &queue->gref_tx_head) < 0) { 20062688fcb7SAndrew J. Bennieston pr_alert("can't alloc tx grant refs\n"); 20072688fcb7SAndrew J. Bennieston err = -ENOMEM; 20082688fcb7SAndrew J. Bennieston goto exit; 20092688fcb7SAndrew J. Bennieston } 20102688fcb7SAndrew J. Bennieston 20112688fcb7SAndrew J. Bennieston /* A grant for every rx ring slot */ 20121f3c2ebaSDavid Vrabel if (gnttab_alloc_grant_references(NET_RX_RING_SIZE, 20132688fcb7SAndrew J. Bennieston &queue->gref_rx_head) < 0) { 20142688fcb7SAndrew J. Bennieston pr_alert("can't alloc rx grant refs\n"); 20152688fcb7SAndrew J. 
Bennieston err = -ENOMEM; 20162688fcb7SAndrew J. Bennieston goto exit_free_tx; 20172688fcb7SAndrew J. Bennieston } 20182688fcb7SAndrew J. Bennieston 20192688fcb7SAndrew J. Bennieston return 0; 20202688fcb7SAndrew J. Bennieston 20212688fcb7SAndrew J. Bennieston exit_free_tx: 20222688fcb7SAndrew J. Bennieston gnttab_free_grant_references(queue->gref_tx_head); 20232688fcb7SAndrew J. Bennieston exit: 20242688fcb7SAndrew J. Bennieston return err; 20252688fcb7SAndrew J. Bennieston } 20262688fcb7SAndrew J. Bennieston 202750ee6061SAndrew J. Bennieston static int write_queue_xenstore_keys(struct netfront_queue *queue, 202850ee6061SAndrew J. Bennieston struct xenbus_transaction *xbt, int write_hierarchical) 202950ee6061SAndrew J. Bennieston { 203050ee6061SAndrew J. Bennieston /* Write the queue-specific keys into XenStore in the traditional 203150ee6061SAndrew J. Bennieston * way for a single queue, or in a queue subkeys for multiple 203250ee6061SAndrew J. Bennieston * queues. 203350ee6061SAndrew J. Bennieston */ 203450ee6061SAndrew J. Bennieston struct xenbus_device *dev = queue->info->xbdev; 203550ee6061SAndrew J. Bennieston int err; 203650ee6061SAndrew J. Bennieston const char *message; 203750ee6061SAndrew J. Bennieston char *path; 203850ee6061SAndrew J. Bennieston size_t pathsize; 203950ee6061SAndrew J. Bennieston 204050ee6061SAndrew J. Bennieston /* Choose the correct place to write the keys */ 204150ee6061SAndrew J. Bennieston if (write_hierarchical) { 204250ee6061SAndrew J. Bennieston pathsize = strlen(dev->nodename) + 10; 204350ee6061SAndrew J. Bennieston path = kzalloc(pathsize, GFP_KERNEL); 204450ee6061SAndrew J. Bennieston if (!path) { 204550ee6061SAndrew J. Bennieston err = -ENOMEM; 204650ee6061SAndrew J. Bennieston message = "out of memory while writing ring references"; 204750ee6061SAndrew J. Bennieston goto error; 204850ee6061SAndrew J. Bennieston } 204950ee6061SAndrew J. Bennieston snprintf(path, pathsize, "%s/queue-%u", 205050ee6061SAndrew J. 
Bennieston dev->nodename, queue->id); 205150ee6061SAndrew J. Bennieston } else { 205250ee6061SAndrew J. Bennieston path = (char *)dev->nodename; 205350ee6061SAndrew J. Bennieston } 205450ee6061SAndrew J. Bennieston 205550ee6061SAndrew J. Bennieston /* Write ring references */ 205650ee6061SAndrew J. Bennieston err = xenbus_printf(*xbt, path, "tx-ring-ref", "%u", 205750ee6061SAndrew J. Bennieston queue->tx_ring_ref); 205850ee6061SAndrew J. Bennieston if (err) { 205950ee6061SAndrew J. Bennieston message = "writing tx-ring-ref"; 206050ee6061SAndrew J. Bennieston goto error; 206150ee6061SAndrew J. Bennieston } 206250ee6061SAndrew J. Bennieston 206350ee6061SAndrew J. Bennieston err = xenbus_printf(*xbt, path, "rx-ring-ref", "%u", 206450ee6061SAndrew J. Bennieston queue->rx_ring_ref); 206550ee6061SAndrew J. Bennieston if (err) { 206650ee6061SAndrew J. Bennieston message = "writing rx-ring-ref"; 206750ee6061SAndrew J. Bennieston goto error; 206850ee6061SAndrew J. Bennieston } 206950ee6061SAndrew J. Bennieston 207050ee6061SAndrew J. Bennieston /* Write event channels; taking into account both shared 207150ee6061SAndrew J. Bennieston * and split event channel scenarios. 207250ee6061SAndrew J. Bennieston */ 207350ee6061SAndrew J. Bennieston if (queue->tx_evtchn == queue->rx_evtchn) { 207450ee6061SAndrew J. Bennieston /* Shared event channel */ 207550ee6061SAndrew J. Bennieston err = xenbus_printf(*xbt, path, 207650ee6061SAndrew J. Bennieston "event-channel", "%u", queue->tx_evtchn); 207750ee6061SAndrew J. Bennieston if (err) { 207850ee6061SAndrew J. Bennieston message = "writing event-channel"; 207950ee6061SAndrew J. Bennieston goto error; 208050ee6061SAndrew J. Bennieston } 208150ee6061SAndrew J. Bennieston } else { 208250ee6061SAndrew J. Bennieston /* Split event channels */ 208350ee6061SAndrew J. Bennieston err = xenbus_printf(*xbt, path, 208450ee6061SAndrew J. Bennieston "event-channel-tx", "%u", queue->tx_evtchn); 208550ee6061SAndrew J. 
Bennieston if (err) { 208650ee6061SAndrew J. Bennieston message = "writing event-channel-tx"; 208750ee6061SAndrew J. Bennieston goto error; 208850ee6061SAndrew J. Bennieston } 208950ee6061SAndrew J. Bennieston 209050ee6061SAndrew J. Bennieston err = xenbus_printf(*xbt, path, 209150ee6061SAndrew J. Bennieston "event-channel-rx", "%u", queue->rx_evtchn); 209250ee6061SAndrew J. Bennieston if (err) { 209350ee6061SAndrew J. Bennieston message = "writing event-channel-rx"; 209450ee6061SAndrew J. Bennieston goto error; 209550ee6061SAndrew J. Bennieston } 209650ee6061SAndrew J. Bennieston } 209750ee6061SAndrew J. Bennieston 209850ee6061SAndrew J. Bennieston if (write_hierarchical) 209950ee6061SAndrew J. Bennieston kfree(path); 210050ee6061SAndrew J. Bennieston return 0; 210150ee6061SAndrew J. Bennieston 210250ee6061SAndrew J. Bennieston error: 210350ee6061SAndrew J. Bennieston if (write_hierarchical) 210450ee6061SAndrew J. Bennieston kfree(path); 210550ee6061SAndrew J. Bennieston xenbus_dev_fatal(dev, err, "%s", message); 210650ee6061SAndrew J. Bennieston return err; 210750ee6061SAndrew J. Bennieston } 210850ee6061SAndrew J. 
Bennieston 21096c5aa6fcSDenis Kirjanov 21106c5aa6fcSDenis Kirjanov 21116c5aa6fcSDenis Kirjanov static int xennet_create_page_pool(struct netfront_queue *queue) 21126c5aa6fcSDenis Kirjanov { 21136c5aa6fcSDenis Kirjanov int err; 21146c5aa6fcSDenis Kirjanov struct page_pool_params pp_params = { 21156c5aa6fcSDenis Kirjanov .order = 0, 21166c5aa6fcSDenis Kirjanov .flags = 0, 21176c5aa6fcSDenis Kirjanov .pool_size = NET_RX_RING_SIZE, 21186c5aa6fcSDenis Kirjanov .nid = NUMA_NO_NODE, 21196c5aa6fcSDenis Kirjanov .dev = &queue->info->netdev->dev, 21206c5aa6fcSDenis Kirjanov .offset = XDP_PACKET_HEADROOM, 21216c5aa6fcSDenis Kirjanov .max_len = XEN_PAGE_SIZE - XDP_PACKET_HEADROOM, 21226c5aa6fcSDenis Kirjanov }; 21236c5aa6fcSDenis Kirjanov 21246c5aa6fcSDenis Kirjanov queue->page_pool = page_pool_create(&pp_params); 21256c5aa6fcSDenis Kirjanov if (IS_ERR(queue->page_pool)) { 21266c5aa6fcSDenis Kirjanov err = PTR_ERR(queue->page_pool); 21276c5aa6fcSDenis Kirjanov queue->page_pool = NULL; 21286c5aa6fcSDenis Kirjanov return err; 21296c5aa6fcSDenis Kirjanov } 21306c5aa6fcSDenis Kirjanov 21316c5aa6fcSDenis Kirjanov err = xdp_rxq_info_reg(&queue->xdp_rxq, queue->info->netdev, 2132b02e5a0eSBjörn Töpel queue->id, 0); 21336c5aa6fcSDenis Kirjanov if (err) { 21346c5aa6fcSDenis Kirjanov netdev_err(queue->info->netdev, "xdp_rxq_info_reg failed\n"); 21356c5aa6fcSDenis Kirjanov goto err_free_pp; 21366c5aa6fcSDenis Kirjanov } 21376c5aa6fcSDenis Kirjanov 21386c5aa6fcSDenis Kirjanov err = xdp_rxq_info_reg_mem_model(&queue->xdp_rxq, 21396c5aa6fcSDenis Kirjanov MEM_TYPE_PAGE_POOL, queue->page_pool); 21406c5aa6fcSDenis Kirjanov if (err) { 21416c5aa6fcSDenis Kirjanov netdev_err(queue->info->netdev, "xdp_rxq_info_reg_mem_model failed\n"); 21426c5aa6fcSDenis Kirjanov goto err_unregister_rxq; 21436c5aa6fcSDenis Kirjanov } 21446c5aa6fcSDenis Kirjanov return 0; 21456c5aa6fcSDenis Kirjanov 21466c5aa6fcSDenis Kirjanov err_unregister_rxq: 21476c5aa6fcSDenis Kirjanov xdp_rxq_info_unreg(&queue->xdp_rxq); 
21486c5aa6fcSDenis Kirjanov err_free_pp: 21496c5aa6fcSDenis Kirjanov page_pool_destroy(queue->page_pool); 21506c5aa6fcSDenis Kirjanov queue->page_pool = NULL; 21516c5aa6fcSDenis Kirjanov return err; 21526c5aa6fcSDenis Kirjanov } 21536c5aa6fcSDenis Kirjanov 2154ce58725fSDavid Vrabel static int xennet_create_queues(struct netfront_info *info, 2155ca88ea12SJoe Jin unsigned int *num_queues) 2156ce58725fSDavid Vrabel { 2157ce58725fSDavid Vrabel unsigned int i; 2158ce58725fSDavid Vrabel int ret; 2159ce58725fSDavid Vrabel 2160ca88ea12SJoe Jin info->queues = kcalloc(*num_queues, sizeof(struct netfront_queue), 2161ce58725fSDavid Vrabel GFP_KERNEL); 2162ce58725fSDavid Vrabel if (!info->queues) 2163ce58725fSDavid Vrabel return -ENOMEM; 2164ce58725fSDavid Vrabel 2165ca88ea12SJoe Jin for (i = 0; i < *num_queues; i++) { 2166ce58725fSDavid Vrabel struct netfront_queue *queue = &info->queues[i]; 2167ce58725fSDavid Vrabel 2168ce58725fSDavid Vrabel queue->id = i; 2169ce58725fSDavid Vrabel queue->info = info; 2170ce58725fSDavid Vrabel 2171ce58725fSDavid Vrabel ret = xennet_init_queue(queue); 2172ce58725fSDavid Vrabel if (ret < 0) { 2173f599c64fSRoss Lagerwall dev_warn(&info->xbdev->dev, 217469cb8524SDavid Vrabel "only created %d queues\n", i); 2175ca88ea12SJoe Jin *num_queues = i; 2176ce58725fSDavid Vrabel break; 2177ce58725fSDavid Vrabel } 2178ce58725fSDavid Vrabel 21796c5aa6fcSDenis Kirjanov /* use page pool recycling instead of buddy allocator */ 21806c5aa6fcSDenis Kirjanov ret = xennet_create_page_pool(queue); 21816c5aa6fcSDenis Kirjanov if (ret < 0) { 21826c5aa6fcSDenis Kirjanov dev_err(&info->xbdev->dev, "can't allocate page pool\n"); 21836c5aa6fcSDenis Kirjanov *num_queues = i; 21846c5aa6fcSDenis Kirjanov return ret; 21856c5aa6fcSDenis Kirjanov } 21866c5aa6fcSDenis Kirjanov 2187ce58725fSDavid Vrabel netif_napi_add(queue->info->netdev, &queue->napi, 2188ce58725fSDavid Vrabel xennet_poll, 64); 2189ce58725fSDavid Vrabel if (netif_running(info->netdev)) 2190ce58725fSDavid Vrabel 
napi_enable(&queue->napi); 2191ce58725fSDavid Vrabel } 2192ce58725fSDavid Vrabel 2193ca88ea12SJoe Jin netif_set_real_num_tx_queues(info->netdev, *num_queues); 2194ce58725fSDavid Vrabel 2195ca88ea12SJoe Jin if (*num_queues == 0) { 2196f599c64fSRoss Lagerwall dev_err(&info->xbdev->dev, "no queues\n"); 2197ce58725fSDavid Vrabel return -EINVAL; 2198ce58725fSDavid Vrabel } 2199ce58725fSDavid Vrabel return 0; 2200ce58725fSDavid Vrabel } 2201ce58725fSDavid Vrabel 22020d160211SJeremy Fitzhardinge /* Common code used when first setting up, and when resuming. */ 2203f502bf2bSIan Campbell static int talk_to_netback(struct xenbus_device *dev, 22040d160211SJeremy Fitzhardinge struct netfront_info *info) 22050d160211SJeremy Fitzhardinge { 22060d160211SJeremy Fitzhardinge const char *message; 22070d160211SJeremy Fitzhardinge struct xenbus_transaction xbt; 22080d160211SJeremy Fitzhardinge int err; 22092688fcb7SAndrew J. Bennieston unsigned int feature_split_evtchn; 22102688fcb7SAndrew J. Bennieston unsigned int i = 0; 221150ee6061SAndrew J. Bennieston unsigned int max_queues = 0; 22122688fcb7SAndrew J. Bennieston struct netfront_queue *queue = NULL; 22132688fcb7SAndrew J. Bennieston unsigned int num_queues = 1; 221493772114SJakub Kicinski u8 addr[ETH_ALEN]; 22150d160211SJeremy Fitzhardinge 22162688fcb7SAndrew J. Bennieston info->netdev->irq = 0; 22172688fcb7SAndrew J. Bennieston 221850ee6061SAndrew J. Bennieston /* Check if backend supports multiple queues */ 22192890ea5cSJuergen Gross max_queues = xenbus_read_unsigned(info->xbdev->otherend, 22202890ea5cSJuergen Gross "multi-queue-max-queues", 1); 222150ee6061SAndrew J. Bennieston num_queues = min(max_queues, xennet_max_queues); 222250ee6061SAndrew J. Bennieston 22232688fcb7SAndrew J. Bennieston /* Check feature-split-event-channels */ 22242890ea5cSJuergen Gross feature_split_evtchn = xenbus_read_unsigned(info->xbdev->otherend, 22252890ea5cSJuergen Gross "feature-split-event-channels", 0); 22262688fcb7SAndrew J. 
Bennieston 22272688fcb7SAndrew J. Bennieston /* Read mac addr. */ 222893772114SJakub Kicinski err = xen_net_read_mac(dev, addr); 22292688fcb7SAndrew J. Bennieston if (err) { 22302688fcb7SAndrew J. Bennieston xenbus_dev_fatal(dev, err, "parsing %s/mac", dev->nodename); 2231cb257783SRoss Lagerwall goto out_unlocked; 22322688fcb7SAndrew J. Bennieston } 223393772114SJakub Kicinski eth_hw_addr_set(info->netdev, addr); 22342688fcb7SAndrew J. Bennieston 22356c5aa6fcSDenis Kirjanov info->netback_has_xdp_headroom = xenbus_read_unsigned(info->xbdev->otherend, 22366c5aa6fcSDenis Kirjanov "feature-xdp-headroom", 0); 22376c5aa6fcSDenis Kirjanov if (info->netback_has_xdp_headroom) { 22386c5aa6fcSDenis Kirjanov /* set the current xen-netfront xdp state */ 22396c5aa6fcSDenis Kirjanov err = talk_to_netback_xdp(info, info->netfront_xdp_enabled ? 22406c5aa6fcSDenis Kirjanov NETBACK_XDP_HEADROOM_ENABLE : 22416c5aa6fcSDenis Kirjanov NETBACK_XDP_HEADROOM_DISABLE); 22426c5aa6fcSDenis Kirjanov if (err) 22436c5aa6fcSDenis Kirjanov goto out_unlocked; 22446c5aa6fcSDenis Kirjanov } 22456c5aa6fcSDenis Kirjanov 2246f599c64fSRoss Lagerwall rtnl_lock(); 2247ce58725fSDavid Vrabel if (info->queues) 2248ce58725fSDavid Vrabel xennet_destroy_queues(info); 2249ce58725fSDavid Vrabel 2250a884daa6SJuergen Gross /* For the case of a reconnect reset the "broken" indicator. */ 2251a884daa6SJuergen Gross info->broken = false; 2252a884daa6SJuergen Gross 2253ca88ea12SJoe Jin err = xennet_create_queues(info, &num_queues); 2254e2e004acSRoss Lagerwall if (err < 0) { 2255e2e004acSRoss Lagerwall xenbus_dev_fatal(dev, err, "creating queues"); 2256e2e004acSRoss Lagerwall kfree(info->queues); 2257e2e004acSRoss Lagerwall info->queues = NULL; 2258e2e004acSRoss Lagerwall goto out; 2259e2e004acSRoss Lagerwall } 2260f599c64fSRoss Lagerwall rtnl_unlock(); 22612688fcb7SAndrew J. Bennieston 22622688fcb7SAndrew J. Bennieston /* Create shared ring, alloc event channel -- for each queue */ 22632688fcb7SAndrew J. 
Bennieston for (i = 0; i < num_queues; ++i) { 22642688fcb7SAndrew J. Bennieston queue = &info->queues[i]; 22652688fcb7SAndrew J. Bennieston err = setup_netfront(dev, queue, feature_split_evtchn); 2266e2e004acSRoss Lagerwall if (err) 22672688fcb7SAndrew J. Bennieston goto destroy_ring; 22682688fcb7SAndrew J. Bennieston } 22690d160211SJeremy Fitzhardinge 22700d160211SJeremy Fitzhardinge again: 22710d160211SJeremy Fitzhardinge err = xenbus_transaction_start(&xbt); 22720d160211SJeremy Fitzhardinge if (err) { 22730d160211SJeremy Fitzhardinge xenbus_dev_fatal(dev, err, "starting transaction"); 22740d160211SJeremy Fitzhardinge goto destroy_ring; 22750d160211SJeremy Fitzhardinge } 22760d160211SJeremy Fitzhardinge 2277812494d9Schas williams if (xenbus_exists(XBT_NIL, 2278812494d9Schas williams info->xbdev->otherend, "multi-queue-max-queues")) { 2279812494d9Schas williams /* Write the number of queues */ 2280812494d9Schas williams err = xenbus_printf(xbt, dev->nodename, 2281812494d9Schas williams "multi-queue-num-queues", "%u", num_queues); 2282812494d9Schas williams if (err) { 2283812494d9Schas williams message = "writing multi-queue-num-queues"; 2284812494d9Schas williams goto abort_transaction_no_dev_fatal; 2285812494d9Schas williams } 2286812494d9Schas williams } 2287812494d9Schas williams 228850ee6061SAndrew J. Bennieston if (num_queues == 1) { 228950ee6061SAndrew J. Bennieston err = write_queue_xenstore_keys(&info->queues[0], &xbt, 0); /* flat */ 229050ee6061SAndrew J. Bennieston if (err) 229150ee6061SAndrew J. Bennieston goto abort_transaction_no_dev_fatal; 2292d634bf2cSWei Liu } else { 229350ee6061SAndrew J. Bennieston /* Write the keys for each queue */ 229450ee6061SAndrew J. Bennieston for (i = 0; i < num_queues; ++i) { 229550ee6061SAndrew J. Bennieston queue = &info->queues[i]; 229650ee6061SAndrew J. Bennieston err = write_queue_xenstore_keys(queue, &xbt, 1); /* hierarchical */ 229750ee6061SAndrew J. Bennieston if (err) 229850ee6061SAndrew J. 
Bennieston goto abort_transaction_no_dev_fatal; 2299d634bf2cSWei Liu } 2300d634bf2cSWei Liu } 23010d160211SJeremy Fitzhardinge 230250ee6061SAndrew J. Bennieston /* The remaining keys are not queue-specific */ 23030d160211SJeremy Fitzhardinge err = xenbus_printf(xbt, dev->nodename, "request-rx-copy", "%u", 23040d160211SJeremy Fitzhardinge 1); 23050d160211SJeremy Fitzhardinge if (err) { 23060d160211SJeremy Fitzhardinge message = "writing request-rx-copy"; 23070d160211SJeremy Fitzhardinge goto abort_transaction; 23080d160211SJeremy Fitzhardinge } 23090d160211SJeremy Fitzhardinge 23100d160211SJeremy Fitzhardinge err = xenbus_printf(xbt, dev->nodename, "feature-rx-notify", "%d", 1); 23110d160211SJeremy Fitzhardinge if (err) { 23120d160211SJeremy Fitzhardinge message = "writing feature-rx-notify"; 23130d160211SJeremy Fitzhardinge goto abort_transaction; 23140d160211SJeremy Fitzhardinge } 23150d160211SJeremy Fitzhardinge 23160d160211SJeremy Fitzhardinge err = xenbus_printf(xbt, dev->nodename, "feature-sg", "%d", 1); 23170d160211SJeremy Fitzhardinge if (err) { 23180d160211SJeremy Fitzhardinge message = "writing feature-sg"; 23190d160211SJeremy Fitzhardinge goto abort_transaction; 23200d160211SJeremy Fitzhardinge } 23210d160211SJeremy Fitzhardinge 23220d160211SJeremy Fitzhardinge err = xenbus_printf(xbt, dev->nodename, "feature-gso-tcpv4", "%d", 1); 23230d160211SJeremy Fitzhardinge if (err) { 23240d160211SJeremy Fitzhardinge message = "writing feature-gso-tcpv4"; 23250d160211SJeremy Fitzhardinge goto abort_transaction; 23260d160211SJeremy Fitzhardinge } 23270d160211SJeremy Fitzhardinge 23282c0057deSPaul Durrant err = xenbus_write(xbt, dev->nodename, "feature-gso-tcpv6", "1"); 23292c0057deSPaul Durrant if (err) { 23302c0057deSPaul Durrant message = "writing feature-gso-tcpv6"; 23312c0057deSPaul Durrant goto abort_transaction; 23322c0057deSPaul Durrant } 23332c0057deSPaul Durrant 23342c0057deSPaul Durrant err = xenbus_write(xbt, dev->nodename, "feature-ipv6-csum-offload", 
23352c0057deSPaul Durrant "1"); 23362c0057deSPaul Durrant if (err) { 23372c0057deSPaul Durrant message = "writing feature-ipv6-csum-offload"; 23382c0057deSPaul Durrant goto abort_transaction; 23392c0057deSPaul Durrant } 23402c0057deSPaul Durrant 23410d160211SJeremy Fitzhardinge err = xenbus_transaction_end(xbt, 0); 23420d160211SJeremy Fitzhardinge if (err) { 23430d160211SJeremy Fitzhardinge if (err == -EAGAIN) 23440d160211SJeremy Fitzhardinge goto again; 23450d160211SJeremy Fitzhardinge xenbus_dev_fatal(dev, err, "completing transaction"); 23460d160211SJeremy Fitzhardinge goto destroy_ring; 23470d160211SJeremy Fitzhardinge } 23480d160211SJeremy Fitzhardinge 23490d160211SJeremy Fitzhardinge return 0; 23500d160211SJeremy Fitzhardinge 23510d160211SJeremy Fitzhardinge abort_transaction: 23520d160211SJeremy Fitzhardinge xenbus_dev_fatal(dev, err, "%s", message); 235350ee6061SAndrew J. Bennieston abort_transaction_no_dev_fatal: 235450ee6061SAndrew J. Bennieston xenbus_transaction_end(xbt, 1); 23550d160211SJeremy Fitzhardinge destroy_ring: 23560d160211SJeremy Fitzhardinge xennet_disconnect_backend(info); 2357f599c64fSRoss Lagerwall rtnl_lock(); 2358e2e004acSRoss Lagerwall xennet_destroy_queues(info); 23590d160211SJeremy Fitzhardinge out: 2360f599c64fSRoss Lagerwall rtnl_unlock(); 2361cb257783SRoss Lagerwall out_unlocked: 2362d86b5672SVitaly Kuznetsov device_unregister(&dev->dev); 23630d160211SJeremy Fitzhardinge return err; 23640d160211SJeremy Fitzhardinge } 23650d160211SJeremy Fitzhardinge 23660d160211SJeremy Fitzhardinge static int xennet_connect(struct net_device *dev) 23670d160211SJeremy Fitzhardinge { 23680d160211SJeremy Fitzhardinge struct netfront_info *np = netdev_priv(dev); 23692688fcb7SAndrew J. Bennieston unsigned int num_queues = 0; 2370a5b5dc3cSDavid Vrabel int err; 23712688fcb7SAndrew J. Bennieston unsigned int j = 0; 23722688fcb7SAndrew J. 
Bennieston struct netfront_queue *queue = NULL; 23730d160211SJeremy Fitzhardinge 23742890ea5cSJuergen Gross if (!xenbus_read_unsigned(np->xbdev->otherend, "feature-rx-copy", 0)) { 23750d160211SJeremy Fitzhardinge dev_info(&dev->dev, 2376898eb71cSJoe Perches "backend does not support copying receive path\n"); 23770d160211SJeremy Fitzhardinge return -ENODEV; 23780d160211SJeremy Fitzhardinge } 23790d160211SJeremy Fitzhardinge 2380f502bf2bSIan Campbell err = talk_to_netback(np->xbdev, np); 23810d160211SJeremy Fitzhardinge if (err) 23820d160211SJeremy Fitzhardinge return err; 23836c5aa6fcSDenis Kirjanov if (np->netback_has_xdp_headroom) 23846c5aa6fcSDenis Kirjanov pr_info("backend supports XDP headroom\n"); 23850d160211SJeremy Fitzhardinge 23862688fcb7SAndrew J. Bennieston /* talk_to_netback() sets the correct number of queues */ 23872688fcb7SAndrew J. Bennieston num_queues = dev->real_num_tx_queues; 23882688fcb7SAndrew J. Bennieston 2389f599c64fSRoss Lagerwall if (dev->reg_state == NETREG_UNINITIALIZED) { 2390f599c64fSRoss Lagerwall err = register_netdev(dev); 2391f599c64fSRoss Lagerwall if (err) { 2392f599c64fSRoss Lagerwall pr_warn("%s: register_netdev err=%d\n", __func__, err); 2393f599c64fSRoss Lagerwall device_unregister(&np->xbdev->dev); 2394f599c64fSRoss Lagerwall return err; 2395f599c64fSRoss Lagerwall } 2396f599c64fSRoss Lagerwall } 2397f599c64fSRoss Lagerwall 239845c8184cSRoss Lagerwall rtnl_lock(); 239945c8184cSRoss Lagerwall netdev_update_features(dev); 240045c8184cSRoss Lagerwall rtnl_unlock(); 240145c8184cSRoss Lagerwall 24020d160211SJeremy Fitzhardinge /* 2403a5b5dc3cSDavid Vrabel * All public and private state should now be sane. Get 24040d160211SJeremy Fitzhardinge * ready to start sending and receiving packets and give the driver 24050d160211SJeremy Fitzhardinge * domain a kick because we've probably just requeued some 24060d160211SJeremy Fitzhardinge * packets. 
24070d160211SJeremy Fitzhardinge */ 2408042b2046SDongli Zhang netif_tx_lock_bh(np->netdev); 2409042b2046SDongli Zhang netif_device_attach(np->netdev); 2410042b2046SDongli Zhang netif_tx_unlock_bh(np->netdev); 2411042b2046SDongli Zhang 24120d160211SJeremy Fitzhardinge netif_carrier_on(np->netdev); 24132688fcb7SAndrew J. Bennieston for (j = 0; j < num_queues; ++j) { 24142688fcb7SAndrew J. Bennieston queue = &np->queues[j]; 2415f50b4076SDavid Vrabel 24162688fcb7SAndrew J. Bennieston notify_remote_via_irq(queue->tx_irq); 24172688fcb7SAndrew J. Bennieston if (queue->tx_irq != queue->rx_irq) 24182688fcb7SAndrew J. Bennieston notify_remote_via_irq(queue->rx_irq); 24190d160211SJeremy Fitzhardinge 2420f50b4076SDavid Vrabel spin_lock_irq(&queue->tx_lock); 2421f50b4076SDavid Vrabel xennet_tx_buf_gc(queue); 24222688fcb7SAndrew J. Bennieston spin_unlock_irq(&queue->tx_lock); 2423f50b4076SDavid Vrabel 2424f50b4076SDavid Vrabel spin_lock_bh(&queue->rx_lock); 2425f50b4076SDavid Vrabel xennet_alloc_rx_buffers(queue); 24262688fcb7SAndrew J. Bennieston spin_unlock_bh(&queue->rx_lock); 24272688fcb7SAndrew J. Bennieston } 24280d160211SJeremy Fitzhardinge 24290d160211SJeremy Fitzhardinge return 0; 24300d160211SJeremy Fitzhardinge } 24310d160211SJeremy Fitzhardinge 243280708602SLee Jones /* 24330d160211SJeremy Fitzhardinge * Callback received when the backend's state changes. 
24340d160211SJeremy Fitzhardinge */ 2435f502bf2bSIan Campbell static void netback_changed(struct xenbus_device *dev, 24360d160211SJeremy Fitzhardinge enum xenbus_state backend_state) 24370d160211SJeremy Fitzhardinge { 24381b713e00SGreg Kroah-Hartman struct netfront_info *np = dev_get_drvdata(&dev->dev); 24390d160211SJeremy Fitzhardinge struct net_device *netdev = np->netdev; 24400d160211SJeremy Fitzhardinge 24410d160211SJeremy Fitzhardinge dev_dbg(&dev->dev, "%s\n", xenbus_strstate(backend_state)); 24420d160211SJeremy Fitzhardinge 24438edfe2e9SJuergen Gross wake_up_all(&module_wq); 24448edfe2e9SJuergen Gross 24450d160211SJeremy Fitzhardinge switch (backend_state) { 24460d160211SJeremy Fitzhardinge case XenbusStateInitialising: 24470d160211SJeremy Fitzhardinge case XenbusStateInitialised: 2448b78c9512SNoboru Iwamatsu case XenbusStateReconfiguring: 2449b78c9512SNoboru Iwamatsu case XenbusStateReconfigured: 24500d160211SJeremy Fitzhardinge case XenbusStateUnknown: 24510d160211SJeremy Fitzhardinge break; 24520d160211SJeremy Fitzhardinge 24530d160211SJeremy Fitzhardinge case XenbusStateInitWait: 24540d160211SJeremy Fitzhardinge if (dev->state != XenbusStateInitialising) 24550d160211SJeremy Fitzhardinge break; 24560d160211SJeremy Fitzhardinge if (xennet_connect(netdev) != 0) 24570d160211SJeremy Fitzhardinge break; 24580d160211SJeremy Fitzhardinge xenbus_switch_state(dev, XenbusStateConnected); 245908e34eb1SLaszlo Ersek break; 246008e34eb1SLaszlo Ersek 246108e34eb1SLaszlo Ersek case XenbusStateConnected: 2462ee89bab1SAmerigo Wang netdev_notify_peers(netdev); 24630d160211SJeremy Fitzhardinge break; 24640d160211SJeremy Fitzhardinge 2465bce3ea81SDavid Vrabel case XenbusStateClosed: 2466bce3ea81SDavid Vrabel if (dev->state == XenbusStateClosed) 2467bce3ea81SDavid Vrabel break; 2468df561f66SGustavo A. R. 
Silva fallthrough; /* Missed the backend's CLOSING state */ 24690d160211SJeremy Fitzhardinge case XenbusStateClosing: 24700d160211SJeremy Fitzhardinge xenbus_frontend_closed(dev); 24710d160211SJeremy Fitzhardinge break; 24720d160211SJeremy Fitzhardinge } 24730d160211SJeremy Fitzhardinge } 24740d160211SJeremy Fitzhardinge 2475e0ce4af9SIan Campbell static const struct xennet_stat { 2476e0ce4af9SIan Campbell char name[ETH_GSTRING_LEN]; 2477e0ce4af9SIan Campbell u16 offset; 2478e0ce4af9SIan Campbell } xennet_stats[] = { 2479e0ce4af9SIan Campbell { 2480e0ce4af9SIan Campbell "rx_gso_checksum_fixup", 2481e0ce4af9SIan Campbell offsetof(struct netfront_info, rx_gso_checksum_fixup) 2482e0ce4af9SIan Campbell }, 2483e0ce4af9SIan Campbell }; 2484e0ce4af9SIan Campbell 2485e0ce4af9SIan Campbell static int xennet_get_sset_count(struct net_device *dev, int string_set) 2486e0ce4af9SIan Campbell { 2487e0ce4af9SIan Campbell switch (string_set) { 2488e0ce4af9SIan Campbell case ETH_SS_STATS: 2489e0ce4af9SIan Campbell return ARRAY_SIZE(xennet_stats); 2490e0ce4af9SIan Campbell default: 2491e0ce4af9SIan Campbell return -EINVAL; 2492e0ce4af9SIan Campbell } 2493e0ce4af9SIan Campbell } 2494e0ce4af9SIan Campbell 2495e0ce4af9SIan Campbell static void xennet_get_ethtool_stats(struct net_device *dev, 2496e0ce4af9SIan Campbell struct ethtool_stats *stats, u64 * data) 2497e0ce4af9SIan Campbell { 2498e0ce4af9SIan Campbell void *np = netdev_priv(dev); 2499e0ce4af9SIan Campbell int i; 2500e0ce4af9SIan Campbell 2501e0ce4af9SIan Campbell for (i = 0; i < ARRAY_SIZE(xennet_stats); i++) 25022688fcb7SAndrew J. 
Bennieston data[i] = atomic_read((atomic_t *)(np + xennet_stats[i].offset)); 2503e0ce4af9SIan Campbell } 2504e0ce4af9SIan Campbell 2505e0ce4af9SIan Campbell static void xennet_get_strings(struct net_device *dev, u32 stringset, u8 * data) 2506e0ce4af9SIan Campbell { 2507e0ce4af9SIan Campbell int i; 2508e0ce4af9SIan Campbell 2509e0ce4af9SIan Campbell switch (stringset) { 2510e0ce4af9SIan Campbell case ETH_SS_STATS: 2511e0ce4af9SIan Campbell for (i = 0; i < ARRAY_SIZE(xennet_stats); i++) 2512e0ce4af9SIan Campbell memcpy(data + i * ETH_GSTRING_LEN, 2513e0ce4af9SIan Campbell xennet_stats[i].name, ETH_GSTRING_LEN); 2514e0ce4af9SIan Campbell break; 2515e0ce4af9SIan Campbell } 2516e0ce4af9SIan Campbell } 2517e0ce4af9SIan Campbell 25180fc0b732SStephen Hemminger static const struct ethtool_ops xennet_ethtool_ops = 25190d160211SJeremy Fitzhardinge { 25200d160211SJeremy Fitzhardinge .get_link = ethtool_op_get_link, 2521e0ce4af9SIan Campbell 2522e0ce4af9SIan Campbell .get_sset_count = xennet_get_sset_count, 2523e0ce4af9SIan Campbell .get_ethtool_stats = xennet_get_ethtool_stats, 2524e0ce4af9SIan Campbell .get_strings = xennet_get_strings, 252591ffb9d3SDaniel Drown .get_ts_info = ethtool_op_get_ts_info, 25260d160211SJeremy Fitzhardinge }; 25270d160211SJeremy Fitzhardinge 25280d160211SJeremy Fitzhardinge #ifdef CONFIG_SYSFS 25291f3c2ebaSDavid Vrabel static ssize_t show_rxbuf(struct device *dev, 25300d160211SJeremy Fitzhardinge struct device_attribute *attr, char *buf) 25310d160211SJeremy Fitzhardinge { 25321f3c2ebaSDavid Vrabel return sprintf(buf, "%lu\n", NET_RX_RING_SIZE); 25330d160211SJeremy Fitzhardinge } 25340d160211SJeremy Fitzhardinge 25351f3c2ebaSDavid Vrabel static ssize_t store_rxbuf(struct device *dev, 25360d160211SJeremy Fitzhardinge struct device_attribute *attr, 25370d160211SJeremy Fitzhardinge const char *buf, size_t len) 25380d160211SJeremy Fitzhardinge { 25390d160211SJeremy Fitzhardinge char *endp; 25400d160211SJeremy Fitzhardinge 25410d160211SJeremy Fitzhardinge 
if (!capable(CAP_NET_ADMIN)) 25420d160211SJeremy Fitzhardinge return -EPERM; 25430d160211SJeremy Fitzhardinge 25448ed7ec13SAndrew Lunn simple_strtoul(buf, &endp, 0); 25450d160211SJeremy Fitzhardinge if (endp == buf) 25460d160211SJeremy Fitzhardinge return -EBADMSG; 25470d160211SJeremy Fitzhardinge 25481f3c2ebaSDavid Vrabel /* rxbuf_min and rxbuf_max are no longer configurable. */ 25490d160211SJeremy Fitzhardinge 25500d160211SJeremy Fitzhardinge return len; 25510d160211SJeremy Fitzhardinge } 25520d160211SJeremy Fitzhardinge 2553d61e4038SJoe Perches static DEVICE_ATTR(rxbuf_min, 0644, show_rxbuf, store_rxbuf); 2554d61e4038SJoe Perches static DEVICE_ATTR(rxbuf_max, 0644, show_rxbuf, store_rxbuf); 2555d61e4038SJoe Perches static DEVICE_ATTR(rxbuf_cur, 0444, show_rxbuf, NULL); 255627b917e5STakashi Iwai 255727b917e5STakashi Iwai static struct attribute *xennet_dev_attrs[] = { 255827b917e5STakashi Iwai &dev_attr_rxbuf_min.attr, 255927b917e5STakashi Iwai &dev_attr_rxbuf_max.attr, 256027b917e5STakashi Iwai &dev_attr_rxbuf_cur.attr, 256127b917e5STakashi Iwai NULL 25620d160211SJeremy Fitzhardinge }; 25630d160211SJeremy Fitzhardinge 256427b917e5STakashi Iwai static const struct attribute_group xennet_dev_group = { 256527b917e5STakashi Iwai .attrs = xennet_dev_attrs 256627b917e5STakashi Iwai }; 25670d160211SJeremy Fitzhardinge #endif /* CONFIG_SYSFS */ 25680d160211SJeremy Fitzhardinge 2569c2c63310SAndrea Righi static void xennet_bus_close(struct xenbus_device *dev) 2570c2c63310SAndrea Righi { 2571c2c63310SAndrea Righi int ret; 2572c2c63310SAndrea Righi 2573c2c63310SAndrea Righi if (xenbus_read_driver_state(dev->otherend) == XenbusStateClosed) 2574c2c63310SAndrea Righi return; 2575c2c63310SAndrea Righi do { 2576c2c63310SAndrea Righi xenbus_switch_state(dev, XenbusStateClosing); 2577c2c63310SAndrea Righi ret = wait_event_timeout(module_wq, 2578c2c63310SAndrea Righi xenbus_read_driver_state(dev->otherend) == 2579c2c63310SAndrea Righi XenbusStateClosing || 2580c2c63310SAndrea Righi 
xenbus_read_driver_state(dev->otherend) == 2581c2c63310SAndrea Righi XenbusStateClosed || 2582c2c63310SAndrea Righi xenbus_read_driver_state(dev->otherend) == 2583c2c63310SAndrea Righi XenbusStateUnknown, 2584c2c63310SAndrea Righi XENNET_TIMEOUT); 2585c2c63310SAndrea Righi } while (!ret); 2586c2c63310SAndrea Righi 2587c2c63310SAndrea Righi if (xenbus_read_driver_state(dev->otherend) == XenbusStateClosed) 2588c2c63310SAndrea Righi return; 2589c2c63310SAndrea Righi 2590c2c63310SAndrea Righi do { 2591c2c63310SAndrea Righi xenbus_switch_state(dev, XenbusStateClosed); 2592c2c63310SAndrea Righi ret = wait_event_timeout(module_wq, 2593c2c63310SAndrea Righi xenbus_read_driver_state(dev->otherend) == 2594c2c63310SAndrea Righi XenbusStateClosed || 2595c2c63310SAndrea Righi xenbus_read_driver_state(dev->otherend) == 2596c2c63310SAndrea Righi XenbusStateUnknown, 2597c2c63310SAndrea Righi XENNET_TIMEOUT); 2598c2c63310SAndrea Righi } while (!ret); 2599c2c63310SAndrea Righi } 2600c2c63310SAndrea Righi 26018e0e46bbSBill Pemberton static int xennet_remove(struct xenbus_device *dev) 26020d160211SJeremy Fitzhardinge { 26031b713e00SGreg Kroah-Hartman struct netfront_info *info = dev_get_drvdata(&dev->dev); 26040d160211SJeremy Fitzhardinge 2605c2c63310SAndrea Righi xennet_bus_close(dev); 26060d160211SJeremy Fitzhardinge xennet_disconnect_backend(info); 26070d160211SJeremy Fitzhardinge 2608f599c64fSRoss Lagerwall if (info->netdev->reg_state == NETREG_REGISTERED) 26096bc96d04SIan Campbell unregister_netdev(info->netdev); 26106bc96d04SIan Campbell 2611f599c64fSRoss Lagerwall if (info->queues) { 2612f599c64fSRoss Lagerwall rtnl_lock(); 2613ad068118SDavid Vrabel xennet_destroy_queues(info); 2614f599c64fSRoss Lagerwall rtnl_unlock(); 2615f599c64fSRoss Lagerwall } 2616900e1833SDavid Vrabel xennet_free_netdev(info->netdev); 26170d160211SJeremy Fitzhardinge 26180d160211SJeremy Fitzhardinge return 0; 26190d160211SJeremy Fitzhardinge } 26200d160211SJeremy Fitzhardinge 262195afae48SDavid Vrabel 
static const struct xenbus_device_id netfront_ids[] = { 262295afae48SDavid Vrabel { "vif" }, 262395afae48SDavid Vrabel { "" } 262495afae48SDavid Vrabel }; 262595afae48SDavid Vrabel 262695afae48SDavid Vrabel static struct xenbus_driver netfront_driver = { 262795afae48SDavid Vrabel .ids = netfront_ids, 26280d160211SJeremy Fitzhardinge .probe = netfront_probe, 26298e0e46bbSBill Pemberton .remove = xennet_remove, 26300d160211SJeremy Fitzhardinge .resume = netfront_resume, 2631f502bf2bSIan Campbell .otherend_changed = netback_changed, 263295afae48SDavid Vrabel }; 26330d160211SJeremy Fitzhardinge 26340d160211SJeremy Fitzhardinge static int __init netif_init(void) 26350d160211SJeremy Fitzhardinge { 26366e833587SJeremy Fitzhardinge if (!xen_domain()) 26370d160211SJeremy Fitzhardinge return -ENODEV; 26380d160211SJeremy Fitzhardinge 263951c71a3bSKonrad Rzeszutek Wilk if (!xen_has_pv_nic_devices()) 2640b9136d20SIgor Mammedov return -ENODEV; 2641b9136d20SIgor Mammedov 2642383eda32SJoe Perches pr_info("Initialising Xen virtual ethernet driver\n"); 26430d160211SJeremy Fitzhardinge 2644034702a6SJuergen Gross /* Allow as many queues as there are CPUs inut max. 8 if user has not 264532a84405SWei Liu * specified a value. 264632a84405SWei Liu */ 264732a84405SWei Liu if (xennet_max_queues == 0) 2648034702a6SJuergen Gross xennet_max_queues = min_t(unsigned int, MAX_QUEUES_DEFAULT, 2649034702a6SJuergen Gross num_online_cpus()); 265050ee6061SAndrew J. 
Bennieston 2651ffb78a26SAl Viro return xenbus_register_frontend(&netfront_driver); 26520d160211SJeremy Fitzhardinge } 26530d160211SJeremy Fitzhardinge module_init(netif_init); 26540d160211SJeremy Fitzhardinge 26550d160211SJeremy Fitzhardinge 26560d160211SJeremy Fitzhardinge static void __exit netif_exit(void) 26570d160211SJeremy Fitzhardinge { 2658ffb78a26SAl Viro xenbus_unregister_driver(&netfront_driver); 26590d160211SJeremy Fitzhardinge } 26600d160211SJeremy Fitzhardinge module_exit(netif_exit); 26610d160211SJeremy Fitzhardinge 26620d160211SJeremy Fitzhardinge MODULE_DESCRIPTION("Xen virtual network device frontend"); 26630d160211SJeremy Fitzhardinge MODULE_LICENSE("GPL"); 2664d2f0c52bSMark McLoughlin MODULE_ALIAS("xen:vif"); 26654f93f09bSMark McLoughlin MODULE_ALIAS("xennet"); 2666