10d160211SJeremy Fitzhardinge /* 20d160211SJeremy Fitzhardinge * Virtual network driver for conversing with remote driver backends. 30d160211SJeremy Fitzhardinge * 40d160211SJeremy Fitzhardinge * Copyright (c) 2002-2005, K A Fraser 50d160211SJeremy Fitzhardinge * Copyright (c) 2005, XenSource Ltd 60d160211SJeremy Fitzhardinge * 70d160211SJeremy Fitzhardinge * This program is free software; you can redistribute it and/or 80d160211SJeremy Fitzhardinge * modify it under the terms of the GNU General Public License version 2 90d160211SJeremy Fitzhardinge * as published by the Free Software Foundation; or, when distributed 100d160211SJeremy Fitzhardinge * separately from the Linux kernel or incorporated into other 110d160211SJeremy Fitzhardinge * software packages, subject to the following license: 120d160211SJeremy Fitzhardinge * 130d160211SJeremy Fitzhardinge * Permission is hereby granted, free of charge, to any person obtaining a copy 140d160211SJeremy Fitzhardinge * of this source file (the "Software"), to deal in the Software without 150d160211SJeremy Fitzhardinge * restriction, including without limitation the rights to use, copy, modify, 160d160211SJeremy Fitzhardinge * merge, publish, distribute, sublicense, and/or sell copies of the Software, 170d160211SJeremy Fitzhardinge * and to permit persons to whom the Software is furnished to do so, subject to 180d160211SJeremy Fitzhardinge * the following conditions: 190d160211SJeremy Fitzhardinge * 200d160211SJeremy Fitzhardinge * The above copyright notice and this permission notice shall be included in 210d160211SJeremy Fitzhardinge * all copies or substantial portions of the Software. 220d160211SJeremy Fitzhardinge * 230d160211SJeremy Fitzhardinge * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 240d160211SJeremy Fitzhardinge * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 250d160211SJeremy Fitzhardinge * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 260d160211SJeremy Fitzhardinge * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 270d160211SJeremy Fitzhardinge * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 280d160211SJeremy Fitzhardinge * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS 290d160211SJeremy Fitzhardinge * IN THE SOFTWARE. 300d160211SJeremy Fitzhardinge */ 310d160211SJeremy Fitzhardinge 32383eda32SJoe Perches #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 33383eda32SJoe Perches 340d160211SJeremy Fitzhardinge #include <linux/module.h> 350d160211SJeremy Fitzhardinge #include <linux/kernel.h> 360d160211SJeremy Fitzhardinge #include <linux/netdevice.h> 370d160211SJeremy Fitzhardinge #include <linux/etherdevice.h> 380d160211SJeremy Fitzhardinge #include <linux/skbuff.h> 390d160211SJeremy Fitzhardinge #include <linux/ethtool.h> 400d160211SJeremy Fitzhardinge #include <linux/if_ether.h> 419ecd1a75SWei Liu #include <net/tcp.h> 420d160211SJeremy Fitzhardinge #include <linux/udp.h> 430d160211SJeremy Fitzhardinge #include <linux/moduleparam.h> 440d160211SJeremy Fitzhardinge #include <linux/mm.h> 455a0e3ad6STejun Heo #include <linux/slab.h> 460d160211SJeremy Fitzhardinge #include <net/ip.h> 470d160211SJeremy Fitzhardinge 48ca981633SStefano Stabellini #include <asm/xen/page.h> 491ccbf534SJeremy Fitzhardinge #include <xen/xen.h> 500d160211SJeremy Fitzhardinge #include <xen/xenbus.h> 510d160211SJeremy Fitzhardinge #include <xen/events.h> 520d160211SJeremy Fitzhardinge #include <xen/page.h> 53b9136d20SIgor Mammedov #include <xen/platform_pci.h> 540d160211SJeremy Fitzhardinge #include <xen/grant_table.h> 550d160211SJeremy Fitzhardinge 560d160211SJeremy Fitzhardinge #include <xen/interface/io/netif.h> 570d160211SJeremy Fitzhardinge #include <xen/interface/memory.h> 580d160211SJeremy Fitzhardinge #include <xen/interface/grant_table.h> 590d160211SJeremy Fitzhardinge 6050ee6061SAndrew J. Bennieston /* Module parameters */ 6150ee6061SAndrew J. Bennieston static unsigned int xennet_max_queues; 6250ee6061SAndrew J. Bennieston module_param_named(max_queues, xennet_max_queues, uint, 0644); 6350ee6061SAndrew J. Bennieston MODULE_PARM_DESC(max_queues, 6450ee6061SAndrew J. Bennieston "Maximum number of queues per virtual interface"); 6550ee6061SAndrew J. Bennieston 660fc0b732SStephen Hemminger static const struct ethtool_ops xennet_ethtool_ops; 670d160211SJeremy Fitzhardinge 680d160211SJeremy Fitzhardinge struct netfront_cb { 693683243bSIan Campbell int pull_to; 700d160211SJeremy Fitzhardinge }; 710d160211SJeremy Fitzhardinge 720d160211SJeremy Fitzhardinge #define NETFRONT_SKB_CB(skb) ((struct netfront_cb *)((skb)->cb)) 730d160211SJeremy Fitzhardinge 740d160211SJeremy Fitzhardinge #define RX_COPY_THRESHOLD 256 750d160211SJeremy Fitzhardinge 760d160211SJeremy Fitzhardinge #define GRANT_INVALID_REF 0 770d160211SJeremy Fitzhardinge 78667c78afSJeremy Fitzhardinge #define NET_TX_RING_SIZE __CONST_RING_SIZE(xen_netif_tx, PAGE_SIZE) 79667c78afSJeremy Fitzhardinge #define NET_RX_RING_SIZE __CONST_RING_SIZE(xen_netif_rx, PAGE_SIZE) 8040206dd9SWei Liu #define TX_MAX_TARGET min_t(int, NET_TX_RING_SIZE, 256) 810d160211SJeremy Fitzhardinge 822688fcb7SAndrew J. Bennieston /* Queue name is interface name with "-qNNN" appended */ 832688fcb7SAndrew J. Bennieston #define QUEUE_NAME_SIZE (IFNAMSIZ + 6) 842688fcb7SAndrew J. Bennieston 852688fcb7SAndrew J. Bennieston /* IRQ name is queue name with "-tx" or "-rx" appended */ 862688fcb7SAndrew J. Bennieston #define IRQ_NAME_SIZE (QUEUE_NAME_SIZE + 3) 872688fcb7SAndrew J. Bennieston 88e00f85beSstephen hemminger struct netfront_stats { 89e00f85beSstephen hemminger u64 rx_packets; 90e00f85beSstephen hemminger u64 tx_packets; 91e00f85beSstephen hemminger u64 rx_bytes; 92e00f85beSstephen hemminger u64 tx_bytes; 93e00f85beSstephen hemminger struct u64_stats_sync syncp; 94e00f85beSstephen hemminger }; 95e00f85beSstephen hemminger 962688fcb7SAndrew J. Bennieston struct netfront_info; 972688fcb7SAndrew J. Bennieston 982688fcb7SAndrew J. Bennieston struct netfront_queue { 992688fcb7SAndrew J. Bennieston unsigned int id; /* Queue ID, 0-based */ 1002688fcb7SAndrew J. Bennieston char name[QUEUE_NAME_SIZE]; /* DEVNAME-qN */ 1012688fcb7SAndrew J. Bennieston struct netfront_info *info; 1020d160211SJeremy Fitzhardinge 103bea3348eSStephen Hemminger struct napi_struct napi; 1040d160211SJeremy Fitzhardinge 105d634bf2cSWei Liu /* Split event channels support, tx_* == rx_* when using 106d634bf2cSWei Liu * single event channel. 107d634bf2cSWei Liu */ 108d634bf2cSWei Liu unsigned int tx_evtchn, rx_evtchn; 109d634bf2cSWei Liu unsigned int tx_irq, rx_irq; 110d634bf2cSWei Liu /* Only used when split event channels support is enabled */ 1112688fcb7SAndrew J. Bennieston char tx_irq_name[IRQ_NAME_SIZE]; /* DEVNAME-qN-tx */ 1122688fcb7SAndrew J. Bennieston char rx_irq_name[IRQ_NAME_SIZE]; /* DEVNAME-qN-rx */ 1130d160211SJeremy Fitzhardinge 1140d160211SJeremy Fitzhardinge spinlock_t tx_lock; 11584284d3cSJeremy Fitzhardinge struct xen_netif_tx_front_ring tx; 11684284d3cSJeremy Fitzhardinge int tx_ring_ref; 1170d160211SJeremy Fitzhardinge 1180d160211SJeremy Fitzhardinge /* 1190d160211SJeremy Fitzhardinge * {tx,rx}_skbs store outstanding skbuffs. Free tx_skb entries 1200d160211SJeremy Fitzhardinge * are linked from tx_skb_freelist through skb_entry.link. 1210d160211SJeremy Fitzhardinge * 1220d160211SJeremy Fitzhardinge * NB. Freelist index entries are always going to be less than 1230d160211SJeremy Fitzhardinge * PAGE_OFFSET, whereas pointers to skbs will always be equal or 1240d160211SJeremy Fitzhardinge * greater than PAGE_OFFSET: we use this property to distinguish 1250d160211SJeremy Fitzhardinge * them. 1260d160211SJeremy Fitzhardinge */ 1270d160211SJeremy Fitzhardinge union skb_entry { 1280d160211SJeremy Fitzhardinge struct sk_buff *skb; 1291ffb40b8SIsaku Yamahata unsigned long link; 1300d160211SJeremy Fitzhardinge } tx_skbs[NET_TX_RING_SIZE]; 1310d160211SJeremy Fitzhardinge grant_ref_t gref_tx_head; 1320d160211SJeremy Fitzhardinge grant_ref_t grant_tx_ref[NET_TX_RING_SIZE]; 133cefe0078SAnnie Li struct page *grant_tx_page[NET_TX_RING_SIZE]; 1340d160211SJeremy Fitzhardinge unsigned tx_skb_freelist; 1350d160211SJeremy Fitzhardinge 13684284d3cSJeremy Fitzhardinge spinlock_t rx_lock ____cacheline_aligned_in_smp; 13784284d3cSJeremy Fitzhardinge struct xen_netif_rx_front_ring rx; 13884284d3cSJeremy Fitzhardinge int rx_ring_ref; 13984284d3cSJeremy Fitzhardinge 14084284d3cSJeremy Fitzhardinge /* Receive-ring batched refills. */ 14184284d3cSJeremy Fitzhardinge #define RX_MIN_TARGET 8 14284284d3cSJeremy Fitzhardinge #define RX_DFL_MIN_TARGET 64 14384284d3cSJeremy Fitzhardinge #define RX_MAX_TARGET min_t(int, NET_RX_RING_SIZE, 256) 14484284d3cSJeremy Fitzhardinge unsigned rx_min_target, rx_max_target, rx_target; 14584284d3cSJeremy Fitzhardinge struct sk_buff_head rx_batch; 14684284d3cSJeremy Fitzhardinge 14784284d3cSJeremy Fitzhardinge struct timer_list rx_refill_timer; 14884284d3cSJeremy Fitzhardinge 1490d160211SJeremy Fitzhardinge struct sk_buff *rx_skbs[NET_RX_RING_SIZE]; 1500d160211SJeremy Fitzhardinge grant_ref_t gref_rx_head; 1510d160211SJeremy Fitzhardinge grant_ref_t grant_rx_ref[NET_RX_RING_SIZE]; 1520d160211SJeremy Fitzhardinge 1530d160211SJeremy Fitzhardinge unsigned long rx_pfn_array[NET_RX_RING_SIZE]; 1540d160211SJeremy Fitzhardinge struct multicall_entry rx_mcl[NET_RX_RING_SIZE+1]; 1550d160211SJeremy Fitzhardinge struct mmu_update rx_mmu[NET_RX_RING_SIZE]; 1562688fcb7SAndrew J. Bennieston }; 1572688fcb7SAndrew J. Bennieston 1582688fcb7SAndrew J. Bennieston struct netfront_info { 1592688fcb7SAndrew J. Bennieston struct list_head list; 1602688fcb7SAndrew J. Bennieston struct net_device *netdev; 1612688fcb7SAndrew J. Bennieston 1622688fcb7SAndrew J. Bennieston struct xenbus_device *xbdev; 1632688fcb7SAndrew J. Bennieston 1642688fcb7SAndrew J. Bennieston /* Multi-queue support */ 1652688fcb7SAndrew J. Bennieston struct netfront_queue *queues; 166e0ce4af9SIan Campbell 167e0ce4af9SIan Campbell /* Statistics */ 168e00f85beSstephen hemminger struct netfront_stats __percpu *stats; 169e00f85beSstephen hemminger 1702688fcb7SAndrew J. Bennieston atomic_t rx_gso_checksum_fixup; 1710d160211SJeremy Fitzhardinge }; 1720d160211SJeremy Fitzhardinge 1730d160211SJeremy Fitzhardinge struct netfront_rx_info { 1740d160211SJeremy Fitzhardinge struct xen_netif_rx_response rx; 1750d160211SJeremy Fitzhardinge struct xen_netif_extra_info extras[XEN_NETIF_EXTRA_TYPE_MAX - 1]; 1760d160211SJeremy Fitzhardinge }; 1770d160211SJeremy Fitzhardinge 1781ffb40b8SIsaku Yamahata static void skb_entry_set_link(union skb_entry *list, unsigned short id) 1791ffb40b8SIsaku Yamahata { 1801ffb40b8SIsaku Yamahata list->link = id; 1811ffb40b8SIsaku Yamahata } 1821ffb40b8SIsaku Yamahata 1831ffb40b8SIsaku Yamahata static int skb_entry_is_link(const union skb_entry *list) 1841ffb40b8SIsaku Yamahata { 1851ffb40b8SIsaku Yamahata BUILD_BUG_ON(sizeof(list->skb) != sizeof(list->link)); 186807540baSEric Dumazet return (unsigned long)list->skb < PAGE_OFFSET; 1871ffb40b8SIsaku Yamahata } 1881ffb40b8SIsaku Yamahata 1890d160211SJeremy Fitzhardinge /* 1900d160211SJeremy Fitzhardinge * Access macros for acquiring freeing slots in tx_skbs[]. 1910d160211SJeremy Fitzhardinge */ 1920d160211SJeremy Fitzhardinge 1930d160211SJeremy Fitzhardinge static void add_id_to_freelist(unsigned *head, union skb_entry *list, 1940d160211SJeremy Fitzhardinge unsigned short id) 1950d160211SJeremy Fitzhardinge { 1961ffb40b8SIsaku Yamahata skb_entry_set_link(&list[id], *head); 1970d160211SJeremy Fitzhardinge *head = id; 1980d160211SJeremy Fitzhardinge } 1990d160211SJeremy Fitzhardinge 2000d160211SJeremy Fitzhardinge static unsigned short get_id_from_freelist(unsigned *head, 2010d160211SJeremy Fitzhardinge union skb_entry *list) 2020d160211SJeremy Fitzhardinge { 2030d160211SJeremy Fitzhardinge unsigned int id = *head; 2040d160211SJeremy Fitzhardinge *head = list[id].link; 2050d160211SJeremy Fitzhardinge return id; 2060d160211SJeremy Fitzhardinge } 2070d160211SJeremy Fitzhardinge 2080d160211SJeremy Fitzhardinge static int xennet_rxidx(RING_IDX idx) 2090d160211SJeremy Fitzhardinge { 2100d160211SJeremy Fitzhardinge return idx & (NET_RX_RING_SIZE - 1); 2110d160211SJeremy Fitzhardinge } 2120d160211SJeremy Fitzhardinge 2132688fcb7SAndrew J. Bennieston static struct sk_buff *xennet_get_rx_skb(struct netfront_queue *queue, 2140d160211SJeremy Fitzhardinge RING_IDX ri) 2150d160211SJeremy Fitzhardinge { 2160d160211SJeremy Fitzhardinge int i = xennet_rxidx(ri); 2172688fcb7SAndrew J. Bennieston struct sk_buff *skb = queue->rx_skbs[i]; 2182688fcb7SAndrew J. Bennieston queue->rx_skbs[i] = NULL; 2190d160211SJeremy Fitzhardinge return skb; 2200d160211SJeremy Fitzhardinge } 2210d160211SJeremy Fitzhardinge 2222688fcb7SAndrew J. Bennieston static grant_ref_t xennet_get_rx_ref(struct netfront_queue *queue, 2230d160211SJeremy Fitzhardinge RING_IDX ri) 2240d160211SJeremy Fitzhardinge { 2250d160211SJeremy Fitzhardinge int i = xennet_rxidx(ri); 2262688fcb7SAndrew J. Bennieston grant_ref_t ref = queue->grant_rx_ref[i]; 2272688fcb7SAndrew J. Bennieston queue->grant_rx_ref[i] = GRANT_INVALID_REF; 2280d160211SJeremy Fitzhardinge return ref; 2290d160211SJeremy Fitzhardinge } 2300d160211SJeremy Fitzhardinge 2310d160211SJeremy Fitzhardinge #ifdef CONFIG_SYSFS 2320d160211SJeremy Fitzhardinge static int xennet_sysfs_addif(struct net_device *netdev); 2330d160211SJeremy Fitzhardinge static void xennet_sysfs_delif(struct net_device *netdev); 2340d160211SJeremy Fitzhardinge #else /* !CONFIG_SYSFS */ 2350d160211SJeremy Fitzhardinge #define xennet_sysfs_addif(dev) (0) 2360d160211SJeremy Fitzhardinge #define xennet_sysfs_delif(dev) do { } while (0) 2370d160211SJeremy Fitzhardinge #endif 2380d160211SJeremy Fitzhardinge 2393ad9b358SMichał Mirosław static bool xennet_can_sg(struct net_device *dev) 2400d160211SJeremy Fitzhardinge { 2413ad9b358SMichał Mirosław return dev->features & NETIF_F_SG; 2420d160211SJeremy Fitzhardinge } 2430d160211SJeremy Fitzhardinge 2440d160211SJeremy Fitzhardinge 2450d160211SJeremy Fitzhardinge static void rx_refill_timeout(unsigned long data) 2460d160211SJeremy Fitzhardinge { 2472688fcb7SAndrew J. Bennieston struct netfront_queue *queue = (struct netfront_queue *)data; 2482688fcb7SAndrew J. Bennieston napi_schedule(&queue->napi); 2490d160211SJeremy Fitzhardinge } 2500d160211SJeremy Fitzhardinge 2512688fcb7SAndrew J. Bennieston static int netfront_tx_slot_available(struct netfront_queue *queue) 2520d160211SJeremy Fitzhardinge { 2532688fcb7SAndrew J. Bennieston return (queue->tx.req_prod_pvt - queue->tx.rsp_cons) < 254807540baSEric Dumazet (TX_MAX_TARGET - MAX_SKB_FRAGS - 2); 2550d160211SJeremy Fitzhardinge } 2560d160211SJeremy Fitzhardinge 2572688fcb7SAndrew J. Bennieston static void xennet_maybe_wake_tx(struct netfront_queue *queue) 2580d160211SJeremy Fitzhardinge { 2592688fcb7SAndrew J. Bennieston struct net_device *dev = queue->info->netdev; 2602688fcb7SAndrew J. Bennieston struct netdev_queue *dev_queue = netdev_get_tx_queue(dev, queue->id); 2610d160211SJeremy Fitzhardinge 2622688fcb7SAndrew J. Bennieston if (unlikely(netif_tx_queue_stopped(dev_queue)) && 2632688fcb7SAndrew J. Bennieston netfront_tx_slot_available(queue) && 2640d160211SJeremy Fitzhardinge likely(netif_running(dev))) 2652688fcb7SAndrew J. Bennieston netif_tx_wake_queue(netdev_get_tx_queue(dev, queue->id)); 2660d160211SJeremy Fitzhardinge } 2670d160211SJeremy Fitzhardinge 2682688fcb7SAndrew J. Bennieston static void xennet_alloc_rx_buffers(struct netfront_queue *queue) 2690d160211SJeremy Fitzhardinge { 2700d160211SJeremy Fitzhardinge unsigned short id; 2710d160211SJeremy Fitzhardinge struct sk_buff *skb; 2720d160211SJeremy Fitzhardinge struct page *page; 2730d160211SJeremy Fitzhardinge int i, batch_target, notify; 2742688fcb7SAndrew J. Bennieston RING_IDX req_prod = queue->rx.req_prod_pvt; 2750d160211SJeremy Fitzhardinge grant_ref_t ref; 2760d160211SJeremy Fitzhardinge unsigned long pfn; 2770d160211SJeremy Fitzhardinge void *vaddr; 2780d160211SJeremy Fitzhardinge struct xen_netif_rx_request *req; 2790d160211SJeremy Fitzhardinge 2802688fcb7SAndrew J. Bennieston if (unlikely(!netif_carrier_ok(queue->info->netdev))) 2810d160211SJeremy Fitzhardinge return; 2820d160211SJeremy Fitzhardinge 2830d160211SJeremy Fitzhardinge /* 2840d160211SJeremy Fitzhardinge * Allocate skbuffs greedily, even though we batch updates to the 2850d160211SJeremy Fitzhardinge * receive ring. This creates a less bursty demand on the memory 2860d160211SJeremy Fitzhardinge * allocator, so should reduce the chance of failed allocation requests 2870d160211SJeremy Fitzhardinge * both for ourself and for other kernel subsystems. 2880d160211SJeremy Fitzhardinge */ 2892688fcb7SAndrew J. Bennieston batch_target = queue->rx_target - (req_prod - queue->rx.rsp_cons); 2902688fcb7SAndrew J. Bennieston for (i = skb_queue_len(&queue->rx_batch); i < batch_target; i++) { 2912688fcb7SAndrew J. Bennieston skb = __netdev_alloc_skb(queue->info->netdev, 2922688fcb7SAndrew J. Bennieston RX_COPY_THRESHOLD + NET_IP_ALIGN, 2930d160211SJeremy Fitzhardinge GFP_ATOMIC | __GFP_NOWARN); 2940d160211SJeremy Fitzhardinge if (unlikely(!skb)) 2950d160211SJeremy Fitzhardinge goto no_skb; 2960d160211SJeremy Fitzhardinge 297617a20bbSIsaku Yamahata /* Align ip header to a 16 bytes boundary */ 298617a20bbSIsaku Yamahata skb_reserve(skb, NET_IP_ALIGN); 299617a20bbSIsaku Yamahata 3000d160211SJeremy Fitzhardinge page = alloc_page(GFP_ATOMIC | __GFP_NOWARN); 3010d160211SJeremy Fitzhardinge if (!page) { 3020d160211SJeremy Fitzhardinge kfree_skb(skb); 3030d160211SJeremy Fitzhardinge no_skb: 3040d160211SJeremy Fitzhardinge /* Could not allocate any skbuffs. Try again later. */ 3052688fcb7SAndrew J. Bennieston mod_timer(&queue->rx_refill_timer, 3060d160211SJeremy Fitzhardinge jiffies + (HZ/10)); 307fdcf7765SMa JieYue 308fdcf7765SMa JieYue /* Any skbuffs queued for refill? Force them out. */ 309fdcf7765SMa JieYue if (i != 0) 310fdcf7765SMa JieYue goto refill; 3110d160211SJeremy Fitzhardinge break; 3120d160211SJeremy Fitzhardinge } 3130d160211SJeremy Fitzhardinge 314093b9c71SJan Beulich skb_add_rx_frag(skb, 0, page, 0, 0, PAGE_SIZE); 3152688fcb7SAndrew J. Bennieston __skb_queue_tail(&queue->rx_batch, skb); 3160d160211SJeremy Fitzhardinge } 3170d160211SJeremy Fitzhardinge 3180d160211SJeremy Fitzhardinge /* Is the batch large enough to be worthwhile? */ 3192688fcb7SAndrew J. Bennieston if (i < (queue->rx_target/2)) { 3202688fcb7SAndrew J. Bennieston if (req_prod > queue->rx.sring->req_prod) 3210d160211SJeremy Fitzhardinge goto push; 3220d160211SJeremy Fitzhardinge return; 3230d160211SJeremy Fitzhardinge } 3240d160211SJeremy Fitzhardinge 3250d160211SJeremy Fitzhardinge /* Adjust our fill target if we risked running out of buffers. */ 3262688fcb7SAndrew J. Bennieston if (((req_prod - queue->rx.sring->rsp_prod) < (queue->rx_target / 4)) && 3272688fcb7SAndrew J. Bennieston ((queue->rx_target *= 2) > queue->rx_max_target)) 3282688fcb7SAndrew J. Bennieston queue->rx_target = queue->rx_max_target; 3290d160211SJeremy Fitzhardinge 3300d160211SJeremy Fitzhardinge refill: 3315dcddfaeSJeremy Fitzhardinge for (i = 0; ; i++) { 3322688fcb7SAndrew J. Bennieston skb = __skb_dequeue(&queue->rx_batch); 3330d160211SJeremy Fitzhardinge if (skb == NULL) 3340d160211SJeremy Fitzhardinge break; 3350d160211SJeremy Fitzhardinge 3362688fcb7SAndrew J. Bennieston skb->dev = queue->info->netdev; 3370d160211SJeremy Fitzhardinge 3380d160211SJeremy Fitzhardinge id = xennet_rxidx(req_prod + i); 3390d160211SJeremy Fitzhardinge 3402688fcb7SAndrew J. Bennieston BUG_ON(queue->rx_skbs[id]); 3412688fcb7SAndrew J. Bennieston queue->rx_skbs[id] = skb; 3420d160211SJeremy Fitzhardinge 3432688fcb7SAndrew J. Bennieston ref = gnttab_claim_grant_reference(&queue->gref_rx_head); 3440d160211SJeremy Fitzhardinge BUG_ON((signed short)ref < 0); 3452688fcb7SAndrew J. Bennieston queue->grant_rx_ref[id] = ref; 3460d160211SJeremy Fitzhardinge 34701c68026SIan Campbell pfn = page_to_pfn(skb_frag_page(&skb_shinfo(skb)->frags[0])); 34801c68026SIan Campbell vaddr = page_address(skb_frag_page(&skb_shinfo(skb)->frags[0])); 3490d160211SJeremy Fitzhardinge 3502688fcb7SAndrew J. Bennieston req = RING_GET_REQUEST(&queue->rx, req_prod + i); 3510d160211SJeremy Fitzhardinge gnttab_grant_foreign_access_ref(ref, 3522688fcb7SAndrew J. Bennieston queue->info->xbdev->otherend_id, 3530d160211SJeremy Fitzhardinge pfn_to_mfn(pfn), 3540d160211SJeremy Fitzhardinge 0); 3550d160211SJeremy Fitzhardinge 3560d160211SJeremy Fitzhardinge req->id = id; 3570d160211SJeremy Fitzhardinge req->gref = ref; 3580d160211SJeremy Fitzhardinge } 3590d160211SJeremy Fitzhardinge 3600d160211SJeremy Fitzhardinge wmb(); /* barrier so backend seens requests */ 3610d160211SJeremy Fitzhardinge 3620d160211SJeremy Fitzhardinge /* Above is a suitable barrier to ensure backend will see requests. */ 3632688fcb7SAndrew J. Bennieston queue->rx.req_prod_pvt = req_prod + i; 3640d160211SJeremy Fitzhardinge push: 3652688fcb7SAndrew J. Bennieston RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&queue->rx, notify); 3660d160211SJeremy Fitzhardinge if (notify) 3672688fcb7SAndrew J. Bennieston notify_remote_via_irq(queue->rx_irq); 3680d160211SJeremy Fitzhardinge } 3690d160211SJeremy Fitzhardinge 3700d160211SJeremy Fitzhardinge static int xennet_open(struct net_device *dev) 3710d160211SJeremy Fitzhardinge { 3720d160211SJeremy Fitzhardinge struct netfront_info *np = netdev_priv(dev); 3732688fcb7SAndrew J. Bennieston unsigned int num_queues = dev->real_num_tx_queues; 3742688fcb7SAndrew J. Bennieston unsigned int i = 0; 3752688fcb7SAndrew J. Bennieston struct netfront_queue *queue = NULL; 3760d160211SJeremy Fitzhardinge 3772688fcb7SAndrew J. Bennieston for (i = 0; i < num_queues; ++i) { 3782688fcb7SAndrew J. Bennieston queue = &np->queues[i]; 3792688fcb7SAndrew J. Bennieston napi_enable(&queue->napi); 380bea3348eSStephen Hemminger 3812688fcb7SAndrew J. Bennieston spin_lock_bh(&queue->rx_lock); 3820d160211SJeremy Fitzhardinge if (netif_carrier_ok(dev)) { 3832688fcb7SAndrew J. Bennieston xennet_alloc_rx_buffers(queue); 3842688fcb7SAndrew J. Bennieston queue->rx.sring->rsp_event = queue->rx.rsp_cons + 1; 3852688fcb7SAndrew J. Bennieston if (RING_HAS_UNCONSUMED_RESPONSES(&queue->rx)) 3862688fcb7SAndrew J. Bennieston napi_schedule(&queue->napi); 3870d160211SJeremy Fitzhardinge } 3882688fcb7SAndrew J. Bennieston spin_unlock_bh(&queue->rx_lock); 3892688fcb7SAndrew J. Bennieston } 3900d160211SJeremy Fitzhardinge 3912688fcb7SAndrew J. Bennieston netif_tx_start_all_queues(dev); 3920d160211SJeremy Fitzhardinge 3930d160211SJeremy Fitzhardinge return 0; 3940d160211SJeremy Fitzhardinge } 3950d160211SJeremy Fitzhardinge 3962688fcb7SAndrew J. Bennieston static void xennet_tx_buf_gc(struct netfront_queue *queue) 3970d160211SJeremy Fitzhardinge { 3980d160211SJeremy Fitzhardinge RING_IDX cons, prod; 3990d160211SJeremy Fitzhardinge unsigned short id; 4000d160211SJeremy Fitzhardinge struct sk_buff *skb; 4010d160211SJeremy Fitzhardinge 4022688fcb7SAndrew J. Bennieston BUG_ON(!netif_carrier_ok(queue->info->netdev)); 4030d160211SJeremy Fitzhardinge 4040d160211SJeremy Fitzhardinge do { 4052688fcb7SAndrew J. Bennieston prod = queue->tx.sring->rsp_prod; 4060d160211SJeremy Fitzhardinge rmb(); /* Ensure we see responses up to 'rp'. */ 4070d160211SJeremy Fitzhardinge 4082688fcb7SAndrew J. Bennieston for (cons = queue->tx.rsp_cons; cons != prod; cons++) { 4090d160211SJeremy Fitzhardinge struct xen_netif_tx_response *txrsp; 4100d160211SJeremy Fitzhardinge 4112688fcb7SAndrew J. Bennieston txrsp = RING_GET_RESPONSE(&queue->tx, cons); 412f942dc25SIan Campbell if (txrsp->status == XEN_NETIF_RSP_NULL) 4130d160211SJeremy Fitzhardinge continue; 4140d160211SJeremy Fitzhardinge 4150d160211SJeremy Fitzhardinge id = txrsp->id; 4162688fcb7SAndrew J. Bennieston skb = queue->tx_skbs[id].skb; 4170d160211SJeremy Fitzhardinge if (unlikely(gnttab_query_foreign_access( 4182688fcb7SAndrew J. Bennieston queue->grant_tx_ref[id]) != 0)) { 419383eda32SJoe Perches pr_alert("%s: warning -- grant still in use by backend domain\n", 420383eda32SJoe Perches __func__); 4210d160211SJeremy Fitzhardinge BUG(); 4220d160211SJeremy Fitzhardinge } 4230d160211SJeremy Fitzhardinge gnttab_end_foreign_access_ref( 4242688fcb7SAndrew J. Bennieston queue->grant_tx_ref[id], GNTMAP_readonly); 4250d160211SJeremy Fitzhardinge gnttab_release_grant_reference( 4262688fcb7SAndrew J. Bennieston &queue->gref_tx_head, queue->grant_tx_ref[id]); 4272688fcb7SAndrew J. Bennieston queue->grant_tx_ref[id] = GRANT_INVALID_REF; 4282688fcb7SAndrew J. Bennieston queue->grant_tx_page[id] = NULL; 4292688fcb7SAndrew J. Bennieston add_id_to_freelist(&queue->tx_skb_freelist, queue->tx_skbs, id); 4300d160211SJeremy Fitzhardinge dev_kfree_skb_irq(skb); 4310d160211SJeremy Fitzhardinge } 4320d160211SJeremy Fitzhardinge 4332688fcb7SAndrew J. Bennieston queue->tx.rsp_cons = prod; 4340d160211SJeremy Fitzhardinge 4350d160211SJeremy Fitzhardinge /* 4360d160211SJeremy Fitzhardinge * Set a new event, then check for race with update of tx_cons. 4370d160211SJeremy Fitzhardinge * Note that it is essential to schedule a callback, no matter 4380d160211SJeremy Fitzhardinge * how few buffers are pending. Even if there is space in the 4390d160211SJeremy Fitzhardinge * transmit ring, higher layers may be blocked because too much 4400d160211SJeremy Fitzhardinge * data is outstanding: in such cases notification from Xen is 4410d160211SJeremy Fitzhardinge * likely to be the only kick that we'll get. 4420d160211SJeremy Fitzhardinge */ 4432688fcb7SAndrew J. Bennieston queue->tx.sring->rsp_event = 4442688fcb7SAndrew J. Bennieston prod + ((queue->tx.sring->req_prod - prod) >> 1) + 1; 4450d160211SJeremy Fitzhardinge mb(); /* update shared area */ 4462688fcb7SAndrew J. Bennieston } while ((cons == prod) && (prod != queue->tx.sring->rsp_prod)); 4470d160211SJeremy Fitzhardinge 4482688fcb7SAndrew J. Bennieston xennet_maybe_wake_tx(queue); 4490d160211SJeremy Fitzhardinge } 4500d160211SJeremy Fitzhardinge 4512688fcb7SAndrew J. Bennieston static void xennet_make_frags(struct sk_buff *skb, struct netfront_queue *queue, 4520d160211SJeremy Fitzhardinge struct xen_netif_tx_request *tx) 4530d160211SJeremy Fitzhardinge { 4540d160211SJeremy Fitzhardinge char *data = skb->data; 4550d160211SJeremy Fitzhardinge unsigned long mfn; 4562688fcb7SAndrew J. Bennieston RING_IDX prod = queue->tx.req_prod_pvt; 4570d160211SJeremy Fitzhardinge int frags = skb_shinfo(skb)->nr_frags; 4580d160211SJeremy Fitzhardinge unsigned int offset = offset_in_page(data); 4590d160211SJeremy Fitzhardinge unsigned int len = skb_headlen(skb); 4600d160211SJeremy Fitzhardinge unsigned int id; 4610d160211SJeremy Fitzhardinge grant_ref_t ref; 4620d160211SJeremy Fitzhardinge int i; 4630d160211SJeremy Fitzhardinge 4640d160211SJeremy Fitzhardinge /* While the header overlaps a page boundary (including being 4650d160211SJeremy Fitzhardinge larger than a page), split it it into page-sized chunks. */ 4660d160211SJeremy Fitzhardinge while (len > PAGE_SIZE - offset) { 4670d160211SJeremy Fitzhardinge tx->size = PAGE_SIZE - offset; 468f942dc25SIan Campbell tx->flags |= XEN_NETTXF_more_data; 4690d160211SJeremy Fitzhardinge len -= tx->size; 4700d160211SJeremy Fitzhardinge data += tx->size; 4710d160211SJeremy Fitzhardinge offset = 0; 4720d160211SJeremy Fitzhardinge 4732688fcb7SAndrew J. Bennieston id = get_id_from_freelist(&queue->tx_skb_freelist, queue->tx_skbs); 4742688fcb7SAndrew J. Bennieston queue->tx_skbs[id].skb = skb_get(skb); 4752688fcb7SAndrew J. Bennieston tx = RING_GET_REQUEST(&queue->tx, prod++); 4760d160211SJeremy Fitzhardinge tx->id = id; 4772688fcb7SAndrew J. Bennieston ref = gnttab_claim_grant_reference(&queue->gref_tx_head); 4780d160211SJeremy Fitzhardinge BUG_ON((signed short)ref < 0); 4790d160211SJeremy Fitzhardinge 4800d160211SJeremy Fitzhardinge mfn = virt_to_mfn(data); 4812688fcb7SAndrew J. Bennieston gnttab_grant_foreign_access_ref(ref, queue->info->xbdev->otherend_id, 4820d160211SJeremy Fitzhardinge mfn, GNTMAP_readonly); 4830d160211SJeremy Fitzhardinge 4842688fcb7SAndrew J. Bennieston queue->grant_tx_page[id] = virt_to_page(data); 4852688fcb7SAndrew J. Bennieston tx->gref = queue->grant_tx_ref[id] = ref; 4860d160211SJeremy Fitzhardinge tx->offset = offset; 4870d160211SJeremy Fitzhardinge tx->size = len; 4880d160211SJeremy Fitzhardinge tx->flags = 0; 4890d160211SJeremy Fitzhardinge } 4900d160211SJeremy Fitzhardinge 4910d160211SJeremy Fitzhardinge /* Grant backend access to each skb fragment page. */ 4920d160211SJeremy Fitzhardinge for (i = 0; i < frags; i++) { 4930d160211SJeremy Fitzhardinge skb_frag_t *frag = skb_shinfo(skb)->frags + i; 494f36c3747SIan Campbell struct page *page = skb_frag_page(frag); 495f36c3747SIan Campbell 496f36c3747SIan Campbell len = skb_frag_size(frag); 497f36c3747SIan Campbell offset = frag->page_offset; 498f36c3747SIan Campbell 499f36c3747SIan Campbell /* Data must not cross a page boundary. */ 500f36c3747SIan Campbell BUG_ON(len + offset > PAGE_SIZE<<compound_order(page)); 501f36c3747SIan Campbell 502f36c3747SIan Campbell /* Skip unused frames from start of page */ 503f36c3747SIan Campbell page += offset >> PAGE_SHIFT; 504f36c3747SIan Campbell offset &= ~PAGE_MASK; 505f36c3747SIan Campbell 506f36c3747SIan Campbell while (len > 0) { 507f36c3747SIan Campbell unsigned long bytes; 508f36c3747SIan Campbell 509f36c3747SIan Campbell BUG_ON(offset >= PAGE_SIZE); 510f36c3747SIan Campbell 511f36c3747SIan Campbell bytes = PAGE_SIZE - offset; 512f36c3747SIan Campbell if (bytes > len) 513f36c3747SIan Campbell bytes = len; 5140d160211SJeremy Fitzhardinge 515f942dc25SIan Campbell tx->flags |= XEN_NETTXF_more_data; 5160d160211SJeremy Fitzhardinge 5172688fcb7SAndrew J. Bennieston id = get_id_from_freelist(&queue->tx_skb_freelist, 5182688fcb7SAndrew J. Bennieston queue->tx_skbs); 5192688fcb7SAndrew J. Bennieston queue->tx_skbs[id].skb = skb_get(skb); 5202688fcb7SAndrew J. Bennieston tx = RING_GET_REQUEST(&queue->tx, prod++); 5210d160211SJeremy Fitzhardinge tx->id = id; 5222688fcb7SAndrew J. Bennieston ref = gnttab_claim_grant_reference(&queue->gref_tx_head); 5230d160211SJeremy Fitzhardinge BUG_ON((signed short)ref < 0); 5240d160211SJeremy Fitzhardinge 525f36c3747SIan Campbell mfn = pfn_to_mfn(page_to_pfn(page)); 526f36c3747SIan Campbell gnttab_grant_foreign_access_ref(ref, 5272688fcb7SAndrew J. Bennieston queue->info->xbdev->otherend_id, 5280d160211SJeremy Fitzhardinge mfn, GNTMAP_readonly); 5290d160211SJeremy Fitzhardinge 5302688fcb7SAndrew J. Bennieston queue->grant_tx_page[id] = page; 5312688fcb7SAndrew J. Bennieston tx->gref = queue->grant_tx_ref[id] = ref; 532f36c3747SIan Campbell tx->offset = offset; 533f36c3747SIan Campbell tx->size = bytes; 5340d160211SJeremy Fitzhardinge tx->flags = 0; 535f36c3747SIan Campbell 536f36c3747SIan Campbell offset += bytes; 537f36c3747SIan Campbell len -= bytes; 538f36c3747SIan Campbell 539f36c3747SIan Campbell /* Next frame */ 540f36c3747SIan Campbell if (offset == PAGE_SIZE && len) { 541f36c3747SIan Campbell BUG_ON(!PageCompound(page)); 542f36c3747SIan Campbell page++; 543f36c3747SIan Campbell offset = 0; 544f36c3747SIan Campbell } 545f36c3747SIan Campbell } 5460d160211SJeremy Fitzhardinge } 5470d160211SJeremy Fitzhardinge 5482688fcb7SAndrew J. Bennieston queue->tx.req_prod_pvt = prod; 5490d160211SJeremy Fitzhardinge } 5500d160211SJeremy Fitzhardinge 551f36c3747SIan Campbell /* 552f36c3747SIan Campbell * Count how many ring slots are required to send the frags of this 553f36c3747SIan Campbell * skb. Each frag might be a compound page. 554f36c3747SIan Campbell */ 555f36c3747SIan Campbell static int xennet_count_skb_frag_slots(struct sk_buff *skb) 556f36c3747SIan Campbell { 557f36c3747SIan Campbell int i, frags = skb_shinfo(skb)->nr_frags; 558f36c3747SIan Campbell int pages = 0; 559f36c3747SIan Campbell 560f36c3747SIan Campbell for (i = 0; i < frags; i++) { 561f36c3747SIan Campbell skb_frag_t *frag = skb_shinfo(skb)->frags + i; 562f36c3747SIan Campbell unsigned long size = skb_frag_size(frag); 563f36c3747SIan Campbell unsigned long offset = frag->page_offset; 564f36c3747SIan Campbell 565f36c3747SIan Campbell /* Skip unused frames from start of page */ 566f36c3747SIan Campbell offset &= ~PAGE_MASK; 567f36c3747SIan Campbell 568f36c3747SIan Campbell pages += PFN_UP(offset + size); 569f36c3747SIan Campbell } 570f36c3747SIan Campbell 571f36c3747SIan Campbell return pages; 572f36c3747SIan Campbell } 573f36c3747SIan Campbell 57450ee6061SAndrew J. Bennieston static u16 xennet_select_queue(struct net_device *dev, struct sk_buff *skb, 57550ee6061SAndrew J. Bennieston void *accel_priv, select_queue_fallback_t fallback) 5762688fcb7SAndrew J. Bennieston { 57750ee6061SAndrew J. Bennieston unsigned int num_queues = dev->real_num_tx_queues; 57850ee6061SAndrew J. Bennieston u32 hash; 57950ee6061SAndrew J. Bennieston u16 queue_idx; 58050ee6061SAndrew J. Bennieston 58150ee6061SAndrew J. Bennieston /* First, check if there is only one queue */ 58250ee6061SAndrew J. Bennieston if (num_queues == 1) { 58350ee6061SAndrew J. Bennieston queue_idx = 0; 58450ee6061SAndrew J. Bennieston } else { 58550ee6061SAndrew J. Bennieston hash = skb_get_hash(skb); 58650ee6061SAndrew J. Bennieston queue_idx = hash % num_queues; 58750ee6061SAndrew J. Bennieston } 58850ee6061SAndrew J. Bennieston 58950ee6061SAndrew J. Bennieston return queue_idx; 5902688fcb7SAndrew J. Bennieston } 5912688fcb7SAndrew J. Bennieston 5920d160211SJeremy Fitzhardinge static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev) 5930d160211SJeremy Fitzhardinge { 5940d160211SJeremy Fitzhardinge unsigned short id; 5950d160211SJeremy Fitzhardinge struct netfront_info *np = netdev_priv(dev); 596e00f85beSstephen hemminger struct netfront_stats *stats = this_cpu_ptr(np->stats); 5970d160211SJeremy Fitzhardinge struct xen_netif_tx_request *tx; 5980d160211SJeremy Fitzhardinge char *data = skb->data; 5990d160211SJeremy Fitzhardinge RING_IDX i; 6000d160211SJeremy Fitzhardinge grant_ref_t ref; 6010d160211SJeremy Fitzhardinge unsigned long mfn; 6020d160211SJeremy Fitzhardinge int notify; 603f36c3747SIan Campbell int slots; 6040d160211SJeremy Fitzhardinge unsigned int offset = offset_in_page(data); 6050d160211SJeremy Fitzhardinge unsigned int len = skb_headlen(skb); 606cf66f9d4SKonrad Rzeszutek Wilk unsigned long flags; 6072688fcb7SAndrew J. Bennieston struct netfront_queue *queue = NULL; 6082688fcb7SAndrew J. Bennieston unsigned int num_queues = dev->real_num_tx_queues; 6092688fcb7SAndrew J. Bennieston u16 queue_index; 6102688fcb7SAndrew J. Bennieston 6112688fcb7SAndrew J. Bennieston /* Drop the packet if no queues are set up */ 6122688fcb7SAndrew J. Bennieston if (num_queues < 1) 6132688fcb7SAndrew J. Bennieston goto drop; 6142688fcb7SAndrew J. Bennieston /* Determine which queue to transmit this SKB on */ 6152688fcb7SAndrew J. Bennieston queue_index = skb_get_queue_mapping(skb); 6162688fcb7SAndrew J. Bennieston queue = &np->queues[queue_index]; 6170d160211SJeremy Fitzhardinge 6189ecd1a75SWei Liu /* If skb->len is too big for wire format, drop skb and alert 6199ecd1a75SWei Liu * user about misconfiguration. 6209ecd1a75SWei Liu */ 6219ecd1a75SWei Liu if (unlikely(skb->len > XEN_NETIF_MAX_TX_SIZE)) { 6229ecd1a75SWei Liu net_alert_ratelimited( 6239ecd1a75SWei Liu "xennet: skb->len = %u, too big for wire format\n", 6249ecd1a75SWei Liu skb->len); 6259ecd1a75SWei Liu goto drop; 6269ecd1a75SWei Liu } 6279ecd1a75SWei Liu 628f36c3747SIan Campbell slots = DIV_ROUND_UP(offset + len, PAGE_SIZE) + 629f36c3747SIan Campbell xennet_count_skb_frag_slots(skb); 630f36c3747SIan Campbell if (unlikely(slots > MAX_SKB_FRAGS + 1)) { 631f36c3747SIan Campbell net_alert_ratelimited( 632f36c3747SIan Campbell "xennet: skb rides the rocket: %d slots\n", slots); 6330d160211SJeremy Fitzhardinge goto drop; 6340d160211SJeremy Fitzhardinge } 6350d160211SJeremy Fitzhardinge 6362688fcb7SAndrew J. Bennieston spin_lock_irqsave(&queue->tx_lock, flags); 6370d160211SJeremy Fitzhardinge 6380d160211SJeremy Fitzhardinge if (unlikely(!netif_carrier_ok(dev) || 639f36c3747SIan Campbell (slots > 1 && !xennet_can_sg(dev)) || 640fc741216SJesse Gross netif_needs_gso(skb, netif_skb_features(skb)))) { 6412688fcb7SAndrew J. Bennieston spin_unlock_irqrestore(&queue->tx_lock, flags); 6420d160211SJeremy Fitzhardinge goto drop; 6430d160211SJeremy Fitzhardinge } 6440d160211SJeremy Fitzhardinge 6452688fcb7SAndrew J. Bennieston i = queue->tx.req_prod_pvt; 6460d160211SJeremy Fitzhardinge 6472688fcb7SAndrew J. Bennieston id = get_id_from_freelist(&queue->tx_skb_freelist, queue->tx_skbs); 6482688fcb7SAndrew J. Bennieston queue->tx_skbs[id].skb = skb; 6490d160211SJeremy Fitzhardinge 6502688fcb7SAndrew J. Bennieston tx = RING_GET_REQUEST(&queue->tx, i); 6510d160211SJeremy Fitzhardinge 6520d160211SJeremy Fitzhardinge tx->id = id; 6532688fcb7SAndrew J. Bennieston ref = gnttab_claim_grant_reference(&queue->gref_tx_head); 6540d160211SJeremy Fitzhardinge BUG_ON((signed short)ref < 0); 6550d160211SJeremy Fitzhardinge mfn = virt_to_mfn(data); 6560d160211SJeremy Fitzhardinge gnttab_grant_foreign_access_ref( 6572688fcb7SAndrew J. Bennieston ref, queue->info->xbdev->otherend_id, mfn, GNTMAP_readonly); 6582688fcb7SAndrew J. Bennieston queue->grant_tx_page[id] = virt_to_page(data); 6592688fcb7SAndrew J. Bennieston tx->gref = queue->grant_tx_ref[id] = ref; 6600d160211SJeremy Fitzhardinge tx->offset = offset; 6610d160211SJeremy Fitzhardinge tx->size = len; 6620d160211SJeremy Fitzhardinge 6630d160211SJeremy Fitzhardinge tx->flags = 0; 6640d160211SJeremy Fitzhardinge if (skb->ip_summed == CHECKSUM_PARTIAL) 6650d160211SJeremy Fitzhardinge /* local packet? */ 666f942dc25SIan Campbell tx->flags |= XEN_NETTXF_csum_blank | XEN_NETTXF_data_validated; 6670d160211SJeremy Fitzhardinge else if (skb->ip_summed == CHECKSUM_UNNECESSARY) 6680d160211SJeremy Fitzhardinge /* remote but checksummed. */ 669f942dc25SIan Campbell tx->flags |= XEN_NETTXF_data_validated; 6700d160211SJeremy Fitzhardinge 6710d160211SJeremy Fitzhardinge if (skb_shinfo(skb)->gso_size) { 6720d160211SJeremy Fitzhardinge struct xen_netif_extra_info *gso; 6730d160211SJeremy Fitzhardinge 6740d160211SJeremy Fitzhardinge gso = (struct xen_netif_extra_info *) 6752688fcb7SAndrew J. Bennieston RING_GET_REQUEST(&queue->tx, ++i); 6760d160211SJeremy Fitzhardinge 677f942dc25SIan Campbell tx->flags |= XEN_NETTXF_extra_info; 6780d160211SJeremy Fitzhardinge 6790d160211SJeremy Fitzhardinge gso->u.gso.size = skb_shinfo(skb)->gso_size; 6802c0057deSPaul Durrant gso->u.gso.type = (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) ? 6812c0057deSPaul Durrant XEN_NETIF_GSO_TYPE_TCPV6 : 6822c0057deSPaul Durrant XEN_NETIF_GSO_TYPE_TCPV4; 6830d160211SJeremy Fitzhardinge gso->u.gso.pad = 0; 6840d160211SJeremy Fitzhardinge gso->u.gso.features = 0; 6850d160211SJeremy Fitzhardinge 6860d160211SJeremy Fitzhardinge gso->type = XEN_NETIF_EXTRA_TYPE_GSO; 6870d160211SJeremy Fitzhardinge gso->flags = 0; 6880d160211SJeremy Fitzhardinge } 6890d160211SJeremy Fitzhardinge 6902688fcb7SAndrew J. Bennieston queue->tx.req_prod_pvt = i + 1; 6910d160211SJeremy Fitzhardinge 6922688fcb7SAndrew J. Bennieston xennet_make_frags(skb, queue, tx); 6930d160211SJeremy Fitzhardinge tx->size = skb->len; 6940d160211SJeremy Fitzhardinge 6952688fcb7SAndrew J. Bennieston RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&queue->tx, notify); 6960d160211SJeremy Fitzhardinge if (notify) 6972688fcb7SAndrew J. Bennieston notify_remote_via_irq(queue->tx_irq); 6980d160211SJeremy Fitzhardinge 699e00f85beSstephen hemminger u64_stats_update_begin(&stats->syncp); 700e00f85beSstephen hemminger stats->tx_bytes += skb->len; 701e00f85beSstephen hemminger stats->tx_packets++; 702e00f85beSstephen hemminger u64_stats_update_end(&stats->syncp); 70310a273a6SJeremy Fitzhardinge 70410a273a6SJeremy Fitzhardinge /* Note: It is not safe to access skb after xennet_tx_buf_gc()! */ 7052688fcb7SAndrew J. Bennieston xennet_tx_buf_gc(queue); 7060d160211SJeremy Fitzhardinge 7072688fcb7SAndrew J. Bennieston if (!netfront_tx_slot_available(queue)) 7082688fcb7SAndrew J. Bennieston netif_tx_stop_queue(netdev_get_tx_queue(dev, queue->id)); 7090d160211SJeremy Fitzhardinge 7102688fcb7SAndrew J. Bennieston spin_unlock_irqrestore(&queue->tx_lock, flags); 7110d160211SJeremy Fitzhardinge 7126ed10654SPatrick McHardy return NETDEV_TX_OK; 7130d160211SJeremy Fitzhardinge 7140d160211SJeremy Fitzhardinge drop: 71509f75cd7SJeff Garzik dev->stats.tx_dropped++; 716979de8a0SEric W. Biederman dev_kfree_skb_any(skb); 7176ed10654SPatrick McHardy return NETDEV_TX_OK; 7180d160211SJeremy Fitzhardinge } 7190d160211SJeremy Fitzhardinge 7200d160211SJeremy Fitzhardinge static int xennet_close(struct net_device *dev) 7210d160211SJeremy Fitzhardinge { 7220d160211SJeremy Fitzhardinge struct netfront_info *np = netdev_priv(dev); 7232688fcb7SAndrew J. Bennieston unsigned int num_queues = dev->real_num_tx_queues; 7242688fcb7SAndrew J. Bennieston unsigned int i; 7252688fcb7SAndrew J. Bennieston struct netfront_queue *queue; 7262688fcb7SAndrew J. Bennieston netif_tx_stop_all_queues(np->netdev); 7272688fcb7SAndrew J. Bennieston for (i = 0; i < num_queues; ++i) { 7282688fcb7SAndrew J. Bennieston queue = &np->queues[i]; 7292688fcb7SAndrew J. Bennieston napi_disable(&queue->napi); 7302688fcb7SAndrew J. Bennieston } 7310d160211SJeremy Fitzhardinge return 0; 7320d160211SJeremy Fitzhardinge } 7330d160211SJeremy Fitzhardinge 7342688fcb7SAndrew J. Bennieston static void xennet_move_rx_slot(struct netfront_queue *queue, struct sk_buff *skb, 7350d160211SJeremy Fitzhardinge grant_ref_t ref) 7360d160211SJeremy Fitzhardinge { 7372688fcb7SAndrew J. Bennieston int new = xennet_rxidx(queue->rx.req_prod_pvt); 7380d160211SJeremy Fitzhardinge 7392688fcb7SAndrew J. Bennieston BUG_ON(queue->rx_skbs[new]); 7402688fcb7SAndrew J. Bennieston queue->rx_skbs[new] = skb; 7412688fcb7SAndrew J. Bennieston queue->grant_rx_ref[new] = ref; 7422688fcb7SAndrew J. Bennieston RING_GET_REQUEST(&queue->rx, queue->rx.req_prod_pvt)->id = new; 7432688fcb7SAndrew J. Bennieston RING_GET_REQUEST(&queue->rx, queue->rx.req_prod_pvt)->gref = ref; 7442688fcb7SAndrew J. Bennieston queue->rx.req_prod_pvt++; 7450d160211SJeremy Fitzhardinge } 7460d160211SJeremy Fitzhardinge 7472688fcb7SAndrew J. Bennieston static int xennet_get_extras(struct netfront_queue *queue, 7480d160211SJeremy Fitzhardinge struct xen_netif_extra_info *extras, 7490d160211SJeremy Fitzhardinge RING_IDX rp) 7500d160211SJeremy Fitzhardinge 7510d160211SJeremy Fitzhardinge { 7520d160211SJeremy Fitzhardinge struct xen_netif_extra_info *extra; 7532688fcb7SAndrew J. Bennieston struct device *dev = &queue->info->netdev->dev; 7542688fcb7SAndrew J. Bennieston RING_IDX cons = queue->rx.rsp_cons; 7550d160211SJeremy Fitzhardinge int err = 0; 7560d160211SJeremy Fitzhardinge 7570d160211SJeremy Fitzhardinge do { 7580d160211SJeremy Fitzhardinge struct sk_buff *skb; 7590d160211SJeremy Fitzhardinge grant_ref_t ref; 7600d160211SJeremy Fitzhardinge 7610d160211SJeremy Fitzhardinge if (unlikely(cons + 1 == rp)) { 7620d160211SJeremy Fitzhardinge if (net_ratelimit()) 7630d160211SJeremy Fitzhardinge dev_warn(dev, "Missing extra info\n"); 7640d160211SJeremy Fitzhardinge err = -EBADR; 7650d160211SJeremy Fitzhardinge break; 7660d160211SJeremy Fitzhardinge } 7670d160211SJeremy Fitzhardinge 7680d160211SJeremy Fitzhardinge extra = (struct xen_netif_extra_info *) 7692688fcb7SAndrew J. Bennieston RING_GET_RESPONSE(&queue->rx, ++cons); 7700d160211SJeremy Fitzhardinge 7710d160211SJeremy Fitzhardinge if (unlikely(!extra->type || 7720d160211SJeremy Fitzhardinge extra->type >= XEN_NETIF_EXTRA_TYPE_MAX)) { 7730d160211SJeremy Fitzhardinge if (net_ratelimit()) 7740d160211SJeremy Fitzhardinge dev_warn(dev, "Invalid extra type: %d\n", 7750d160211SJeremy Fitzhardinge extra->type); 7760d160211SJeremy Fitzhardinge err = -EINVAL; 7770d160211SJeremy Fitzhardinge } else { 7780d160211SJeremy Fitzhardinge memcpy(&extras[extra->type - 1], extra, 7790d160211SJeremy Fitzhardinge sizeof(*extra)); 7800d160211SJeremy Fitzhardinge } 7810d160211SJeremy Fitzhardinge 7822688fcb7SAndrew J. Bennieston skb = xennet_get_rx_skb(queue, cons); 7832688fcb7SAndrew J. Bennieston ref = xennet_get_rx_ref(queue, cons); 7842688fcb7SAndrew J. Bennieston xennet_move_rx_slot(queue, skb, ref); 7850d160211SJeremy Fitzhardinge } while (extra->flags & XEN_NETIF_EXTRA_FLAG_MORE); 7860d160211SJeremy Fitzhardinge 7872688fcb7SAndrew J. Bennieston queue->rx.rsp_cons = cons; 7880d160211SJeremy Fitzhardinge return err; 7890d160211SJeremy Fitzhardinge } 7900d160211SJeremy Fitzhardinge 7912688fcb7SAndrew J. Bennieston static int xennet_get_responses(struct netfront_queue *queue, 7920d160211SJeremy Fitzhardinge struct netfront_rx_info *rinfo, RING_IDX rp, 7930d160211SJeremy Fitzhardinge struct sk_buff_head *list) 7940d160211SJeremy Fitzhardinge { 7950d160211SJeremy Fitzhardinge struct xen_netif_rx_response *rx = &rinfo->rx; 7960d160211SJeremy Fitzhardinge struct xen_netif_extra_info *extras = rinfo->extras; 7972688fcb7SAndrew J. Bennieston struct device *dev = &queue->info->netdev->dev; 7982688fcb7SAndrew J. Bennieston RING_IDX cons = queue->rx.rsp_cons; 7992688fcb7SAndrew J. Bennieston struct sk_buff *skb = xennet_get_rx_skb(queue, cons); 8002688fcb7SAndrew J. Bennieston grant_ref_t ref = xennet_get_rx_ref(queue, cons); 8010d160211SJeremy Fitzhardinge int max = MAX_SKB_FRAGS + (rx->status <= RX_COPY_THRESHOLD); 8027158ff6dSWei Liu int slots = 1; 8030d160211SJeremy Fitzhardinge int err = 0; 8040d160211SJeremy Fitzhardinge unsigned long ret; 8050d160211SJeremy Fitzhardinge 806f942dc25SIan Campbell if (rx->flags & XEN_NETRXF_extra_info) { 8072688fcb7SAndrew J. Bennieston err = xennet_get_extras(queue, extras, rp); 8082688fcb7SAndrew J. Bennieston cons = queue->rx.rsp_cons; 8090d160211SJeremy Fitzhardinge } 8100d160211SJeremy Fitzhardinge 8110d160211SJeremy Fitzhardinge for (;;) { 8120d160211SJeremy Fitzhardinge if (unlikely(rx->status < 0 || 8130d160211SJeremy Fitzhardinge rx->offset + rx->status > PAGE_SIZE)) { 8140d160211SJeremy Fitzhardinge if (net_ratelimit()) 8150d160211SJeremy Fitzhardinge dev_warn(dev, "rx->offset: %x, size: %u\n", 8160d160211SJeremy Fitzhardinge rx->offset, rx->status); 8172688fcb7SAndrew J. Bennieston xennet_move_rx_slot(queue, skb, ref); 8180d160211SJeremy Fitzhardinge err = -EINVAL; 8190d160211SJeremy Fitzhardinge goto next; 8200d160211SJeremy Fitzhardinge } 8210d160211SJeremy Fitzhardinge 8220d160211SJeremy Fitzhardinge /* 8230d160211SJeremy Fitzhardinge * This definitely indicates a bug, either in this driver or in 8240d160211SJeremy Fitzhardinge * the backend driver. In future this should flag the bad 825697089dcSWei Liu * situation to the system controller to reboot the backend. 8260d160211SJeremy Fitzhardinge */ 8270d160211SJeremy Fitzhardinge if (ref == GRANT_INVALID_REF) { 8280d160211SJeremy Fitzhardinge if (net_ratelimit()) 8290d160211SJeremy Fitzhardinge dev_warn(dev, "Bad rx response id %d.\n", 8300d160211SJeremy Fitzhardinge rx->id); 8310d160211SJeremy Fitzhardinge err = -EINVAL; 8320d160211SJeremy Fitzhardinge goto next; 8330d160211SJeremy Fitzhardinge } 8340d160211SJeremy Fitzhardinge 8350d160211SJeremy Fitzhardinge ret = gnttab_end_foreign_access_ref(ref, 0); 8360d160211SJeremy Fitzhardinge BUG_ON(!ret); 8370d160211SJeremy Fitzhardinge 8382688fcb7SAndrew J. Bennieston gnttab_release_grant_reference(&queue->gref_rx_head, ref); 8390d160211SJeremy Fitzhardinge 8400d160211SJeremy Fitzhardinge __skb_queue_tail(list, skb); 8410d160211SJeremy Fitzhardinge 8420d160211SJeremy Fitzhardinge next: 843f942dc25SIan Campbell if (!(rx->flags & XEN_NETRXF_more_data)) 8440d160211SJeremy Fitzhardinge break; 8450d160211SJeremy Fitzhardinge 8467158ff6dSWei Liu if (cons + slots == rp) { 8470d160211SJeremy Fitzhardinge if (net_ratelimit()) 8487158ff6dSWei Liu dev_warn(dev, "Need more slots\n"); 8490d160211SJeremy Fitzhardinge err = -ENOENT; 8500d160211SJeremy Fitzhardinge break; 8510d160211SJeremy Fitzhardinge } 8520d160211SJeremy Fitzhardinge 8532688fcb7SAndrew J. Bennieston rx = RING_GET_RESPONSE(&queue->rx, cons + slots); 8542688fcb7SAndrew J. Bennieston skb = xennet_get_rx_skb(queue, cons + slots); 8552688fcb7SAndrew J. Bennieston ref = xennet_get_rx_ref(queue, cons + slots); 8567158ff6dSWei Liu slots++; 8570d160211SJeremy Fitzhardinge } 8580d160211SJeremy Fitzhardinge 8597158ff6dSWei Liu if (unlikely(slots > max)) { 8600d160211SJeremy Fitzhardinge if (net_ratelimit()) 861697089dcSWei Liu dev_warn(dev, "Too many slots\n"); 8620d160211SJeremy Fitzhardinge err = -E2BIG; 8630d160211SJeremy Fitzhardinge } 8640d160211SJeremy Fitzhardinge 8650d160211SJeremy Fitzhardinge if (unlikely(err)) 8662688fcb7SAndrew J. Bennieston queue->rx.rsp_cons = cons + slots; 8670d160211SJeremy Fitzhardinge 8680d160211SJeremy Fitzhardinge return err; 8690d160211SJeremy Fitzhardinge } 8700d160211SJeremy Fitzhardinge 8710d160211SJeremy Fitzhardinge static int xennet_set_skb_gso(struct sk_buff *skb, 8720d160211SJeremy Fitzhardinge struct xen_netif_extra_info *gso) 8730d160211SJeremy Fitzhardinge { 8740d160211SJeremy Fitzhardinge if (!gso->u.gso.size) { 8750d160211SJeremy Fitzhardinge if (net_ratelimit()) 876383eda32SJoe Perches pr_warn("GSO size must not be zero\n"); 8770d160211SJeremy Fitzhardinge return -EINVAL; 8780d160211SJeremy Fitzhardinge } 8790d160211SJeremy Fitzhardinge 8802c0057deSPaul Durrant if (gso->u.gso.type != XEN_NETIF_GSO_TYPE_TCPV4 && 8812c0057deSPaul Durrant gso->u.gso.type != XEN_NETIF_GSO_TYPE_TCPV6) { 8820d160211SJeremy Fitzhardinge if (net_ratelimit()) 883383eda32SJoe Perches pr_warn("Bad GSO type %d\n", gso->u.gso.type); 8840d160211SJeremy Fitzhardinge return -EINVAL; 8850d160211SJeremy Fitzhardinge } 8860d160211SJeremy Fitzhardinge 8870d160211SJeremy Fitzhardinge skb_shinfo(skb)->gso_size = gso->u.gso.size; 8882c0057deSPaul Durrant skb_shinfo(skb)->gso_type = 8892c0057deSPaul Durrant (gso->u.gso.type == XEN_NETIF_GSO_TYPE_TCPV4) ? 8902c0057deSPaul Durrant SKB_GSO_TCPV4 : 8912c0057deSPaul Durrant SKB_GSO_TCPV6; 8920d160211SJeremy Fitzhardinge 8930d160211SJeremy Fitzhardinge /* Header must be checked, and gso_segs computed. */ 8940d160211SJeremy Fitzhardinge skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY; 8950d160211SJeremy Fitzhardinge skb_shinfo(skb)->gso_segs = 0; 8960d160211SJeremy Fitzhardinge 8970d160211SJeremy Fitzhardinge return 0; 8980d160211SJeremy Fitzhardinge } 8990d160211SJeremy Fitzhardinge 9002688fcb7SAndrew J. Bennieston static RING_IDX xennet_fill_frags(struct netfront_queue *queue, 9010d160211SJeremy Fitzhardinge struct sk_buff *skb, 9020d160211SJeremy Fitzhardinge struct sk_buff_head *list) 9030d160211SJeremy Fitzhardinge { 9040d160211SJeremy Fitzhardinge struct skb_shared_info *shinfo = skb_shinfo(skb); 9052688fcb7SAndrew J. Bennieston RING_IDX cons = queue->rx.rsp_cons; 9060d160211SJeremy Fitzhardinge struct sk_buff *nskb; 9070d160211SJeremy Fitzhardinge 9080d160211SJeremy Fitzhardinge while ((nskb = __skb_dequeue(list))) { 9090d160211SJeremy Fitzhardinge struct xen_netif_rx_response *rx = 9102688fcb7SAndrew J. Bennieston RING_GET_RESPONSE(&queue->rx, ++cons); 91101c68026SIan Campbell skb_frag_t *nfrag = &skb_shinfo(nskb)->frags[0]; 9120d160211SJeremy Fitzhardinge 913093b9c71SJan Beulich if (shinfo->nr_frags == MAX_SKB_FRAGS) { 914093b9c71SJan Beulich unsigned int pull_to = NETFRONT_SKB_CB(skb)->pull_to; 9150d160211SJeremy Fitzhardinge 916093b9c71SJan Beulich BUG_ON(pull_to <= skb_headlen(skb)); 917093b9c71SJan Beulich __pskb_pull_tail(skb, pull_to - skb_headlen(skb)); 918093b9c71SJan Beulich } 919093b9c71SJan Beulich BUG_ON(shinfo->nr_frags >= MAX_SKB_FRAGS); 920093b9c71SJan Beulich 921093b9c71SJan Beulich skb_add_rx_frag(skb, shinfo->nr_frags, skb_frag_page(nfrag), 922093b9c71SJan Beulich rx->offset, rx->status, PAGE_SIZE); 9230d160211SJeremy Fitzhardinge 9240d160211SJeremy Fitzhardinge skb_shinfo(nskb)->nr_frags = 0; 9250d160211SJeremy Fitzhardinge kfree_skb(nskb); 9260d160211SJeremy Fitzhardinge } 9270d160211SJeremy Fitzhardinge 9280d160211SJeremy Fitzhardinge return cons; 9290d160211SJeremy Fitzhardinge } 9300d160211SJeremy Fitzhardinge 931e0ce4af9SIan Campbell static int checksum_setup(struct net_device *dev, struct sk_buff *skb) 9320d160211SJeremy Fitzhardinge { 933b5cf66cdSPaul Durrant bool recalculate_partial_csum = false; 934e0ce4af9SIan Campbell 935e0ce4af9SIan Campbell /* 936e0ce4af9SIan Campbell * A GSO SKB must be CHECKSUM_PARTIAL. However some buggy 937e0ce4af9SIan Campbell * peers can fail to set NETRXF_csum_blank when sending a GSO 938e0ce4af9SIan Campbell * frame. In this case force the SKB to CHECKSUM_PARTIAL and 939e0ce4af9SIan Campbell * recalculate the partial checksum. 940e0ce4af9SIan Campbell */ 941e0ce4af9SIan Campbell if (skb->ip_summed != CHECKSUM_PARTIAL && skb_is_gso(skb)) { 942e0ce4af9SIan Campbell struct netfront_info *np = netdev_priv(dev); 9432688fcb7SAndrew J. Bennieston atomic_inc(&np->rx_gso_checksum_fixup); 944e0ce4af9SIan Campbell skb->ip_summed = CHECKSUM_PARTIAL; 945b5cf66cdSPaul Durrant recalculate_partial_csum = true; 946e0ce4af9SIan Campbell } 947e0ce4af9SIan Campbell 948e0ce4af9SIan Campbell /* A non-CHECKSUM_PARTIAL SKB does not require setup. */ 949e0ce4af9SIan Campbell if (skb->ip_summed != CHECKSUM_PARTIAL) 950e0ce4af9SIan Campbell return 0; 9510d160211SJeremy Fitzhardinge 952b5cf66cdSPaul Durrant return skb_checksum_setup(skb, recalculate_partial_csum); 9530d160211SJeremy Fitzhardinge } 9540d160211SJeremy Fitzhardinge 9552688fcb7SAndrew J. Bennieston static int handle_incoming_queue(struct netfront_queue *queue, 9560d160211SJeremy Fitzhardinge struct sk_buff_head *rxq) 9570d160211SJeremy Fitzhardinge { 9582688fcb7SAndrew J. Bennieston struct netfront_stats *stats = this_cpu_ptr(queue->info->stats); 9590d160211SJeremy Fitzhardinge int packets_dropped = 0; 9600d160211SJeremy Fitzhardinge struct sk_buff *skb; 9610d160211SJeremy Fitzhardinge 9620d160211SJeremy Fitzhardinge while ((skb = __skb_dequeue(rxq)) != NULL) { 9633683243bSIan Campbell int pull_to = NETFRONT_SKB_CB(skb)->pull_to; 9640d160211SJeremy Fitzhardinge 965093b9c71SJan Beulich if (pull_to > skb_headlen(skb)) 9663683243bSIan Campbell __pskb_pull_tail(skb, pull_to - skb_headlen(skb)); 9670d160211SJeremy Fitzhardinge 9680d160211SJeremy Fitzhardinge /* Ethernet work: Delayed to here as it peeks the header. */ 9692688fcb7SAndrew J. Bennieston skb->protocol = eth_type_trans(skb, queue->info->netdev); 970d554f73dSWei Liu skb_reset_network_header(skb); 9710d160211SJeremy Fitzhardinge 9722688fcb7SAndrew J. Bennieston if (checksum_setup(queue->info->netdev, skb)) { 9730d160211SJeremy Fitzhardinge kfree_skb(skb); 9740d160211SJeremy Fitzhardinge packets_dropped++; 9752688fcb7SAndrew J. Bennieston queue->info->netdev->stats.rx_errors++; 9760d160211SJeremy Fitzhardinge continue; 9770d160211SJeremy Fitzhardinge } 9780d160211SJeremy Fitzhardinge 979e00f85beSstephen hemminger u64_stats_update_begin(&stats->syncp); 980e00f85beSstephen hemminger stats->rx_packets++; 981e00f85beSstephen hemminger stats->rx_bytes += skb->len; 982e00f85beSstephen hemminger u64_stats_update_end(&stats->syncp); 9830d160211SJeremy Fitzhardinge 9840d160211SJeremy Fitzhardinge /* Pass it up. */ 9852688fcb7SAndrew J. Bennieston napi_gro_receive(&queue->napi, skb); 9860d160211SJeremy Fitzhardinge } 9870d160211SJeremy Fitzhardinge 9880d160211SJeremy Fitzhardinge return packets_dropped; 9890d160211SJeremy Fitzhardinge } 9900d160211SJeremy Fitzhardinge 991bea3348eSStephen Hemminger static int xennet_poll(struct napi_struct *napi, int budget) 9920d160211SJeremy Fitzhardinge { 9932688fcb7SAndrew J. Bennieston struct netfront_queue *queue = container_of(napi, struct netfront_queue, napi); 9942688fcb7SAndrew J. Bennieston struct net_device *dev = queue->info->netdev; 9950d160211SJeremy Fitzhardinge struct sk_buff *skb; 9960d160211SJeremy Fitzhardinge struct netfront_rx_info rinfo; 9970d160211SJeremy Fitzhardinge struct xen_netif_rx_response *rx = &rinfo.rx; 9980d160211SJeremy Fitzhardinge struct xen_netif_extra_info *extras = rinfo.extras; 9990d160211SJeremy Fitzhardinge RING_IDX i, rp; 1000bea3348eSStephen Hemminger int work_done; 10010d160211SJeremy Fitzhardinge struct sk_buff_head rxq; 10020d160211SJeremy Fitzhardinge struct sk_buff_head errq; 10030d160211SJeremy Fitzhardinge struct sk_buff_head tmpq; 10040d160211SJeremy Fitzhardinge unsigned long flags; 10050d160211SJeremy Fitzhardinge int err; 10060d160211SJeremy Fitzhardinge 10072688fcb7SAndrew J. Bennieston spin_lock(&queue->rx_lock); 10080d160211SJeremy Fitzhardinge 10090d160211SJeremy Fitzhardinge skb_queue_head_init(&rxq); 10100d160211SJeremy Fitzhardinge skb_queue_head_init(&errq); 10110d160211SJeremy Fitzhardinge skb_queue_head_init(&tmpq); 10120d160211SJeremy Fitzhardinge 10132688fcb7SAndrew J. Bennieston rp = queue->rx.sring->rsp_prod; 10140d160211SJeremy Fitzhardinge rmb(); /* Ensure we see queued responses up to 'rp'. */ 10150d160211SJeremy Fitzhardinge 10162688fcb7SAndrew J. Bennieston i = queue->rx.rsp_cons; 10170d160211SJeremy Fitzhardinge work_done = 0; 10180d160211SJeremy Fitzhardinge while ((i != rp) && (work_done < budget)) { 10192688fcb7SAndrew J. Bennieston memcpy(rx, RING_GET_RESPONSE(&queue->rx, i), sizeof(*rx)); 10200d160211SJeremy Fitzhardinge memset(extras, 0, sizeof(rinfo.extras)); 10210d160211SJeremy Fitzhardinge 10222688fcb7SAndrew J. Bennieston err = xennet_get_responses(queue, &rinfo, rp, &tmpq); 10230d160211SJeremy Fitzhardinge 10240d160211SJeremy Fitzhardinge if (unlikely(err)) { 10250d160211SJeremy Fitzhardinge err: 10260d160211SJeremy Fitzhardinge while ((skb = __skb_dequeue(&tmpq))) 10270d160211SJeremy Fitzhardinge __skb_queue_tail(&errq, skb); 102809f75cd7SJeff Garzik dev->stats.rx_errors++; 10292688fcb7SAndrew J. Bennieston i = queue->rx.rsp_cons; 10300d160211SJeremy Fitzhardinge continue; 10310d160211SJeremy Fitzhardinge } 10320d160211SJeremy Fitzhardinge 10330d160211SJeremy Fitzhardinge skb = __skb_dequeue(&tmpq); 10340d160211SJeremy Fitzhardinge 10350d160211SJeremy Fitzhardinge if (extras[XEN_NETIF_EXTRA_TYPE_GSO - 1].type) { 10360d160211SJeremy Fitzhardinge struct xen_netif_extra_info *gso; 10370d160211SJeremy Fitzhardinge gso = &extras[XEN_NETIF_EXTRA_TYPE_GSO - 1]; 10380d160211SJeremy Fitzhardinge 10390d160211SJeremy Fitzhardinge if (unlikely(xennet_set_skb_gso(skb, gso))) { 10400d160211SJeremy Fitzhardinge __skb_queue_head(&tmpq, skb); 10412688fcb7SAndrew J. Bennieston queue->rx.rsp_cons += skb_queue_len(&tmpq); 10420d160211SJeremy Fitzhardinge goto err; 10430d160211SJeremy Fitzhardinge } 10440d160211SJeremy Fitzhardinge } 10450d160211SJeremy Fitzhardinge 10463683243bSIan Campbell NETFRONT_SKB_CB(skb)->pull_to = rx->status; 10473683243bSIan Campbell if (NETFRONT_SKB_CB(skb)->pull_to > RX_COPY_THRESHOLD) 10483683243bSIan Campbell NETFRONT_SKB_CB(skb)->pull_to = RX_COPY_THRESHOLD; 10490d160211SJeremy Fitzhardinge 10503683243bSIan Campbell skb_shinfo(skb)->frags[0].page_offset = rx->offset; 10513683243bSIan Campbell skb_frag_size_set(&skb_shinfo(skb)->frags[0], rx->status); 10523683243bSIan Campbell skb->data_len = rx->status; 1053093b9c71SJan Beulich skb->len += rx->status; 10540d160211SJeremy Fitzhardinge 10552688fcb7SAndrew J. Bennieston i = xennet_fill_frags(queue, skb, &tmpq); 10560d160211SJeremy Fitzhardinge 1057f942dc25SIan Campbell if (rx->flags & XEN_NETRXF_csum_blank) 10580d160211SJeremy Fitzhardinge skb->ip_summed = CHECKSUM_PARTIAL; 1059f942dc25SIan Campbell else if (rx->flags & XEN_NETRXF_data_validated) 10600d160211SJeremy Fitzhardinge skb->ip_summed = CHECKSUM_UNNECESSARY; 10610d160211SJeremy Fitzhardinge 10620d160211SJeremy Fitzhardinge __skb_queue_tail(&rxq, skb); 10630d160211SJeremy Fitzhardinge 10642688fcb7SAndrew J. Bennieston queue->rx.rsp_cons = ++i; 10650d160211SJeremy Fitzhardinge work_done++; 10660d160211SJeremy Fitzhardinge } 10670d160211SJeremy Fitzhardinge 106856cfe5d0SWang Chen __skb_queue_purge(&errq); 10690d160211SJeremy Fitzhardinge 10702688fcb7SAndrew J. Bennieston work_done -= handle_incoming_queue(queue, &rxq); 10710d160211SJeremy Fitzhardinge 10720d160211SJeremy Fitzhardinge /* If we get a callback with very few responses, reduce fill target. */ 10730d160211SJeremy Fitzhardinge /* NB. Note exponential increase, linear decrease. */ 10742688fcb7SAndrew J. Bennieston if (((queue->rx.req_prod_pvt - queue->rx.sring->rsp_prod) > 10752688fcb7SAndrew J. Bennieston ((3*queue->rx_target) / 4)) && 10762688fcb7SAndrew J. Bennieston (--queue->rx_target < queue->rx_min_target)) 10772688fcb7SAndrew J. Bennieston queue->rx_target = queue->rx_min_target; 10780d160211SJeremy Fitzhardinge 10792688fcb7SAndrew J. Bennieston xennet_alloc_rx_buffers(queue); 10800d160211SJeremy Fitzhardinge 10810d160211SJeremy Fitzhardinge if (work_done < budget) { 1082bea3348eSStephen Hemminger int more_to_do = 0; 1083bea3348eSStephen Hemminger 108499d3d587SWei Liu napi_gro_flush(napi, false); 108599d3d587SWei Liu 10860d160211SJeremy Fitzhardinge local_irq_save(flags); 10870d160211SJeremy Fitzhardinge 10882688fcb7SAndrew J. Bennieston RING_FINAL_CHECK_FOR_RESPONSES(&queue->rx, more_to_do); 10890d160211SJeremy Fitzhardinge if (!more_to_do) 1090288379f0SBen Hutchings __napi_complete(napi); 10910d160211SJeremy Fitzhardinge 10920d160211SJeremy Fitzhardinge local_irq_restore(flags); 10930d160211SJeremy Fitzhardinge } 10940d160211SJeremy Fitzhardinge 10952688fcb7SAndrew J. Bennieston spin_unlock(&queue->rx_lock); 10960d160211SJeremy Fitzhardinge 1097bea3348eSStephen Hemminger return work_done; 10980d160211SJeremy Fitzhardinge } 10990d160211SJeremy Fitzhardinge 11000d160211SJeremy Fitzhardinge static int xennet_change_mtu(struct net_device *dev, int mtu) 11010d160211SJeremy Fitzhardinge { 11029ecd1a75SWei Liu int max = xennet_can_sg(dev) ? 11039ecd1a75SWei Liu XEN_NETIF_MAX_TX_SIZE - MAX_TCP_HEADER : ETH_DATA_LEN; 11040d160211SJeremy Fitzhardinge 11050d160211SJeremy Fitzhardinge if (mtu > max) 11060d160211SJeremy Fitzhardinge return -EINVAL; 11070d160211SJeremy Fitzhardinge dev->mtu = mtu; 11080d160211SJeremy Fitzhardinge return 0; 11090d160211SJeremy Fitzhardinge } 11100d160211SJeremy Fitzhardinge 1111e00f85beSstephen hemminger static struct rtnl_link_stats64 *xennet_get_stats64(struct net_device *dev, 1112e00f85beSstephen hemminger struct rtnl_link_stats64 *tot) 1113e00f85beSstephen hemminger { 1114e00f85beSstephen hemminger struct netfront_info *np = netdev_priv(dev); 1115e00f85beSstephen hemminger int cpu; 1116e00f85beSstephen hemminger 1117e00f85beSstephen hemminger for_each_possible_cpu(cpu) { 1118e00f85beSstephen hemminger struct netfront_stats *stats = per_cpu_ptr(np->stats, cpu); 1119e00f85beSstephen hemminger u64 rx_packets, rx_bytes, tx_packets, tx_bytes; 1120e00f85beSstephen hemminger unsigned int start; 1121e00f85beSstephen hemminger 1122e00f85beSstephen hemminger do { 112357a7744eSEric W. Biederman start = u64_stats_fetch_begin_irq(&stats->syncp); 1124e00f85beSstephen hemminger 1125e00f85beSstephen hemminger rx_packets = stats->rx_packets; 1126e00f85beSstephen hemminger tx_packets = stats->tx_packets; 1127e00f85beSstephen hemminger rx_bytes = stats->rx_bytes; 1128e00f85beSstephen hemminger tx_bytes = stats->tx_bytes; 112957a7744eSEric W. Biederman } while (u64_stats_fetch_retry_irq(&stats->syncp, start)); 1130e00f85beSstephen hemminger 1131e00f85beSstephen hemminger tot->rx_packets += rx_packets; 1132e00f85beSstephen hemminger tot->tx_packets += tx_packets; 1133e00f85beSstephen hemminger tot->rx_bytes += rx_bytes; 1134e00f85beSstephen hemminger tot->tx_bytes += tx_bytes; 1135e00f85beSstephen hemminger } 1136e00f85beSstephen hemminger 1137e00f85beSstephen hemminger tot->rx_errors = dev->stats.rx_errors; 1138e00f85beSstephen hemminger tot->tx_dropped = dev->stats.tx_dropped; 1139e00f85beSstephen hemminger 1140e00f85beSstephen hemminger return tot; 1141e00f85beSstephen hemminger } 1142e00f85beSstephen hemminger 11432688fcb7SAndrew J. Bennieston static void xennet_release_tx_bufs(struct netfront_queue *queue) 11440d160211SJeremy Fitzhardinge { 11450d160211SJeremy Fitzhardinge struct sk_buff *skb; 11460d160211SJeremy Fitzhardinge int i; 11470d160211SJeremy Fitzhardinge 11480d160211SJeremy Fitzhardinge for (i = 0; i < NET_TX_RING_SIZE; i++) { 11490d160211SJeremy Fitzhardinge /* Skip over entries which are actually freelist references */ 11502688fcb7SAndrew J. Bennieston if (skb_entry_is_link(&queue->tx_skbs[i])) 11510d160211SJeremy Fitzhardinge continue; 11520d160211SJeremy Fitzhardinge 11532688fcb7SAndrew J. Bennieston skb = queue->tx_skbs[i].skb; 11542688fcb7SAndrew J. Bennieston get_page(queue->grant_tx_page[i]); 11552688fcb7SAndrew J. Bennieston gnttab_end_foreign_access(queue->grant_tx_ref[i], 1156cefe0078SAnnie Li GNTMAP_readonly, 11572688fcb7SAndrew J. Bennieston (unsigned long)page_address(queue->grant_tx_page[i])); 11582688fcb7SAndrew J. Bennieston queue->grant_tx_page[i] = NULL; 11592688fcb7SAndrew J. Bennieston queue->grant_tx_ref[i] = GRANT_INVALID_REF; 11602688fcb7SAndrew J. Bennieston add_id_to_freelist(&queue->tx_skb_freelist, queue->tx_skbs, i); 11610d160211SJeremy Fitzhardinge dev_kfree_skb_irq(skb); 11620d160211SJeremy Fitzhardinge } 11630d160211SJeremy Fitzhardinge } 11640d160211SJeremy Fitzhardinge 11652688fcb7SAndrew J. Bennieston static void xennet_release_rx_bufs(struct netfront_queue *queue) 11660d160211SJeremy Fitzhardinge { 11670d160211SJeremy Fitzhardinge int id, ref; 11680d160211SJeremy Fitzhardinge 11692688fcb7SAndrew J. Bennieston spin_lock_bh(&queue->rx_lock); 11700d160211SJeremy Fitzhardinge 11710d160211SJeremy Fitzhardinge for (id = 0; id < NET_RX_RING_SIZE; id++) { 1172cefe0078SAnnie Li struct sk_buff *skb; 1173cefe0078SAnnie Li struct page *page; 11740d160211SJeremy Fitzhardinge 11752688fcb7SAndrew J. Bennieston skb = queue->rx_skbs[id]; 1176cefe0078SAnnie Li if (!skb) 1177cefe0078SAnnie Li continue; 1178cefe0078SAnnie Li 11792688fcb7SAndrew J. Bennieston ref = queue->grant_rx_ref[id]; 1180cefe0078SAnnie Li if (ref == GRANT_INVALID_REF) 1181cefe0078SAnnie Li continue; 1182cefe0078SAnnie Li 1183cefe0078SAnnie Li page = skb_frag_page(&skb_shinfo(skb)->frags[0]); 1184cefe0078SAnnie Li 1185cefe0078SAnnie Li /* gnttab_end_foreign_access() needs a page ref until 1186cefe0078SAnnie Li * foreign access is ended (which may be deferred). 1187cefe0078SAnnie Li */ 1188cefe0078SAnnie Li get_page(page); 1189cefe0078SAnnie Li gnttab_end_foreign_access(ref, 0, 1190cefe0078SAnnie Li (unsigned long)page_address(page)); 11912688fcb7SAndrew J. Bennieston queue->grant_rx_ref[id] = GRANT_INVALID_REF; 11920d160211SJeremy Fitzhardinge 1193cefe0078SAnnie Li kfree_skb(skb); 11940d160211SJeremy Fitzhardinge } 11950d160211SJeremy Fitzhardinge 11962688fcb7SAndrew J. Bennieston spin_unlock_bh(&queue->rx_lock); 11970d160211SJeremy Fitzhardinge } 11980d160211SJeremy Fitzhardinge 11990d160211SJeremy Fitzhardinge static void xennet_uninit(struct net_device *dev) 12000d160211SJeremy Fitzhardinge { 12010d160211SJeremy Fitzhardinge struct netfront_info *np = netdev_priv(dev); 12022688fcb7SAndrew J. Bennieston unsigned int num_queues = dev->real_num_tx_queues; 12032688fcb7SAndrew J. Bennieston struct netfront_queue *queue; 12042688fcb7SAndrew J. Bennieston unsigned int i; 12052688fcb7SAndrew J. Bennieston 12062688fcb7SAndrew J. Bennieston for (i = 0; i < num_queues; ++i) { 12072688fcb7SAndrew J. Bennieston queue = &np->queues[i]; 12082688fcb7SAndrew J. Bennieston xennet_release_tx_bufs(queue); 12092688fcb7SAndrew J. Bennieston xennet_release_rx_bufs(queue); 12102688fcb7SAndrew J. Bennieston gnttab_free_grant_references(queue->gref_tx_head); 12112688fcb7SAndrew J. Bennieston gnttab_free_grant_references(queue->gref_rx_head); 12122688fcb7SAndrew J. Bennieston } 12130d160211SJeremy Fitzhardinge } 12140d160211SJeremy Fitzhardinge 1215c8f44affSMichał Mirosław static netdev_features_t xennet_fix_features(struct net_device *dev, 1216c8f44affSMichał Mirosław netdev_features_t features) 12178f7b01a1SEric Dumazet { 12188f7b01a1SEric Dumazet struct netfront_info *np = netdev_priv(dev); 12198f7b01a1SEric Dumazet int val; 12208f7b01a1SEric Dumazet 12218f7b01a1SEric Dumazet if (features & NETIF_F_SG) { 12228f7b01a1SEric Dumazet if (xenbus_scanf(XBT_NIL, np->xbdev->otherend, "feature-sg", 12238f7b01a1SEric Dumazet "%d", &val) < 0) 12248f7b01a1SEric Dumazet val = 0; 12258f7b01a1SEric Dumazet 12268f7b01a1SEric Dumazet if (!val) 12278f7b01a1SEric Dumazet features &= ~NETIF_F_SG; 12288f7b01a1SEric Dumazet } 12298f7b01a1SEric Dumazet 12302c0057deSPaul Durrant if (features & NETIF_F_IPV6_CSUM) { 12312c0057deSPaul Durrant if (xenbus_scanf(XBT_NIL, np->xbdev->otherend, 12322c0057deSPaul Durrant "feature-ipv6-csum-offload", "%d", &val) < 0) 12332c0057deSPaul Durrant val = 0; 12342c0057deSPaul Durrant 12352c0057deSPaul Durrant if (!val) 12362c0057deSPaul Durrant features &= ~NETIF_F_IPV6_CSUM; 12372c0057deSPaul Durrant } 12382c0057deSPaul Durrant 12398f7b01a1SEric Dumazet if (features & NETIF_F_TSO) { 12408f7b01a1SEric Dumazet if (xenbus_scanf(XBT_NIL, np->xbdev->otherend, 12418f7b01a1SEric Dumazet "feature-gso-tcpv4", "%d", &val) < 0) 12428f7b01a1SEric Dumazet val = 0; 12438f7b01a1SEric Dumazet 12448f7b01a1SEric Dumazet if (!val) 12458f7b01a1SEric Dumazet features &= ~NETIF_F_TSO; 12468f7b01a1SEric Dumazet } 12478f7b01a1SEric Dumazet 12482c0057deSPaul Durrant if (features & NETIF_F_TSO6) { 12492c0057deSPaul Durrant if (xenbus_scanf(XBT_NIL, np->xbdev->otherend, 12502c0057deSPaul Durrant "feature-gso-tcpv6", "%d", &val) < 0) 12512c0057deSPaul Durrant val = 0; 12522c0057deSPaul Durrant 12532c0057deSPaul Durrant if (!val) 12542c0057deSPaul Durrant features &= ~NETIF_F_TSO6; 12552c0057deSPaul Durrant } 12562c0057deSPaul Durrant 12578f7b01a1SEric Dumazet return features; 12588f7b01a1SEric Dumazet } 12598f7b01a1SEric Dumazet 1260c8f44affSMichał Mirosław static int xennet_set_features(struct net_device *dev, 1261c8f44affSMichał Mirosław netdev_features_t features) 12628f7b01a1SEric Dumazet { 12638f7b01a1SEric Dumazet if (!(features & NETIF_F_SG) && dev->mtu > ETH_DATA_LEN) { 12648f7b01a1SEric Dumazet netdev_info(dev, "Reducing MTU because no SG offload"); 12658f7b01a1SEric Dumazet dev->mtu = ETH_DATA_LEN; 12668f7b01a1SEric Dumazet } 12678f7b01a1SEric Dumazet 12688f7b01a1SEric Dumazet return 0; 12698f7b01a1SEric Dumazet } 12708f7b01a1SEric Dumazet 1271d634bf2cSWei Liu static irqreturn_t xennet_tx_interrupt(int irq, void *dev_id) 1272cf66f9d4SKonrad Rzeszutek Wilk { 12732688fcb7SAndrew J. Bennieston struct netfront_queue *queue = dev_id; 1274cf66f9d4SKonrad Rzeszutek Wilk unsigned long flags; 1275cf66f9d4SKonrad Rzeszutek Wilk 12762688fcb7SAndrew J. Bennieston spin_lock_irqsave(&queue->tx_lock, flags); 12772688fcb7SAndrew J. Bennieston xennet_tx_buf_gc(queue); 12782688fcb7SAndrew J. Bennieston spin_unlock_irqrestore(&queue->tx_lock, flags); 1279cf66f9d4SKonrad Rzeszutek Wilk 1280cf66f9d4SKonrad Rzeszutek Wilk return IRQ_HANDLED; 1281cf66f9d4SKonrad Rzeszutek Wilk } 1282cf66f9d4SKonrad Rzeszutek Wilk 1283d634bf2cSWei Liu static irqreturn_t xennet_rx_interrupt(int irq, void *dev_id) 1284d634bf2cSWei Liu { 12852688fcb7SAndrew J. Bennieston struct netfront_queue *queue = dev_id; 12862688fcb7SAndrew J. Bennieston struct net_device *dev = queue->info->netdev; 1287d634bf2cSWei Liu 1288d634bf2cSWei Liu if (likely(netif_carrier_ok(dev) && 12892688fcb7SAndrew J. Bennieston RING_HAS_UNCONSUMED_RESPONSES(&queue->rx))) 12902688fcb7SAndrew J. Bennieston napi_schedule(&queue->napi); 1291d634bf2cSWei Liu 1292d634bf2cSWei Liu return IRQ_HANDLED; 1293d634bf2cSWei Liu } 1294d634bf2cSWei Liu 1295d634bf2cSWei Liu static irqreturn_t xennet_interrupt(int irq, void *dev_id) 1296d634bf2cSWei Liu { 1297d634bf2cSWei Liu xennet_tx_interrupt(irq, dev_id); 1298d634bf2cSWei Liu xennet_rx_interrupt(irq, dev_id); 1299d634bf2cSWei Liu return IRQ_HANDLED; 1300d634bf2cSWei Liu } 1301d634bf2cSWei Liu 1302cf66f9d4SKonrad Rzeszutek Wilk #ifdef CONFIG_NET_POLL_CONTROLLER 1303cf66f9d4SKonrad Rzeszutek Wilk static void xennet_poll_controller(struct net_device *dev) 1304cf66f9d4SKonrad Rzeszutek Wilk { 13052688fcb7SAndrew J. Bennieston /* Poll each queue */ 13062688fcb7SAndrew J. Bennieston struct netfront_info *info = netdev_priv(dev); 13072688fcb7SAndrew J. Bennieston unsigned int num_queues = dev->real_num_tx_queues; 13082688fcb7SAndrew J. Bennieston unsigned int i; 13092688fcb7SAndrew J. Bennieston for (i = 0; i < num_queues; ++i) 13102688fcb7SAndrew J. Bennieston xennet_interrupt(0, &info->queues[i]); 1311cf66f9d4SKonrad Rzeszutek Wilk } 1312cf66f9d4SKonrad Rzeszutek Wilk #endif 1313cf66f9d4SKonrad Rzeszutek Wilk 13140a0b9d2eSStephen Hemminger static const struct net_device_ops xennet_netdev_ops = { 13150a0b9d2eSStephen Hemminger .ndo_open = xennet_open, 13160a0b9d2eSStephen Hemminger .ndo_uninit = xennet_uninit, 13170a0b9d2eSStephen Hemminger .ndo_stop = xennet_close, 13180a0b9d2eSStephen Hemminger .ndo_start_xmit = xennet_start_xmit, 13190a0b9d2eSStephen Hemminger .ndo_change_mtu = xennet_change_mtu, 1320e00f85beSstephen hemminger .ndo_get_stats64 = xennet_get_stats64, 13210a0b9d2eSStephen Hemminger .ndo_set_mac_address = eth_mac_addr, 13220a0b9d2eSStephen Hemminger .ndo_validate_addr = eth_validate_addr, 1323fb507934SMichał Mirosław .ndo_fix_features = xennet_fix_features, 1324fb507934SMichał Mirosław .ndo_set_features = xennet_set_features, 13252688fcb7SAndrew J. Bennieston .ndo_select_queue = xennet_select_queue, 1326cf66f9d4SKonrad Rzeszutek Wilk #ifdef CONFIG_NET_POLL_CONTROLLER 1327cf66f9d4SKonrad Rzeszutek Wilk .ndo_poll_controller = xennet_poll_controller, 1328cf66f9d4SKonrad Rzeszutek Wilk #endif 13290a0b9d2eSStephen Hemminger }; 13300a0b9d2eSStephen Hemminger 13318e0e46bbSBill Pemberton static struct net_device *xennet_create_dev(struct xenbus_device *dev) 13320d160211SJeremy Fitzhardinge { 13332688fcb7SAndrew J. Bennieston int err; 13340d160211SJeremy Fitzhardinge struct net_device *netdev; 13350d160211SJeremy Fitzhardinge struct netfront_info *np; 13360d160211SJeremy Fitzhardinge 133750ee6061SAndrew J. Bennieston netdev = alloc_etherdev_mq(sizeof(struct netfront_info), xennet_max_queues); 133841de8d4cSJoe Perches if (!netdev) 13390d160211SJeremy Fitzhardinge return ERR_PTR(-ENOMEM); 13400d160211SJeremy Fitzhardinge 13410d160211SJeremy Fitzhardinge np = netdev_priv(netdev); 13420d160211SJeremy Fitzhardinge np->xbdev = dev; 13430d160211SJeremy Fitzhardinge 13442688fcb7SAndrew J. Bennieston /* No need to use rtnl_lock() before the call below as it 13452688fcb7SAndrew J. Bennieston * happens before register_netdev(). 13462688fcb7SAndrew J. Bennieston */ 13472688fcb7SAndrew J. Bennieston netif_set_real_num_tx_queues(netdev, 0); 13482688fcb7SAndrew J. Bennieston np->queues = NULL; 13490d160211SJeremy Fitzhardinge 1350e00f85beSstephen hemminger err = -ENOMEM; 13511c213bd2SWANG Cong np->stats = netdev_alloc_pcpu_stats(struct netfront_stats); 1352e00f85beSstephen hemminger if (np->stats == NULL) 1353e00f85beSstephen hemminger goto exit; 1354e00f85beSstephen hemminger 13550a0b9d2eSStephen Hemminger netdev->netdev_ops = &xennet_netdev_ops; 13560a0b9d2eSStephen Hemminger 1357fb507934SMichał Mirosław netdev->features = NETIF_F_IP_CSUM | NETIF_F_RXCSUM | 1358fb507934SMichał Mirosław NETIF_F_GSO_ROBUST; 13592c0057deSPaul Durrant netdev->hw_features = NETIF_F_SG | 13602c0057deSPaul Durrant NETIF_F_IPV6_CSUM | 13612c0057deSPaul Durrant NETIF_F_TSO | NETIF_F_TSO6; 13620d160211SJeremy Fitzhardinge 1363fc3e5941SIan Campbell /* 1364fc3e5941SIan Campbell * Assume that all hw features are available for now. This set 1365fc3e5941SIan Campbell * will be adjusted by the call to netdev_update_features() in 1366fc3e5941SIan Campbell * xennet_connect() which is the earliest point where we can 1367fc3e5941SIan Campbell * negotiate with the backend regarding supported features. 1368fc3e5941SIan Campbell */ 1369fc3e5941SIan Campbell netdev->features |= netdev->hw_features; 1370fc3e5941SIan Campbell 13717ad24ea4SWilfried Klaebe netdev->ethtool_ops = &xennet_ethtool_ops; 13720d160211SJeremy Fitzhardinge SET_NETDEV_DEV(netdev, &dev->dev); 13730d160211SJeremy Fitzhardinge 13749ecd1a75SWei Liu netif_set_gso_max_size(netdev, XEN_NETIF_MAX_TX_SIZE - MAX_TCP_HEADER); 13759ecd1a75SWei Liu 13760d160211SJeremy Fitzhardinge np->netdev = netdev; 13770d160211SJeremy Fitzhardinge 13780d160211SJeremy Fitzhardinge netif_carrier_off(netdev); 13790d160211SJeremy Fitzhardinge 13800d160211SJeremy Fitzhardinge return netdev; 13810d160211SJeremy Fitzhardinge 13820d160211SJeremy Fitzhardinge exit: 13830d160211SJeremy Fitzhardinge free_netdev(netdev); 13840d160211SJeremy Fitzhardinge return ERR_PTR(err); 13850d160211SJeremy Fitzhardinge } 13860d160211SJeremy Fitzhardinge 13870d160211SJeremy Fitzhardinge /** 13880d160211SJeremy Fitzhardinge * Entry point to this code when a new device is created. Allocate the basic 13890d160211SJeremy Fitzhardinge * structures and the ring buffers for communication with the backend, and 13900d160211SJeremy Fitzhardinge * inform the backend of the appropriate details for those. 13910d160211SJeremy Fitzhardinge */ 13928e0e46bbSBill Pemberton static int netfront_probe(struct xenbus_device *dev, 13930d160211SJeremy Fitzhardinge const struct xenbus_device_id *id) 13940d160211SJeremy Fitzhardinge { 13950d160211SJeremy Fitzhardinge int err; 13960d160211SJeremy Fitzhardinge struct net_device *netdev; 13970d160211SJeremy Fitzhardinge struct netfront_info *info; 13980d160211SJeremy Fitzhardinge 13990d160211SJeremy Fitzhardinge netdev = xennet_create_dev(dev); 14000d160211SJeremy Fitzhardinge if (IS_ERR(netdev)) { 14010d160211SJeremy Fitzhardinge err = PTR_ERR(netdev); 14020d160211SJeremy Fitzhardinge xenbus_dev_fatal(dev, err, "creating netdev"); 14030d160211SJeremy Fitzhardinge return err; 14040d160211SJeremy Fitzhardinge } 14050d160211SJeremy Fitzhardinge 14060d160211SJeremy Fitzhardinge info = netdev_priv(netdev); 14071b713e00SGreg Kroah-Hartman dev_set_drvdata(&dev->dev, info); 14080d160211SJeremy Fitzhardinge 14090d160211SJeremy Fitzhardinge err = register_netdev(info->netdev); 14100d160211SJeremy Fitzhardinge if (err) { 1411383eda32SJoe Perches pr_warn("%s: register_netdev err=%d\n", __func__, err); 14120d160211SJeremy Fitzhardinge goto fail; 14130d160211SJeremy Fitzhardinge } 14140d160211SJeremy Fitzhardinge 14150d160211SJeremy Fitzhardinge err = xennet_sysfs_addif(info->netdev); 14160d160211SJeremy Fitzhardinge if (err) { 14170d160211SJeremy Fitzhardinge unregister_netdev(info->netdev); 1418383eda32SJoe Perches pr_warn("%s: add sysfs failed err=%d\n", __func__, err); 14190d160211SJeremy Fitzhardinge goto fail; 14200d160211SJeremy Fitzhardinge } 14210d160211SJeremy Fitzhardinge 14220d160211SJeremy Fitzhardinge return 0; 14230d160211SJeremy Fitzhardinge 14240d160211SJeremy Fitzhardinge fail: 14250d160211SJeremy Fitzhardinge free_netdev(netdev); 14261b713e00SGreg Kroah-Hartman dev_set_drvdata(&dev->dev, NULL); 14270d160211SJeremy Fitzhardinge return err; 14280d160211SJeremy Fitzhardinge } 14290d160211SJeremy Fitzhardinge 14300d160211SJeremy Fitzhardinge static void xennet_end_access(int ref, void *page) 14310d160211SJeremy Fitzhardinge { 14320d160211SJeremy Fitzhardinge /* This frees the page as a side-effect */ 14330d160211SJeremy Fitzhardinge if (ref != GRANT_INVALID_REF) 14340d160211SJeremy Fitzhardinge gnttab_end_foreign_access(ref, 0, (unsigned long)page); 14350d160211SJeremy Fitzhardinge } 14360d160211SJeremy Fitzhardinge 14370d160211SJeremy Fitzhardinge static void xennet_disconnect_backend(struct netfront_info *info) 14380d160211SJeremy Fitzhardinge { 14392688fcb7SAndrew J. Bennieston unsigned int i = 0; 14402688fcb7SAndrew J. Bennieston unsigned int num_queues = info->netdev->real_num_tx_queues; 14410d160211SJeremy Fitzhardinge 1442f9feb1e6SDavid Vrabel netif_carrier_off(info->netdev); 1443f9feb1e6SDavid Vrabel 14442688fcb7SAndrew J. Bennieston for (i = 0; i < num_queues; ++i) { 144576541869SDavid Vrabel struct netfront_queue *queue = &info->queues[i]; 144676541869SDavid Vrabel 14472688fcb7SAndrew J. Bennieston if (queue->tx_irq && (queue->tx_irq == queue->rx_irq)) 14482688fcb7SAndrew J. Bennieston unbind_from_irqhandler(queue->tx_irq, queue); 14492688fcb7SAndrew J. Bennieston if (queue->tx_irq && (queue->tx_irq != queue->rx_irq)) { 14502688fcb7SAndrew J. Bennieston unbind_from_irqhandler(queue->tx_irq, queue); 14512688fcb7SAndrew J. Bennieston unbind_from_irqhandler(queue->rx_irq, queue); 1452d634bf2cSWei Liu } 14532688fcb7SAndrew J. Bennieston queue->tx_evtchn = queue->rx_evtchn = 0; 14542688fcb7SAndrew J. Bennieston queue->tx_irq = queue->rx_irq = 0; 14550d160211SJeremy Fitzhardinge 1456f9feb1e6SDavid Vrabel napi_synchronize(&queue->napi); 1457f9feb1e6SDavid Vrabel 14580d160211SJeremy Fitzhardinge /* End access and free the pages */ 14592688fcb7SAndrew J. Bennieston xennet_end_access(queue->tx_ring_ref, queue->tx.sring); 14602688fcb7SAndrew J. Bennieston xennet_end_access(queue->rx_ring_ref, queue->rx.sring); 14610d160211SJeremy Fitzhardinge 14622688fcb7SAndrew J. Bennieston queue->tx_ring_ref = GRANT_INVALID_REF; 14632688fcb7SAndrew J. Bennieston queue->rx_ring_ref = GRANT_INVALID_REF; 14642688fcb7SAndrew J. Bennieston queue->tx.sring = NULL; 14652688fcb7SAndrew J. Bennieston queue->rx.sring = NULL; 14662688fcb7SAndrew J. Bennieston } 14670d160211SJeremy Fitzhardinge } 14680d160211SJeremy Fitzhardinge 14690d160211SJeremy Fitzhardinge /** 14700d160211SJeremy Fitzhardinge * We are reconnecting to the backend, due to a suspend/resume, or a backend 14710d160211SJeremy Fitzhardinge * driver restart. We tear down our netif structure and recreate it, but 14720d160211SJeremy Fitzhardinge * leave the device-layer structures intact so that this is transparent to the 14730d160211SJeremy Fitzhardinge * rest of the kernel. 14740d160211SJeremy Fitzhardinge */ 14750d160211SJeremy Fitzhardinge static int netfront_resume(struct xenbus_device *dev) 14760d160211SJeremy Fitzhardinge { 14771b713e00SGreg Kroah-Hartman struct netfront_info *info = dev_get_drvdata(&dev->dev); 14780d160211SJeremy Fitzhardinge 14790d160211SJeremy Fitzhardinge dev_dbg(&dev->dev, "%s\n", dev->nodename); 14800d160211SJeremy Fitzhardinge 14810d160211SJeremy Fitzhardinge xennet_disconnect_backend(info); 14820d160211SJeremy Fitzhardinge return 0; 14830d160211SJeremy Fitzhardinge } 14840d160211SJeremy Fitzhardinge 14850d160211SJeremy Fitzhardinge static int xen_net_read_mac(struct xenbus_device *dev, u8 mac[]) 14860d160211SJeremy Fitzhardinge { 14870d160211SJeremy Fitzhardinge char *s, *e, *macstr; 14880d160211SJeremy Fitzhardinge int i; 14890d160211SJeremy Fitzhardinge 14900d160211SJeremy Fitzhardinge macstr = s = xenbus_read(XBT_NIL, dev->nodename, "mac", NULL); 14910d160211SJeremy Fitzhardinge if (IS_ERR(macstr)) 14920d160211SJeremy Fitzhardinge return PTR_ERR(macstr); 14930d160211SJeremy Fitzhardinge 14940d160211SJeremy Fitzhardinge for (i = 0; i < ETH_ALEN; i++) { 14950d160211SJeremy Fitzhardinge mac[i] = simple_strtoul(s, &e, 16); 14960d160211SJeremy Fitzhardinge if ((s == e) || (*e != ((i == ETH_ALEN-1) ? '\0' : ':'))) { 14970d160211SJeremy Fitzhardinge kfree(macstr); 14980d160211SJeremy Fitzhardinge return -ENOENT; 14990d160211SJeremy Fitzhardinge } 15000d160211SJeremy Fitzhardinge s = e+1; 15010d160211SJeremy Fitzhardinge } 15020d160211SJeremy Fitzhardinge 15030d160211SJeremy Fitzhardinge kfree(macstr); 15040d160211SJeremy Fitzhardinge return 0; 15050d160211SJeremy Fitzhardinge } 15060d160211SJeremy Fitzhardinge 15072688fcb7SAndrew J. Bennieston static int setup_netfront_single(struct netfront_queue *queue) 1508d634bf2cSWei Liu { 1509d634bf2cSWei Liu int err; 1510d634bf2cSWei Liu 15112688fcb7SAndrew J. Bennieston err = xenbus_alloc_evtchn(queue->info->xbdev, &queue->tx_evtchn); 1512d634bf2cSWei Liu if (err < 0) 1513d634bf2cSWei Liu goto fail; 1514d634bf2cSWei Liu 15152688fcb7SAndrew J. Bennieston err = bind_evtchn_to_irqhandler(queue->tx_evtchn, 1516d634bf2cSWei Liu xennet_interrupt, 15172688fcb7SAndrew J. Bennieston 0, queue->info->netdev->name, queue); 1518d634bf2cSWei Liu if (err < 0) 1519d634bf2cSWei Liu goto bind_fail; 15202688fcb7SAndrew J. Bennieston queue->rx_evtchn = queue->tx_evtchn; 15212688fcb7SAndrew J. Bennieston queue->rx_irq = queue->tx_irq = err; 1522d634bf2cSWei Liu 1523d634bf2cSWei Liu return 0; 1524d634bf2cSWei Liu 1525d634bf2cSWei Liu bind_fail: 15262688fcb7SAndrew J. Bennieston xenbus_free_evtchn(queue->info->xbdev, queue->tx_evtchn); 15272688fcb7SAndrew J. Bennieston queue->tx_evtchn = 0; 1528d634bf2cSWei Liu fail: 1529d634bf2cSWei Liu return err; 1530d634bf2cSWei Liu } 1531d634bf2cSWei Liu 15322688fcb7SAndrew J. Bennieston static int setup_netfront_split(struct netfront_queue *queue) 1533d634bf2cSWei Liu { 1534d634bf2cSWei Liu int err; 1535d634bf2cSWei Liu 15362688fcb7SAndrew J. Bennieston err = xenbus_alloc_evtchn(queue->info->xbdev, &queue->tx_evtchn); 1537d634bf2cSWei Liu if (err < 0) 1538d634bf2cSWei Liu goto fail; 15392688fcb7SAndrew J. Bennieston err = xenbus_alloc_evtchn(queue->info->xbdev, &queue->rx_evtchn); 1540d634bf2cSWei Liu if (err < 0) 1541d634bf2cSWei Liu goto alloc_rx_evtchn_fail; 1542d634bf2cSWei Liu 15432688fcb7SAndrew J. Bennieston snprintf(queue->tx_irq_name, sizeof(queue->tx_irq_name), 15442688fcb7SAndrew J. Bennieston "%s-tx", queue->name); 15452688fcb7SAndrew J. Bennieston err = bind_evtchn_to_irqhandler(queue->tx_evtchn, 1546d634bf2cSWei Liu xennet_tx_interrupt, 15472688fcb7SAndrew J. Bennieston 0, queue->tx_irq_name, queue); 1548d634bf2cSWei Liu if (err < 0) 1549d634bf2cSWei Liu goto bind_tx_fail; 15502688fcb7SAndrew J. Bennieston queue->tx_irq = err; 1551d634bf2cSWei Liu 15522688fcb7SAndrew J. Bennieston snprintf(queue->rx_irq_name, sizeof(queue->rx_irq_name), 15532688fcb7SAndrew J. Bennieston "%s-rx", queue->name); 15542688fcb7SAndrew J. Bennieston err = bind_evtchn_to_irqhandler(queue->rx_evtchn, 1555d634bf2cSWei Liu xennet_rx_interrupt, 15562688fcb7SAndrew J. Bennieston 0, queue->rx_irq_name, queue); 1557d634bf2cSWei Liu if (err < 0) 1558d634bf2cSWei Liu goto bind_rx_fail; 15592688fcb7SAndrew J. Bennieston queue->rx_irq = err; 1560d634bf2cSWei Liu 1561d634bf2cSWei Liu return 0; 1562d634bf2cSWei Liu 1563d634bf2cSWei Liu bind_rx_fail: 15642688fcb7SAndrew J. Bennieston unbind_from_irqhandler(queue->tx_irq, queue); 15652688fcb7SAndrew J. Bennieston queue->tx_irq = 0; 1566d634bf2cSWei Liu bind_tx_fail: 15672688fcb7SAndrew J. Bennieston xenbus_free_evtchn(queue->info->xbdev, queue->rx_evtchn); 15682688fcb7SAndrew J. Bennieston queue->rx_evtchn = 0; 1569d634bf2cSWei Liu alloc_rx_evtchn_fail: 15702688fcb7SAndrew J. Bennieston xenbus_free_evtchn(queue->info->xbdev, queue->tx_evtchn); 15712688fcb7SAndrew J. Bennieston queue->tx_evtchn = 0; 1572d634bf2cSWei Liu fail: 1573d634bf2cSWei Liu return err; 1574d634bf2cSWei Liu } 1575d634bf2cSWei Liu 15762688fcb7SAndrew J. Bennieston static int setup_netfront(struct xenbus_device *dev, 15772688fcb7SAndrew J. Bennieston struct netfront_queue *queue, unsigned int feature_split_evtchn) 15780d160211SJeremy Fitzhardinge { 15790d160211SJeremy Fitzhardinge struct xen_netif_tx_sring *txs; 15800d160211SJeremy Fitzhardinge struct xen_netif_rx_sring *rxs; 15810d160211SJeremy Fitzhardinge int err; 15820d160211SJeremy Fitzhardinge 15832688fcb7SAndrew J. Bennieston queue->tx_ring_ref = GRANT_INVALID_REF; 15842688fcb7SAndrew J. Bennieston queue->rx_ring_ref = GRANT_INVALID_REF; 15852688fcb7SAndrew J. Bennieston queue->rx.sring = NULL; 15862688fcb7SAndrew J. Bennieston queue->tx.sring = NULL; 15870d160211SJeremy Fitzhardinge 1588a144ff09SIan Campbell txs = (struct xen_netif_tx_sring *)get_zeroed_page(GFP_NOIO | __GFP_HIGH); 15890d160211SJeremy Fitzhardinge if (!txs) { 15900d160211SJeremy Fitzhardinge err = -ENOMEM; 15910d160211SJeremy Fitzhardinge xenbus_dev_fatal(dev, err, "allocating tx ring page"); 15920d160211SJeremy Fitzhardinge goto fail; 15930d160211SJeremy Fitzhardinge } 15940d160211SJeremy Fitzhardinge SHARED_RING_INIT(txs); 15952688fcb7SAndrew J. Bennieston FRONT_RING_INIT(&queue->tx, txs, PAGE_SIZE); 15960d160211SJeremy Fitzhardinge 15970d160211SJeremy Fitzhardinge err = xenbus_grant_ring(dev, virt_to_mfn(txs)); 15981ca2983aSWei Liu if (err < 0) 15991ca2983aSWei Liu goto grant_tx_ring_fail; 16002688fcb7SAndrew J. Bennieston queue->tx_ring_ref = err; 16010d160211SJeremy Fitzhardinge 1602a144ff09SIan Campbell rxs = (struct xen_netif_rx_sring *)get_zeroed_page(GFP_NOIO | __GFP_HIGH); 16030d160211SJeremy Fitzhardinge if (!rxs) { 16040d160211SJeremy Fitzhardinge err = -ENOMEM; 16050d160211SJeremy Fitzhardinge xenbus_dev_fatal(dev, err, "allocating rx ring page"); 16061ca2983aSWei Liu goto alloc_rx_ring_fail; 16070d160211SJeremy Fitzhardinge } 16080d160211SJeremy Fitzhardinge SHARED_RING_INIT(rxs); 16092688fcb7SAndrew J. Bennieston FRONT_RING_INIT(&queue->rx, rxs, PAGE_SIZE); 16100d160211SJeremy Fitzhardinge 16110d160211SJeremy Fitzhardinge err = xenbus_grant_ring(dev, virt_to_mfn(rxs)); 16121ca2983aSWei Liu if (err < 0) 16131ca2983aSWei Liu goto grant_rx_ring_fail; 16142688fcb7SAndrew J. Bennieston queue->rx_ring_ref = err; 16150d160211SJeremy Fitzhardinge 1616d634bf2cSWei Liu if (feature_split_evtchn) 16172688fcb7SAndrew J. Bennieston err = setup_netfront_split(queue); 1618d634bf2cSWei Liu /* setup single event channel if 1619d634bf2cSWei Liu * a) feature-split-event-channels == 0 1620d634bf2cSWei Liu * b) feature-split-event-channels == 1 but failed to setup 1621d634bf2cSWei Liu */ 1622d634bf2cSWei Liu if (!feature_split_evtchn || (feature_split_evtchn && err)) 16232688fcb7SAndrew J. Bennieston err = setup_netfront_single(queue); 1624d634bf2cSWei Liu 16250d160211SJeremy Fitzhardinge if (err) 16261ca2983aSWei Liu goto alloc_evtchn_fail; 16270d160211SJeremy Fitzhardinge 16280d160211SJeremy Fitzhardinge return 0; 16290d160211SJeremy Fitzhardinge 16301ca2983aSWei Liu /* If we fail to setup netfront, it is safe to just revoke access to 16311ca2983aSWei Liu * granted pages because backend is not accessing it at this point. 16321ca2983aSWei Liu */ 16331ca2983aSWei Liu alloc_evtchn_fail: 16342688fcb7SAndrew J. Bennieston gnttab_end_foreign_access_ref(queue->rx_ring_ref, 0); 16351ca2983aSWei Liu grant_rx_ring_fail: 16361ca2983aSWei Liu free_page((unsigned long)rxs); 16371ca2983aSWei Liu alloc_rx_ring_fail: 16382688fcb7SAndrew J. Bennieston gnttab_end_foreign_access_ref(queue->tx_ring_ref, 0); 16391ca2983aSWei Liu grant_tx_ring_fail: 16401ca2983aSWei Liu free_page((unsigned long)txs); 16410d160211SJeremy Fitzhardinge fail: 16420d160211SJeremy Fitzhardinge return err; 16430d160211SJeremy Fitzhardinge } 16440d160211SJeremy Fitzhardinge 16452688fcb7SAndrew J. Bennieston /* Queue-specific initialisation 16462688fcb7SAndrew J. Bennieston * This used to be done in xennet_create_dev() but must now 16472688fcb7SAndrew J. Bennieston * be run per-queue. 16482688fcb7SAndrew J. Bennieston */ 16492688fcb7SAndrew J. Bennieston static int xennet_init_queue(struct netfront_queue *queue) 16502688fcb7SAndrew J. Bennieston { 16512688fcb7SAndrew J. Bennieston unsigned short i; 16522688fcb7SAndrew J. Bennieston int err = 0; 16532688fcb7SAndrew J. Bennieston 16542688fcb7SAndrew J. Bennieston spin_lock_init(&queue->tx_lock); 16552688fcb7SAndrew J. Bennieston spin_lock_init(&queue->rx_lock); 16562688fcb7SAndrew J. Bennieston 16572688fcb7SAndrew J. Bennieston skb_queue_head_init(&queue->rx_batch); 16582688fcb7SAndrew J. Bennieston queue->rx_target = RX_DFL_MIN_TARGET; 16592688fcb7SAndrew J. Bennieston queue->rx_min_target = RX_DFL_MIN_TARGET; 16602688fcb7SAndrew J. Bennieston queue->rx_max_target = RX_MAX_TARGET; 16612688fcb7SAndrew J. Bennieston 16622688fcb7SAndrew J. Bennieston init_timer(&queue->rx_refill_timer); 16632688fcb7SAndrew J. Bennieston queue->rx_refill_timer.data = (unsigned long)queue; 16642688fcb7SAndrew J. Bennieston queue->rx_refill_timer.function = rx_refill_timeout; 16652688fcb7SAndrew J. Bennieston 16668b715010SWei Liu snprintf(queue->name, sizeof(queue->name), "%s-q%u", 16678b715010SWei Liu queue->info->netdev->name, queue->id); 16688b715010SWei Liu 16692688fcb7SAndrew J. Bennieston /* Initialise tx_skbs as a free chain containing every entry. */ 16702688fcb7SAndrew J. Bennieston queue->tx_skb_freelist = 0; 16712688fcb7SAndrew J. Bennieston for (i = 0; i < NET_TX_RING_SIZE; i++) { 16722688fcb7SAndrew J. Bennieston skb_entry_set_link(&queue->tx_skbs[i], i+1); 16732688fcb7SAndrew J. Bennieston queue->grant_tx_ref[i] = GRANT_INVALID_REF; 16742688fcb7SAndrew J. Bennieston queue->grant_tx_page[i] = NULL; 16752688fcb7SAndrew J. Bennieston } 16762688fcb7SAndrew J. Bennieston 16772688fcb7SAndrew J. Bennieston /* Clear out rx_skbs */ 16782688fcb7SAndrew J. Bennieston for (i = 0; i < NET_RX_RING_SIZE; i++) { 16792688fcb7SAndrew J. Bennieston queue->rx_skbs[i] = NULL; 16802688fcb7SAndrew J. Bennieston queue->grant_rx_ref[i] = GRANT_INVALID_REF; 16812688fcb7SAndrew J. Bennieston } 16822688fcb7SAndrew J. Bennieston 16832688fcb7SAndrew J. Bennieston /* A grant for every tx ring slot */ 16842688fcb7SAndrew J. Bennieston if (gnttab_alloc_grant_references(TX_MAX_TARGET, 16852688fcb7SAndrew J. Bennieston &queue->gref_tx_head) < 0) { 16862688fcb7SAndrew J. Bennieston pr_alert("can't alloc tx grant refs\n"); 16872688fcb7SAndrew J. Bennieston err = -ENOMEM; 16882688fcb7SAndrew J. Bennieston goto exit; 16892688fcb7SAndrew J. Bennieston } 16902688fcb7SAndrew J. Bennieston 16912688fcb7SAndrew J. Bennieston /* A grant for every rx ring slot */ 16922688fcb7SAndrew J. Bennieston if (gnttab_alloc_grant_references(RX_MAX_TARGET, 16932688fcb7SAndrew J. Bennieston &queue->gref_rx_head) < 0) { 16942688fcb7SAndrew J. Bennieston pr_alert("can't alloc rx grant refs\n"); 16952688fcb7SAndrew J. Bennieston err = -ENOMEM; 16962688fcb7SAndrew J. Bennieston goto exit_free_tx; 16972688fcb7SAndrew J. Bennieston } 16982688fcb7SAndrew J. Bennieston 16992688fcb7SAndrew J. Bennieston return 0; 17002688fcb7SAndrew J. Bennieston 17012688fcb7SAndrew J. Bennieston exit_free_tx: 17022688fcb7SAndrew J. Bennieston gnttab_free_grant_references(queue->gref_tx_head); 17032688fcb7SAndrew J. Bennieston exit: 17042688fcb7SAndrew J. Bennieston return err; 17052688fcb7SAndrew J. Bennieston } 17062688fcb7SAndrew J. Bennieston 170750ee6061SAndrew J. Bennieston static int write_queue_xenstore_keys(struct netfront_queue *queue, 170850ee6061SAndrew J. Bennieston struct xenbus_transaction *xbt, int write_hierarchical) 170950ee6061SAndrew J. Bennieston { 171050ee6061SAndrew J. Bennieston /* Write the queue-specific keys into XenStore in the traditional 171150ee6061SAndrew J. Bennieston * way for a single queue, or in a queue subkeys for multiple 171250ee6061SAndrew J. Bennieston * queues. 171350ee6061SAndrew J. Bennieston */ 171450ee6061SAndrew J. Bennieston struct xenbus_device *dev = queue->info->xbdev; 171550ee6061SAndrew J. Bennieston int err; 171650ee6061SAndrew J. Bennieston const char *message; 171750ee6061SAndrew J. Bennieston char *path; 171850ee6061SAndrew J. Bennieston size_t pathsize; 171950ee6061SAndrew J. Bennieston 172050ee6061SAndrew J. Bennieston /* Choose the correct place to write the keys */ 172150ee6061SAndrew J. Bennieston if (write_hierarchical) { 172250ee6061SAndrew J. Bennieston pathsize = strlen(dev->nodename) + 10; 172350ee6061SAndrew J. Bennieston path = kzalloc(pathsize, GFP_KERNEL); 172450ee6061SAndrew J. Bennieston if (!path) { 172550ee6061SAndrew J. Bennieston err = -ENOMEM; 172650ee6061SAndrew J. Bennieston message = "out of memory while writing ring references"; 172750ee6061SAndrew J. Bennieston goto error; 172850ee6061SAndrew J. Bennieston } 172950ee6061SAndrew J. Bennieston snprintf(path, pathsize, "%s/queue-%u", 173050ee6061SAndrew J. Bennieston dev->nodename, queue->id); 173150ee6061SAndrew J. Bennieston } else { 173250ee6061SAndrew J. Bennieston path = (char *)dev->nodename; 173350ee6061SAndrew J. Bennieston } 173450ee6061SAndrew J. Bennieston 173550ee6061SAndrew J. Bennieston /* Write ring references */ 173650ee6061SAndrew J. Bennieston err = xenbus_printf(*xbt, path, "tx-ring-ref", "%u", 173750ee6061SAndrew J. Bennieston queue->tx_ring_ref); 173850ee6061SAndrew J. Bennieston if (err) { 173950ee6061SAndrew J. Bennieston message = "writing tx-ring-ref"; 174050ee6061SAndrew J. Bennieston goto error; 174150ee6061SAndrew J. Bennieston } 174250ee6061SAndrew J. Bennieston 174350ee6061SAndrew J. Bennieston err = xenbus_printf(*xbt, path, "rx-ring-ref", "%u", 174450ee6061SAndrew J. Bennieston queue->rx_ring_ref); 174550ee6061SAndrew J. Bennieston if (err) { 174650ee6061SAndrew J. Bennieston message = "writing rx-ring-ref"; 174750ee6061SAndrew J. Bennieston goto error; 174850ee6061SAndrew J. Bennieston } 174950ee6061SAndrew J. Bennieston 175050ee6061SAndrew J. Bennieston /* Write event channels; taking into account both shared 175150ee6061SAndrew J. Bennieston * and split event channel scenarios. 175250ee6061SAndrew J. Bennieston */ 175350ee6061SAndrew J. Bennieston if (queue->tx_evtchn == queue->rx_evtchn) { 175450ee6061SAndrew J. Bennieston /* Shared event channel */ 175550ee6061SAndrew J. Bennieston err = xenbus_printf(*xbt, path, 175650ee6061SAndrew J. Bennieston "event-channel", "%u", queue->tx_evtchn); 175750ee6061SAndrew J. Bennieston if (err) { 175850ee6061SAndrew J. Bennieston message = "writing event-channel"; 175950ee6061SAndrew J. Bennieston goto error; 176050ee6061SAndrew J. Bennieston } 176150ee6061SAndrew J. Bennieston } else { 176250ee6061SAndrew J. Bennieston /* Split event channels */ 176350ee6061SAndrew J. Bennieston err = xenbus_printf(*xbt, path, 176450ee6061SAndrew J. Bennieston "event-channel-tx", "%u", queue->tx_evtchn); 176550ee6061SAndrew J. Bennieston if (err) { 176650ee6061SAndrew J. Bennieston message = "writing event-channel-tx"; 176750ee6061SAndrew J. Bennieston goto error; 176850ee6061SAndrew J. Bennieston } 176950ee6061SAndrew J. Bennieston 177050ee6061SAndrew J. Bennieston err = xenbus_printf(*xbt, path, 177150ee6061SAndrew J. Bennieston "event-channel-rx", "%u", queue->rx_evtchn); 177250ee6061SAndrew J. Bennieston if (err) { 177350ee6061SAndrew J. Bennieston message = "writing event-channel-rx"; 177450ee6061SAndrew J. Bennieston goto error; 177550ee6061SAndrew J. Bennieston } 177650ee6061SAndrew J. Bennieston } 177750ee6061SAndrew J. Bennieston 177850ee6061SAndrew J. Bennieston if (write_hierarchical) 177950ee6061SAndrew J. Bennieston kfree(path); 178050ee6061SAndrew J. Bennieston return 0; 178150ee6061SAndrew J. Bennieston 178250ee6061SAndrew J. Bennieston error: 178350ee6061SAndrew J. Bennieston if (write_hierarchical) 178450ee6061SAndrew J. Bennieston kfree(path); 178550ee6061SAndrew J. Bennieston xenbus_dev_fatal(dev, err, "%s", message); 178650ee6061SAndrew J. Bennieston return err; 178750ee6061SAndrew J. Bennieston } 178850ee6061SAndrew J. Bennieston 1789ce58725fSDavid Vrabel static void xennet_destroy_queues(struct netfront_info *info) 1790ce58725fSDavid Vrabel { 1791ce58725fSDavid Vrabel unsigned int i; 1792ce58725fSDavid Vrabel 1793ce58725fSDavid Vrabel rtnl_lock(); 1794ce58725fSDavid Vrabel 1795ce58725fSDavid Vrabel for (i = 0; i < info->netdev->real_num_tx_queues; i++) { 1796ce58725fSDavid Vrabel struct netfront_queue *queue = &info->queues[i]; 1797ce58725fSDavid Vrabel 1798ce58725fSDavid Vrabel if (netif_running(info->netdev)) 1799ce58725fSDavid Vrabel napi_disable(&queue->napi); 1800ce58725fSDavid Vrabel netif_napi_del(&queue->napi); 1801ce58725fSDavid Vrabel } 1802ce58725fSDavid Vrabel 1803ce58725fSDavid Vrabel rtnl_unlock(); 1804ce58725fSDavid Vrabel 1805ce58725fSDavid Vrabel kfree(info->queues); 1806ce58725fSDavid Vrabel info->queues = NULL; 1807ce58725fSDavid Vrabel } 1808ce58725fSDavid Vrabel 1809ce58725fSDavid Vrabel static int xennet_create_queues(struct netfront_info *info, 1810ce58725fSDavid Vrabel unsigned int num_queues) 1811ce58725fSDavid Vrabel { 1812ce58725fSDavid Vrabel unsigned int i; 1813ce58725fSDavid Vrabel int ret; 1814ce58725fSDavid Vrabel 1815ce58725fSDavid Vrabel info->queues = kcalloc(num_queues, sizeof(struct netfront_queue), 1816ce58725fSDavid Vrabel GFP_KERNEL); 1817ce58725fSDavid Vrabel if (!info->queues) 1818ce58725fSDavid Vrabel return -ENOMEM; 1819ce58725fSDavid Vrabel 1820ce58725fSDavid Vrabel rtnl_lock(); 1821ce58725fSDavid Vrabel 1822ce58725fSDavid Vrabel for (i = 0; i < num_queues; i++) { 1823ce58725fSDavid Vrabel struct netfront_queue *queue = &info->queues[i]; 1824ce58725fSDavid Vrabel 1825ce58725fSDavid Vrabel queue->id = i; 1826ce58725fSDavid Vrabel queue->info = info; 1827ce58725fSDavid Vrabel 1828ce58725fSDavid Vrabel ret = xennet_init_queue(queue); 1829ce58725fSDavid Vrabel if (ret < 0) { 1830ce58725fSDavid Vrabel dev_warn(&info->netdev->dev, "only created %d queues\n", 1831ce58725fSDavid Vrabel num_queues); 1832ce58725fSDavid Vrabel num_queues = i; 1833ce58725fSDavid Vrabel break; 1834ce58725fSDavid Vrabel } 1835ce58725fSDavid Vrabel 1836ce58725fSDavid Vrabel netif_napi_add(queue->info->netdev, &queue->napi, 1837ce58725fSDavid Vrabel xennet_poll, 64); 1838ce58725fSDavid Vrabel if (netif_running(info->netdev)) 1839ce58725fSDavid Vrabel napi_enable(&queue->napi); 1840ce58725fSDavid Vrabel } 1841ce58725fSDavid Vrabel 1842ce58725fSDavid Vrabel netif_set_real_num_tx_queues(info->netdev, num_queues); 1843ce58725fSDavid Vrabel 1844ce58725fSDavid Vrabel rtnl_unlock(); 1845ce58725fSDavid Vrabel 1846ce58725fSDavid Vrabel if (num_queues == 0) { 1847ce58725fSDavid Vrabel dev_err(&info->netdev->dev, "no queues\n"); 1848ce58725fSDavid Vrabel return -EINVAL; 1849ce58725fSDavid Vrabel } 1850ce58725fSDavid Vrabel return 0; 1851ce58725fSDavid Vrabel } 1852ce58725fSDavid Vrabel 18530d160211SJeremy Fitzhardinge /* Common code used when first setting up, and when resuming. */ 1854f502bf2bSIan Campbell static int talk_to_netback(struct xenbus_device *dev, 18550d160211SJeremy Fitzhardinge struct netfront_info *info) 18560d160211SJeremy Fitzhardinge { 18570d160211SJeremy Fitzhardinge const char *message; 18580d160211SJeremy Fitzhardinge struct xenbus_transaction xbt; 18590d160211SJeremy Fitzhardinge int err; 18602688fcb7SAndrew J. Bennieston unsigned int feature_split_evtchn; 18612688fcb7SAndrew J. Bennieston unsigned int i = 0; 186250ee6061SAndrew J. Bennieston unsigned int max_queues = 0; 18632688fcb7SAndrew J. Bennieston struct netfront_queue *queue = NULL; 18642688fcb7SAndrew J. Bennieston unsigned int num_queues = 1; 18650d160211SJeremy Fitzhardinge 18662688fcb7SAndrew J. Bennieston info->netdev->irq = 0; 18672688fcb7SAndrew J. Bennieston 186850ee6061SAndrew J. Bennieston /* Check if backend supports multiple queues */ 186950ee6061SAndrew J. Bennieston err = xenbus_scanf(XBT_NIL, info->xbdev->otherend, 187050ee6061SAndrew J. Bennieston "multi-queue-max-queues", "%u", &max_queues); 187150ee6061SAndrew J. Bennieston if (err < 0) 187250ee6061SAndrew J. Bennieston max_queues = 1; 187350ee6061SAndrew J. Bennieston num_queues = min(max_queues, xennet_max_queues); 187450ee6061SAndrew J. Bennieston 18752688fcb7SAndrew J. Bennieston /* Check feature-split-event-channels */ 18762688fcb7SAndrew J. Bennieston err = xenbus_scanf(XBT_NIL, info->xbdev->otherend, 18772688fcb7SAndrew J. Bennieston "feature-split-event-channels", "%u", 18782688fcb7SAndrew J. Bennieston &feature_split_evtchn); 18792688fcb7SAndrew J. Bennieston if (err < 0) 18802688fcb7SAndrew J. Bennieston feature_split_evtchn = 0; 18812688fcb7SAndrew J. Bennieston 18822688fcb7SAndrew J. Bennieston /* Read mac addr. */ 18832688fcb7SAndrew J. Bennieston err = xen_net_read_mac(dev, info->netdev->dev_addr); 18842688fcb7SAndrew J. Bennieston if (err) { 18852688fcb7SAndrew J. Bennieston xenbus_dev_fatal(dev, err, "parsing %s/mac", dev->nodename); 18860d160211SJeremy Fitzhardinge goto out; 18872688fcb7SAndrew J. Bennieston } 18882688fcb7SAndrew J. Bennieston 1889ce58725fSDavid Vrabel if (info->queues) 1890ce58725fSDavid Vrabel xennet_destroy_queues(info); 1891ce58725fSDavid Vrabel 1892ce58725fSDavid Vrabel err = xennet_create_queues(info, num_queues); 1893ce58725fSDavid Vrabel if (err < 0) 1894ce58725fSDavid Vrabel goto destroy_ring; 18952688fcb7SAndrew J. Bennieston 18962688fcb7SAndrew J. Bennieston /* Create shared ring, alloc event channel -- for each queue */ 18972688fcb7SAndrew J. Bennieston for (i = 0; i < num_queues; ++i) { 18982688fcb7SAndrew J. Bennieston queue = &info->queues[i]; 18992688fcb7SAndrew J. Bennieston err = setup_netfront(dev, queue, feature_split_evtchn); 19002688fcb7SAndrew J. Bennieston if (err) { 1901ce58725fSDavid Vrabel /* setup_netfront() will tidy up the current 1902ce58725fSDavid Vrabel * queue on error, but we need to clean up 19032688fcb7SAndrew J. Bennieston * those already allocated. 19042688fcb7SAndrew J. Bennieston */ 19052688fcb7SAndrew J. Bennieston if (i > 0) { 19062688fcb7SAndrew J. Bennieston rtnl_lock(); 19072688fcb7SAndrew J. Bennieston netif_set_real_num_tx_queues(info->netdev, i); 19082688fcb7SAndrew J. Bennieston rtnl_unlock(); 19092688fcb7SAndrew J. Bennieston goto destroy_ring; 19102688fcb7SAndrew J. Bennieston } else { 19112688fcb7SAndrew J. Bennieston goto out; 19122688fcb7SAndrew J. Bennieston } 19132688fcb7SAndrew J. Bennieston } 19142688fcb7SAndrew J. Bennieston } 19150d160211SJeremy Fitzhardinge 19160d160211SJeremy Fitzhardinge again: 19170d160211SJeremy Fitzhardinge err = xenbus_transaction_start(&xbt); 19180d160211SJeremy Fitzhardinge if (err) { 19190d160211SJeremy Fitzhardinge xenbus_dev_fatal(dev, err, "starting transaction"); 19200d160211SJeremy Fitzhardinge goto destroy_ring; 19210d160211SJeremy Fitzhardinge } 19220d160211SJeremy Fitzhardinge 192350ee6061SAndrew J. Bennieston if (num_queues == 1) { 192450ee6061SAndrew J. Bennieston err = write_queue_xenstore_keys(&info->queues[0], &xbt, 0); /* flat */ 192550ee6061SAndrew J. Bennieston if (err) 192650ee6061SAndrew J. Bennieston goto abort_transaction_no_dev_fatal; 1927d634bf2cSWei Liu } else { 192850ee6061SAndrew J. Bennieston /* Write the number of queues */ 192950ee6061SAndrew J. Bennieston err = xenbus_printf(xbt, dev->nodename, "multi-queue-num-queues", 193050ee6061SAndrew J. Bennieston "%u", num_queues); 1931d634bf2cSWei Liu if (err) { 193250ee6061SAndrew J. Bennieston message = "writing multi-queue-num-queues"; 193350ee6061SAndrew J. Bennieston goto abort_transaction_no_dev_fatal; 1934d634bf2cSWei Liu } 193550ee6061SAndrew J. Bennieston 193650ee6061SAndrew J. Bennieston /* Write the keys for each queue */ 193750ee6061SAndrew J. Bennieston for (i = 0; i < num_queues; ++i) { 193850ee6061SAndrew J. Bennieston queue = &info->queues[i]; 193950ee6061SAndrew J. Bennieston err = write_queue_xenstore_keys(queue, &xbt, 1); /* hierarchical */ 194050ee6061SAndrew J. Bennieston if (err) 194150ee6061SAndrew J. Bennieston goto abort_transaction_no_dev_fatal; 1942d634bf2cSWei Liu } 1943d634bf2cSWei Liu } 19440d160211SJeremy Fitzhardinge 194550ee6061SAndrew J. Bennieston /* The remaining keys are not queue-specific */ 19460d160211SJeremy Fitzhardinge err = xenbus_printf(xbt, dev->nodename, "request-rx-copy", "%u", 19470d160211SJeremy Fitzhardinge 1); 19480d160211SJeremy Fitzhardinge if (err) { 19490d160211SJeremy Fitzhardinge message = "writing request-rx-copy"; 19500d160211SJeremy Fitzhardinge goto abort_transaction; 19510d160211SJeremy Fitzhardinge } 19520d160211SJeremy Fitzhardinge 19530d160211SJeremy Fitzhardinge err = xenbus_printf(xbt, dev->nodename, "feature-rx-notify", "%d", 1); 19540d160211SJeremy Fitzhardinge if (err) { 19550d160211SJeremy Fitzhardinge message = "writing feature-rx-notify"; 19560d160211SJeremy Fitzhardinge goto abort_transaction; 19570d160211SJeremy Fitzhardinge } 19580d160211SJeremy Fitzhardinge 19590d160211SJeremy Fitzhardinge err = xenbus_printf(xbt, dev->nodename, "feature-sg", "%d", 1); 19600d160211SJeremy Fitzhardinge if (err) { 19610d160211SJeremy Fitzhardinge message = "writing feature-sg"; 19620d160211SJeremy Fitzhardinge goto abort_transaction; 19630d160211SJeremy Fitzhardinge } 19640d160211SJeremy Fitzhardinge 19650d160211SJeremy Fitzhardinge err = xenbus_printf(xbt, dev->nodename, "feature-gso-tcpv4", "%d", 1); 19660d160211SJeremy Fitzhardinge if (err) { 19670d160211SJeremy Fitzhardinge message = "writing feature-gso-tcpv4"; 19680d160211SJeremy Fitzhardinge goto abort_transaction; 19690d160211SJeremy Fitzhardinge } 19700d160211SJeremy Fitzhardinge 19712c0057deSPaul Durrant err = xenbus_write(xbt, dev->nodename, "feature-gso-tcpv6", "1"); 19722c0057deSPaul Durrant if (err) { 19732c0057deSPaul Durrant message = "writing feature-gso-tcpv6"; 19742c0057deSPaul Durrant goto abort_transaction; 19752c0057deSPaul Durrant } 19762c0057deSPaul Durrant 19772c0057deSPaul Durrant err = xenbus_write(xbt, dev->nodename, "feature-ipv6-csum-offload", 19782c0057deSPaul Durrant "1"); 19792c0057deSPaul Durrant if (err) { 19802c0057deSPaul Durrant message = "writing feature-ipv6-csum-offload"; 19812c0057deSPaul Durrant goto abort_transaction; 19822c0057deSPaul Durrant } 19832c0057deSPaul Durrant 19840d160211SJeremy Fitzhardinge err = xenbus_transaction_end(xbt, 0); 19850d160211SJeremy Fitzhardinge if (err) { 19860d160211SJeremy Fitzhardinge if (err == -EAGAIN) 19870d160211SJeremy Fitzhardinge goto again; 19880d160211SJeremy Fitzhardinge xenbus_dev_fatal(dev, err, "completing transaction"); 19890d160211SJeremy Fitzhardinge goto destroy_ring; 19900d160211SJeremy Fitzhardinge } 19910d160211SJeremy Fitzhardinge 19920d160211SJeremy Fitzhardinge return 0; 19930d160211SJeremy Fitzhardinge 19940d160211SJeremy Fitzhardinge abort_transaction: 19950d160211SJeremy Fitzhardinge xenbus_dev_fatal(dev, err, "%s", message); 199650ee6061SAndrew J. Bennieston abort_transaction_no_dev_fatal: 199750ee6061SAndrew J. Bennieston xenbus_transaction_end(xbt, 1); 19980d160211SJeremy Fitzhardinge destroy_ring: 19990d160211SJeremy Fitzhardinge xennet_disconnect_backend(info); 20002688fcb7SAndrew J. Bennieston kfree(info->queues); 20012688fcb7SAndrew J. Bennieston info->queues = NULL; 20022688fcb7SAndrew J. Bennieston rtnl_lock(); 20032688fcb7SAndrew J. Bennieston netif_set_real_num_tx_queues(info->netdev, 0); 2004db8c8ab6SDavid Vrabel rtnl_unlock(); 20050d160211SJeremy Fitzhardinge out: 20060d160211SJeremy Fitzhardinge return err; 20070d160211SJeremy Fitzhardinge } 20080d160211SJeremy Fitzhardinge 20090d160211SJeremy Fitzhardinge static int xennet_connect(struct net_device *dev) 20100d160211SJeremy Fitzhardinge { 20110d160211SJeremy Fitzhardinge struct netfront_info *np = netdev_priv(dev); 20122688fcb7SAndrew J. Bennieston unsigned int num_queues = 0; 20130d160211SJeremy Fitzhardinge int i, requeue_idx, err; 20140d160211SJeremy Fitzhardinge struct sk_buff *skb; 20150d160211SJeremy Fitzhardinge grant_ref_t ref; 20160d160211SJeremy Fitzhardinge struct xen_netif_rx_request *req; 20170d160211SJeremy Fitzhardinge unsigned int feature_rx_copy; 20182688fcb7SAndrew J. Bennieston unsigned int j = 0; 20192688fcb7SAndrew J. Bennieston struct netfront_queue *queue = NULL; 20200d160211SJeremy Fitzhardinge 20210d160211SJeremy Fitzhardinge err = xenbus_scanf(XBT_NIL, np->xbdev->otherend, 20220d160211SJeremy Fitzhardinge "feature-rx-copy", "%u", &feature_rx_copy); 20230d160211SJeremy Fitzhardinge if (err != 1) 20240d160211SJeremy Fitzhardinge feature_rx_copy = 0; 20250d160211SJeremy Fitzhardinge 20260d160211SJeremy Fitzhardinge if (!feature_rx_copy) { 20270d160211SJeremy Fitzhardinge dev_info(&dev->dev, 2028898eb71cSJoe Perches "backend does not support copying receive path\n"); 20290d160211SJeremy Fitzhardinge return -ENODEV; 20300d160211SJeremy Fitzhardinge } 20310d160211SJeremy Fitzhardinge 2032f502bf2bSIan Campbell err = talk_to_netback(np->xbdev, np); 20330d160211SJeremy Fitzhardinge if (err) 20340d160211SJeremy Fitzhardinge return err; 20350d160211SJeremy Fitzhardinge 20362688fcb7SAndrew J. Bennieston /* talk_to_netback() sets the correct number of queues */ 20372688fcb7SAndrew J. Bennieston num_queues = dev->real_num_tx_queues; 20382688fcb7SAndrew J. Bennieston 20391ba37c51SIan Campbell rtnl_lock(); 2040fb507934SMichał Mirosław netdev_update_features(dev); 20411ba37c51SIan Campbell rtnl_unlock(); 20420d160211SJeremy Fitzhardinge 20432688fcb7SAndrew J. Bennieston /* By now, the queue structures have been set up */ 20442688fcb7SAndrew J. Bennieston for (j = 0; j < num_queues; ++j) { 20452688fcb7SAndrew J. Bennieston queue = &np->queues[j]; 20460d160211SJeremy Fitzhardinge 20470d160211SJeremy Fitzhardinge /* Step 1: Discard all pending TX packet fragments. */ 2048f50b4076SDavid Vrabel spin_lock_irq(&queue->tx_lock); 20492688fcb7SAndrew J. Bennieston xennet_release_tx_bufs(queue); 2050f50b4076SDavid Vrabel spin_unlock_irq(&queue->tx_lock); 20510d160211SJeremy Fitzhardinge 20520d160211SJeremy Fitzhardinge /* Step 2: Rebuild the RX buffer freelist and the RX ring itself. */ 2053f50b4076SDavid Vrabel spin_lock_bh(&queue->rx_lock); 2054f50b4076SDavid Vrabel 20550d160211SJeremy Fitzhardinge for (requeue_idx = 0, i = 0; i < NET_RX_RING_SIZE; i++) { 205601c68026SIan Campbell skb_frag_t *frag; 205701c68026SIan Campbell const struct page *page; 20582688fcb7SAndrew J. Bennieston if (!queue->rx_skbs[i]) 20590d160211SJeremy Fitzhardinge continue; 20600d160211SJeremy Fitzhardinge 20612688fcb7SAndrew J. Bennieston skb = queue->rx_skbs[requeue_idx] = xennet_get_rx_skb(queue, i); 20622688fcb7SAndrew J. Bennieston ref = queue->grant_rx_ref[requeue_idx] = xennet_get_rx_ref(queue, i); 20632688fcb7SAndrew J. Bennieston req = RING_GET_REQUEST(&queue->rx, requeue_idx); 20640d160211SJeremy Fitzhardinge 206501c68026SIan Campbell frag = &skb_shinfo(skb)->frags[0]; 206601c68026SIan Campbell page = skb_frag_page(frag); 20670d160211SJeremy Fitzhardinge gnttab_grant_foreign_access_ref( 20682688fcb7SAndrew J. Bennieston ref, queue->info->xbdev->otherend_id, 206901c68026SIan Campbell pfn_to_mfn(page_to_pfn(page)), 20700d160211SJeremy Fitzhardinge 0); 20710d160211SJeremy Fitzhardinge req->gref = ref; 20720d160211SJeremy Fitzhardinge req->id = requeue_idx; 20730d160211SJeremy Fitzhardinge 20740d160211SJeremy Fitzhardinge requeue_idx++; 20750d160211SJeremy Fitzhardinge } 20760d160211SJeremy Fitzhardinge 20772688fcb7SAndrew J. Bennieston queue->rx.req_prod_pvt = requeue_idx; 2078f50b4076SDavid Vrabel 2079f50b4076SDavid Vrabel spin_unlock_bh(&queue->rx_lock); 20802688fcb7SAndrew J. Bennieston } 20810d160211SJeremy Fitzhardinge 20820d160211SJeremy Fitzhardinge /* 20830d160211SJeremy Fitzhardinge * Step 3: All public and private state should now be sane. Get 20840d160211SJeremy Fitzhardinge * ready to start sending and receiving packets and give the driver 20850d160211SJeremy Fitzhardinge * domain a kick because we've probably just requeued some 20860d160211SJeremy Fitzhardinge * packets. 20870d160211SJeremy Fitzhardinge */ 20880d160211SJeremy Fitzhardinge netif_carrier_on(np->netdev); 20892688fcb7SAndrew J. Bennieston for (j = 0; j < num_queues; ++j) { 20902688fcb7SAndrew J. Bennieston queue = &np->queues[j]; 2091f50b4076SDavid Vrabel 20922688fcb7SAndrew J. Bennieston notify_remote_via_irq(queue->tx_irq); 20932688fcb7SAndrew J. Bennieston if (queue->tx_irq != queue->rx_irq) 20942688fcb7SAndrew J. Bennieston notify_remote_via_irq(queue->rx_irq); 20950d160211SJeremy Fitzhardinge 2096f50b4076SDavid Vrabel spin_lock_irq(&queue->tx_lock); 2097f50b4076SDavid Vrabel xennet_tx_buf_gc(queue); 20982688fcb7SAndrew J. Bennieston spin_unlock_irq(&queue->tx_lock); 2099f50b4076SDavid Vrabel 2100f50b4076SDavid Vrabel spin_lock_bh(&queue->rx_lock); 2101f50b4076SDavid Vrabel xennet_alloc_rx_buffers(queue); 21022688fcb7SAndrew J. Bennieston spin_unlock_bh(&queue->rx_lock); 21032688fcb7SAndrew J. Bennieston } 21040d160211SJeremy Fitzhardinge 21050d160211SJeremy Fitzhardinge return 0; 21060d160211SJeremy Fitzhardinge } 21070d160211SJeremy Fitzhardinge 21080d160211SJeremy Fitzhardinge /** 21090d160211SJeremy Fitzhardinge * Callback received when the backend's state changes. 21100d160211SJeremy Fitzhardinge */ 2111f502bf2bSIan Campbell static void netback_changed(struct xenbus_device *dev, 21120d160211SJeremy Fitzhardinge enum xenbus_state backend_state) 21130d160211SJeremy Fitzhardinge { 21141b713e00SGreg Kroah-Hartman struct netfront_info *np = dev_get_drvdata(&dev->dev); 21150d160211SJeremy Fitzhardinge struct net_device *netdev = np->netdev; 21160d160211SJeremy Fitzhardinge 21170d160211SJeremy Fitzhardinge dev_dbg(&dev->dev, "%s\n", xenbus_strstate(backend_state)); 21180d160211SJeremy Fitzhardinge 21190d160211SJeremy Fitzhardinge switch (backend_state) { 21200d160211SJeremy Fitzhardinge case XenbusStateInitialising: 21210d160211SJeremy Fitzhardinge case XenbusStateInitialised: 2122b78c9512SNoboru Iwamatsu case XenbusStateReconfiguring: 2123b78c9512SNoboru Iwamatsu case XenbusStateReconfigured: 21240d160211SJeremy Fitzhardinge case XenbusStateUnknown: 21250d160211SJeremy Fitzhardinge break; 21260d160211SJeremy Fitzhardinge 21270d160211SJeremy Fitzhardinge case XenbusStateInitWait: 21280d160211SJeremy Fitzhardinge if (dev->state != XenbusStateInitialising) 21290d160211SJeremy Fitzhardinge break; 21300d160211SJeremy Fitzhardinge if (xennet_connect(netdev) != 0) 21310d160211SJeremy Fitzhardinge break; 21320d160211SJeremy Fitzhardinge xenbus_switch_state(dev, XenbusStateConnected); 213308e34eb1SLaszlo Ersek break; 213408e34eb1SLaszlo Ersek 213508e34eb1SLaszlo Ersek case XenbusStateConnected: 2136ee89bab1SAmerigo Wang netdev_notify_peers(netdev); 21370d160211SJeremy Fitzhardinge break; 21380d160211SJeremy Fitzhardinge 2139bce3ea81SDavid Vrabel case XenbusStateClosed: 2140bce3ea81SDavid Vrabel if (dev->state == XenbusStateClosed) 2141bce3ea81SDavid Vrabel break; 2142bce3ea81SDavid Vrabel /* Missed the backend's CLOSING state -- fallthrough */ 21430d160211SJeremy Fitzhardinge case XenbusStateClosing: 21440d160211SJeremy Fitzhardinge xenbus_frontend_closed(dev); 21450d160211SJeremy Fitzhardinge break; 21460d160211SJeremy Fitzhardinge } 21470d160211SJeremy Fitzhardinge } 21480d160211SJeremy Fitzhardinge 2149e0ce4af9SIan Campbell static const struct xennet_stat { 2150e0ce4af9SIan Campbell char name[ETH_GSTRING_LEN]; 2151e0ce4af9SIan Campbell u16 offset; 2152e0ce4af9SIan Campbell } xennet_stats[] = { 2153e0ce4af9SIan Campbell { 2154e0ce4af9SIan Campbell "rx_gso_checksum_fixup", 2155e0ce4af9SIan Campbell offsetof(struct netfront_info, rx_gso_checksum_fixup) 2156e0ce4af9SIan Campbell }, 2157e0ce4af9SIan Campbell }; 2158e0ce4af9SIan Campbell 2159e0ce4af9SIan Campbell static int xennet_get_sset_count(struct net_device *dev, int string_set) 2160e0ce4af9SIan Campbell { 2161e0ce4af9SIan Campbell switch (string_set) { 2162e0ce4af9SIan Campbell case ETH_SS_STATS: 2163e0ce4af9SIan Campbell return ARRAY_SIZE(xennet_stats); 2164e0ce4af9SIan Campbell default: 2165e0ce4af9SIan Campbell return -EINVAL; 2166e0ce4af9SIan Campbell } 2167e0ce4af9SIan Campbell } 2168e0ce4af9SIan Campbell 2169e0ce4af9SIan Campbell static void xennet_get_ethtool_stats(struct net_device *dev, 2170e0ce4af9SIan Campbell struct ethtool_stats *stats, u64 * data) 2171e0ce4af9SIan Campbell { 2172e0ce4af9SIan Campbell void *np = netdev_priv(dev); 2173e0ce4af9SIan Campbell int i; 2174e0ce4af9SIan Campbell 2175e0ce4af9SIan Campbell for (i = 0; i < ARRAY_SIZE(xennet_stats); i++) 21762688fcb7SAndrew J. Bennieston data[i] = atomic_read((atomic_t *)(np + xennet_stats[i].offset)); 2177e0ce4af9SIan Campbell } 2178e0ce4af9SIan Campbell 2179e0ce4af9SIan Campbell static void xennet_get_strings(struct net_device *dev, u32 stringset, u8 * data) 2180e0ce4af9SIan Campbell { 2181e0ce4af9SIan Campbell int i; 2182e0ce4af9SIan Campbell 2183e0ce4af9SIan Campbell switch (stringset) { 2184e0ce4af9SIan Campbell case ETH_SS_STATS: 2185e0ce4af9SIan Campbell for (i = 0; i < ARRAY_SIZE(xennet_stats); i++) 2186e0ce4af9SIan Campbell memcpy(data + i * ETH_GSTRING_LEN, 2187e0ce4af9SIan Campbell xennet_stats[i].name, ETH_GSTRING_LEN); 2188e0ce4af9SIan Campbell break; 2189e0ce4af9SIan Campbell } 2190e0ce4af9SIan Campbell } 2191e0ce4af9SIan Campbell 21920fc0b732SStephen Hemminger static const struct ethtool_ops xennet_ethtool_ops = 21930d160211SJeremy Fitzhardinge { 21940d160211SJeremy Fitzhardinge .get_link = ethtool_op_get_link, 2195e0ce4af9SIan Campbell 2196e0ce4af9SIan Campbell .get_sset_count = xennet_get_sset_count, 2197e0ce4af9SIan Campbell .get_ethtool_stats = xennet_get_ethtool_stats, 2198e0ce4af9SIan Campbell .get_strings = xennet_get_strings, 21990d160211SJeremy Fitzhardinge }; 22000d160211SJeremy Fitzhardinge 22010d160211SJeremy Fitzhardinge #ifdef CONFIG_SYSFS 22020d160211SJeremy Fitzhardinge static ssize_t show_rxbuf_min(struct device *dev, 22030d160211SJeremy Fitzhardinge struct device_attribute *attr, char *buf) 22040d160211SJeremy Fitzhardinge { 22050d160211SJeremy Fitzhardinge struct net_device *netdev = to_net_dev(dev); 22060d160211SJeremy Fitzhardinge struct netfront_info *info = netdev_priv(netdev); 22072688fcb7SAndrew J. Bennieston unsigned int num_queues = netdev->real_num_tx_queues; 22080d160211SJeremy Fitzhardinge 22092688fcb7SAndrew J. Bennieston if (num_queues) 22102688fcb7SAndrew J. Bennieston return sprintf(buf, "%u\n", info->queues[0].rx_min_target); 22112688fcb7SAndrew J. Bennieston else 22122688fcb7SAndrew J. Bennieston return sprintf(buf, "%u\n", RX_MIN_TARGET); 22130d160211SJeremy Fitzhardinge } 22140d160211SJeremy Fitzhardinge 22150d160211SJeremy Fitzhardinge static ssize_t store_rxbuf_min(struct device *dev, 22160d160211SJeremy Fitzhardinge struct device_attribute *attr, 22170d160211SJeremy Fitzhardinge const char *buf, size_t len) 22180d160211SJeremy Fitzhardinge { 22190d160211SJeremy Fitzhardinge struct net_device *netdev = to_net_dev(dev); 22200d160211SJeremy Fitzhardinge struct netfront_info *np = netdev_priv(netdev); 22212688fcb7SAndrew J. Bennieston unsigned int num_queues = netdev->real_num_tx_queues; 22220d160211SJeremy Fitzhardinge char *endp; 22230d160211SJeremy Fitzhardinge unsigned long target; 22242688fcb7SAndrew J. Bennieston unsigned int i; 22252688fcb7SAndrew J. Bennieston struct netfront_queue *queue; 22260d160211SJeremy Fitzhardinge 22270d160211SJeremy Fitzhardinge if (!capable(CAP_NET_ADMIN)) 22280d160211SJeremy Fitzhardinge return -EPERM; 22290d160211SJeremy Fitzhardinge 22300d160211SJeremy Fitzhardinge target = simple_strtoul(buf, &endp, 0); 22310d160211SJeremy Fitzhardinge if (endp == buf) 22320d160211SJeremy Fitzhardinge return -EBADMSG; 22330d160211SJeremy Fitzhardinge 22340d160211SJeremy Fitzhardinge if (target < RX_MIN_TARGET) 22350d160211SJeremy Fitzhardinge target = RX_MIN_TARGET; 22360d160211SJeremy Fitzhardinge if (target > RX_MAX_TARGET) 22370d160211SJeremy Fitzhardinge target = RX_MAX_TARGET; 22380d160211SJeremy Fitzhardinge 22392688fcb7SAndrew J. Bennieston for (i = 0; i < num_queues; ++i) { 22402688fcb7SAndrew J. Bennieston queue = &np->queues[i]; 22412688fcb7SAndrew J. Bennieston spin_lock_bh(&queue->rx_lock); 22422688fcb7SAndrew J. Bennieston if (target > queue->rx_max_target) 22432688fcb7SAndrew J. Bennieston queue->rx_max_target = target; 22442688fcb7SAndrew J. Bennieston queue->rx_min_target = target; 22452688fcb7SAndrew J. Bennieston if (target > queue->rx_target) 22462688fcb7SAndrew J. Bennieston queue->rx_target = target; 22470d160211SJeremy Fitzhardinge 22482688fcb7SAndrew J. Bennieston xennet_alloc_rx_buffers(queue); 22490d160211SJeremy Fitzhardinge 22502688fcb7SAndrew J. Bennieston spin_unlock_bh(&queue->rx_lock); 22512688fcb7SAndrew J. Bennieston } 22520d160211SJeremy Fitzhardinge return len; 22530d160211SJeremy Fitzhardinge } 22540d160211SJeremy Fitzhardinge 22550d160211SJeremy Fitzhardinge static ssize_t show_rxbuf_max(struct device *dev, 22560d160211SJeremy Fitzhardinge struct device_attribute *attr, char *buf) 22570d160211SJeremy Fitzhardinge { 22580d160211SJeremy Fitzhardinge struct net_device *netdev = to_net_dev(dev); 22590d160211SJeremy Fitzhardinge struct netfront_info *info = netdev_priv(netdev); 22602688fcb7SAndrew J. Bennieston unsigned int num_queues = netdev->real_num_tx_queues; 22610d160211SJeremy Fitzhardinge 22622688fcb7SAndrew J. Bennieston if (num_queues) 22632688fcb7SAndrew J. Bennieston return sprintf(buf, "%u\n", info->queues[0].rx_max_target); 22642688fcb7SAndrew J. Bennieston else 22652688fcb7SAndrew J. Bennieston return sprintf(buf, "%u\n", RX_MAX_TARGET); 22660d160211SJeremy Fitzhardinge } 22670d160211SJeremy Fitzhardinge 22680d160211SJeremy Fitzhardinge static ssize_t store_rxbuf_max(struct device *dev, 22690d160211SJeremy Fitzhardinge struct device_attribute *attr, 22700d160211SJeremy Fitzhardinge const char *buf, size_t len) 22710d160211SJeremy Fitzhardinge { 22720d160211SJeremy Fitzhardinge struct net_device *netdev = to_net_dev(dev); 22730d160211SJeremy Fitzhardinge struct netfront_info *np = netdev_priv(netdev); 22742688fcb7SAndrew J. Bennieston unsigned int num_queues = netdev->real_num_tx_queues; 22750d160211SJeremy Fitzhardinge char *endp; 22760d160211SJeremy Fitzhardinge unsigned long target; 22772688fcb7SAndrew J. Bennieston unsigned int i = 0; 22782688fcb7SAndrew J. Bennieston struct netfront_queue *queue = NULL; 22790d160211SJeremy Fitzhardinge 22800d160211SJeremy Fitzhardinge if (!capable(CAP_NET_ADMIN)) 22810d160211SJeremy Fitzhardinge return -EPERM; 22820d160211SJeremy Fitzhardinge 22830d160211SJeremy Fitzhardinge target = simple_strtoul(buf, &endp, 0); 22840d160211SJeremy Fitzhardinge if (endp == buf) 22850d160211SJeremy Fitzhardinge return -EBADMSG; 22860d160211SJeremy Fitzhardinge 22870d160211SJeremy Fitzhardinge if (target < RX_MIN_TARGET) 22880d160211SJeremy Fitzhardinge target = RX_MIN_TARGET; 22890d160211SJeremy Fitzhardinge if (target > RX_MAX_TARGET) 22900d160211SJeremy Fitzhardinge target = RX_MAX_TARGET; 22910d160211SJeremy Fitzhardinge 22922688fcb7SAndrew J. Bennieston for (i = 0; i < num_queues; ++i) { 22932688fcb7SAndrew J. Bennieston queue = &np->queues[i]; 22942688fcb7SAndrew J. Bennieston spin_lock_bh(&queue->rx_lock); 22952688fcb7SAndrew J. Bennieston if (target < queue->rx_min_target) 22962688fcb7SAndrew J. Bennieston queue->rx_min_target = target; 22972688fcb7SAndrew J. Bennieston queue->rx_max_target = target; 22982688fcb7SAndrew J. Bennieston if (target < queue->rx_target) 22992688fcb7SAndrew J. Bennieston queue->rx_target = target; 23000d160211SJeremy Fitzhardinge 23012688fcb7SAndrew J. Bennieston xennet_alloc_rx_buffers(queue); 23020d160211SJeremy Fitzhardinge 23032688fcb7SAndrew J. Bennieston spin_unlock_bh(&queue->rx_lock); 23042688fcb7SAndrew J. Bennieston } 23050d160211SJeremy Fitzhardinge return len; 23060d160211SJeremy Fitzhardinge } 23070d160211SJeremy Fitzhardinge 23080d160211SJeremy Fitzhardinge static ssize_t show_rxbuf_cur(struct device *dev, 23090d160211SJeremy Fitzhardinge struct device_attribute *attr, char *buf) 23100d160211SJeremy Fitzhardinge { 23110d160211SJeremy Fitzhardinge struct net_device *netdev = to_net_dev(dev); 23120d160211SJeremy Fitzhardinge struct netfront_info *info = netdev_priv(netdev); 23132688fcb7SAndrew J. Bennieston unsigned int num_queues = netdev->real_num_tx_queues; 23140d160211SJeremy Fitzhardinge 23152688fcb7SAndrew J. Bennieston if (num_queues) 23162688fcb7SAndrew J. Bennieston return sprintf(buf, "%u\n", info->queues[0].rx_target); 23172688fcb7SAndrew J. Bennieston else 23182688fcb7SAndrew J. Bennieston return sprintf(buf, "0\n"); 23190d160211SJeremy Fitzhardinge } 23200d160211SJeremy Fitzhardinge 23210d160211SJeremy Fitzhardinge static struct device_attribute xennet_attrs[] = { 23220d160211SJeremy Fitzhardinge __ATTR(rxbuf_min, S_IRUGO|S_IWUSR, show_rxbuf_min, store_rxbuf_min), 23230d160211SJeremy Fitzhardinge __ATTR(rxbuf_max, S_IRUGO|S_IWUSR, show_rxbuf_max, store_rxbuf_max), 23240d160211SJeremy Fitzhardinge __ATTR(rxbuf_cur, S_IRUGO, show_rxbuf_cur, NULL), 23250d160211SJeremy Fitzhardinge }; 23260d160211SJeremy Fitzhardinge 23270d160211SJeremy Fitzhardinge static int xennet_sysfs_addif(struct net_device *netdev) 23280d160211SJeremy Fitzhardinge { 23290d160211SJeremy Fitzhardinge int i; 23300d160211SJeremy Fitzhardinge int err; 23310d160211SJeremy Fitzhardinge 23320d160211SJeremy Fitzhardinge for (i = 0; i < ARRAY_SIZE(xennet_attrs); i++) { 23330d160211SJeremy Fitzhardinge err = device_create_file(&netdev->dev, 23340d160211SJeremy Fitzhardinge &xennet_attrs[i]); 23350d160211SJeremy Fitzhardinge if (err) 23360d160211SJeremy Fitzhardinge goto fail; 23370d160211SJeremy Fitzhardinge } 23380d160211SJeremy Fitzhardinge return 0; 23390d160211SJeremy Fitzhardinge 23400d160211SJeremy Fitzhardinge fail: 23410d160211SJeremy Fitzhardinge while (--i >= 0) 23420d160211SJeremy Fitzhardinge device_remove_file(&netdev->dev, &xennet_attrs[i]); 23430d160211SJeremy Fitzhardinge return err; 23440d160211SJeremy Fitzhardinge } 23450d160211SJeremy Fitzhardinge 23460d160211SJeremy Fitzhardinge static void xennet_sysfs_delif(struct net_device *netdev) 23470d160211SJeremy Fitzhardinge { 23480d160211SJeremy Fitzhardinge int i; 23490d160211SJeremy Fitzhardinge 23500d160211SJeremy Fitzhardinge for (i = 0; i < ARRAY_SIZE(xennet_attrs); i++) 23510d160211SJeremy Fitzhardinge device_remove_file(&netdev->dev, &xennet_attrs[i]); 23520d160211SJeremy Fitzhardinge } 23530d160211SJeremy Fitzhardinge 23540d160211SJeremy Fitzhardinge #endif /* CONFIG_SYSFS */ 23550d160211SJeremy Fitzhardinge 235673db144bSJan Beulich static const struct xenbus_device_id netfront_ids[] = { 23570d160211SJeremy Fitzhardinge { "vif" }, 23580d160211SJeremy Fitzhardinge { "" } 23590d160211SJeremy Fitzhardinge }; 23600d160211SJeremy Fitzhardinge 23610d160211SJeremy Fitzhardinge 23628e0e46bbSBill Pemberton static int xennet_remove(struct xenbus_device *dev) 23630d160211SJeremy Fitzhardinge { 23641b713e00SGreg Kroah-Hartman struct netfront_info *info = dev_get_drvdata(&dev->dev); 23652688fcb7SAndrew J. Bennieston unsigned int num_queues = info->netdev->real_num_tx_queues; 23662688fcb7SAndrew J. Bennieston struct netfront_queue *queue = NULL; 23672688fcb7SAndrew J. Bennieston unsigned int i = 0; 23680d160211SJeremy Fitzhardinge 23690d160211SJeremy Fitzhardinge dev_dbg(&dev->dev, "%s\n", dev->nodename); 23700d160211SJeremy Fitzhardinge 23710d160211SJeremy Fitzhardinge xennet_disconnect_backend(info); 23720d160211SJeremy Fitzhardinge 23730d160211SJeremy Fitzhardinge xennet_sysfs_delif(info->netdev); 23740d160211SJeremy Fitzhardinge 23756bc96d04SIan Campbell unregister_netdev(info->netdev); 23766bc96d04SIan Campbell 23772688fcb7SAndrew J. Bennieston for (i = 0; i < num_queues; ++i) { 23782688fcb7SAndrew J. Bennieston queue = &info->queues[i]; 23792688fcb7SAndrew J. Bennieston del_timer_sync(&queue->rx_refill_timer); 23802688fcb7SAndrew J. Bennieston } 23812688fcb7SAndrew J. Bennieston 23822688fcb7SAndrew J. Bennieston if (num_queues) { 23832688fcb7SAndrew J. Bennieston kfree(info->queues); 23842688fcb7SAndrew J. Bennieston info->queues = NULL; 23852688fcb7SAndrew J. Bennieston } 23866bc96d04SIan Campbell 2387e00f85beSstephen hemminger free_percpu(info->stats); 2388e00f85beSstephen hemminger 23890d160211SJeremy Fitzhardinge free_netdev(info->netdev); 23900d160211SJeremy Fitzhardinge 23910d160211SJeremy Fitzhardinge return 0; 23920d160211SJeremy Fitzhardinge } 23930d160211SJeremy Fitzhardinge 239473db144bSJan Beulich static DEFINE_XENBUS_DRIVER(netfront, , 23950d160211SJeremy Fitzhardinge .probe = netfront_probe, 23968e0e46bbSBill Pemberton .remove = xennet_remove, 23970d160211SJeremy Fitzhardinge .resume = netfront_resume, 2398f502bf2bSIan Campbell .otherend_changed = netback_changed, 239973db144bSJan Beulich ); 24000d160211SJeremy Fitzhardinge 24010d160211SJeremy Fitzhardinge static int __init netif_init(void) 24020d160211SJeremy Fitzhardinge { 24036e833587SJeremy Fitzhardinge if (!xen_domain()) 24040d160211SJeremy Fitzhardinge return -ENODEV; 24050d160211SJeremy Fitzhardinge 240651c71a3bSKonrad Rzeszutek Wilk if (!xen_has_pv_nic_devices()) 2407b9136d20SIgor Mammedov return -ENODEV; 2408b9136d20SIgor Mammedov 2409383eda32SJoe Perches pr_info("Initialising Xen virtual ethernet driver\n"); 24100d160211SJeremy Fitzhardinge 241150ee6061SAndrew J. Bennieston /* Allow as many queues as there are CPUs, by default */ 241250ee6061SAndrew J. Bennieston xennet_max_queues = num_online_cpus(); 241350ee6061SAndrew J. Bennieston 2414ffb78a26SAl Viro return xenbus_register_frontend(&netfront_driver); 24150d160211SJeremy Fitzhardinge } 24160d160211SJeremy Fitzhardinge module_init(netif_init); 24170d160211SJeremy Fitzhardinge 24180d160211SJeremy Fitzhardinge 24190d160211SJeremy Fitzhardinge static void __exit netif_exit(void) 24200d160211SJeremy Fitzhardinge { 2421ffb78a26SAl Viro xenbus_unregister_driver(&netfront_driver); 24220d160211SJeremy Fitzhardinge } 24230d160211SJeremy Fitzhardinge module_exit(netif_exit); 24240d160211SJeremy Fitzhardinge 24250d160211SJeremy Fitzhardinge MODULE_DESCRIPTION("Xen virtual network device frontend"); 24260d160211SJeremy Fitzhardinge MODULE_LICENSE("GPL"); 2427d2f0c52bSMark McLoughlin MODULE_ALIAS("xen:vif"); 24284f93f09bSMark McLoughlin MODULE_ALIAS("xennet"); 2429