/*
 * Virtual network driver for conversing with remote driver backends.
 *
 * Copyright (c) 2002-2005, K A Fraser
 * Copyright (c) 2005, XenSource Ltd
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/if_ether.h>
#include <net/tcp.h>
#include <linux/udp.h>
#include <linux/moduleparam.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <net/ip.h>

#include <xen/xen.h>
#include <xen/xenbus.h>
#include <xen/events.h>
#include <xen/page.h>
#include <xen/platform_pci.h>
#include <xen/grant_table.h>

#include <xen/interface/io/netif.h>
#include <xen/interface/memory.h>
#include <xen/interface/grant_table.h>

/* Module parameters */
static unsigned int xennet_max_queues;
module_param_named(max_queues, xennet_max_queues, uint, 0644);
MODULE_PARM_DESC(max_queues,
		 "Maximum number of queues per virtual interface");

static const struct ethtool_ops xennet_ethtool_ops;

struct netfront_cb {
	int pull_to;
};

#define NETFRONT_SKB_CB(skb)	((struct netfront_cb *)((skb)->cb))

#define RX_COPY_THRESHOLD 256

#define GRANT_INVALID_REF	0

#define NET_TX_RING_SIZE __CONST_RING_SIZE(xen_netif_tx, PAGE_SIZE)
#define NET_RX_RING_SIZE __CONST_RING_SIZE(xen_netif_rx, PAGE_SIZE)

/* Minimum number of Rx slots (includes slot for GSO metadata). */
#define NET_RX_SLOTS_MIN (XEN_NETIF_NR_SLOTS_MIN + 1)
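
/*
 * Descriptive note (not from the original source): with the common 4 KiB
 * PAGE_SIZE, __CONST_RING_SIZE() works out to 256 slots for both the TX
 * and RX rings. The result is always a power of two, which is what lets
 * ring indices be reduced with a simple mask (see xennet_rxidx() below).
 */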

/* Queue name is interface name with "-qNNN" appended */
#define QUEUE_NAME_SIZE (IFNAMSIZ + 6)

/* IRQ name is queue name with "-tx" or "-rx" appended */
#define IRQ_NAME_SIZE (QUEUE_NAME_SIZE + 3)

struct netfront_stats {
	u64			packets;
	u64			bytes;
	struct u64_stats_sync	syncp;
};

struct netfront_info;

struct netfront_queue {
	unsigned int id; /* Queue ID, 0-based */
	char name[QUEUE_NAME_SIZE]; /* DEVNAME-qN */
	struct netfront_info *info;

	struct napi_struct napi;

	/* Split event channels support, tx_* == rx_* when using
	 * single event channel.
	 */
	unsigned int tx_evtchn, rx_evtchn;
	unsigned int tx_irq, rx_irq;
	/* Only used when split event channels support is enabled */
	char tx_irq_name[IRQ_NAME_SIZE]; /* DEVNAME-qN-tx */
	char rx_irq_name[IRQ_NAME_SIZE]; /* DEVNAME-qN-rx */

	spinlock_t   tx_lock;
	struct xen_netif_tx_front_ring tx;
	int tx_ring_ref;

	/*
	 * {tx,rx}_skbs store outstanding skbuffs. Free tx_skb entries
	 * are linked from tx_skb_freelist through skb_entry.link.
	 *
	 * NB. Freelist index entries are always going to be less than
	 * PAGE_OFFSET, whereas pointers to skbs will always be equal or
	 * greater than PAGE_OFFSET: we use this property to distinguish
	 * them.
	 */
	union skb_entry {
		struct sk_buff *skb;
		unsigned long link;
	} tx_skbs[NET_TX_RING_SIZE];
	grant_ref_t gref_tx_head;
	grant_ref_t grant_tx_ref[NET_TX_RING_SIZE];
	struct page *grant_tx_page[NET_TX_RING_SIZE];
	unsigned tx_skb_freelist;

	spinlock_t   rx_lock ____cacheline_aligned_in_smp;
	struct xen_netif_rx_front_ring rx;
	int rx_ring_ref;

	struct timer_list rx_refill_timer;

	struct sk_buff *rx_skbs[NET_RX_RING_SIZE];
	grant_ref_t gref_rx_head;
	grant_ref_t grant_rx_ref[NET_RX_RING_SIZE];
};

struct netfront_info {
	struct list_head list;
	struct net_device *netdev;

	struct xenbus_device *xbdev;

	/* Multi-queue support */
	struct netfront_queue *queues;

	/* Statistics */
	struct netfront_stats __percpu *rx_stats;
	struct netfront_stats __percpu *tx_stats;

	atomic_t rx_gso_checksum_fixup;
};

struct netfront_rx_info {
	struct xen_netif_rx_response rx;
	struct xen_netif_extra_info extras[XEN_NETIF_EXTRA_TYPE_MAX - 1];
};

static void skb_entry_set_link(union skb_entry *list, unsigned short id)
{
	list->link = id;
}

static int skb_entry_is_link(const union skb_entry *list)
{
	BUILD_BUG_ON(sizeof(list->skb) != sizeof(list->link));
	return (unsigned long)list->skb < PAGE_OFFSET;
}

/*
 * Access macros for acquiring/freeing slots in tx_skbs[].
 */

static void add_id_to_freelist(unsigned *head, union skb_entry *list,
			       unsigned short id)
{
	skb_entry_set_link(&list[id], *head);
	*head = id;
}

static unsigned short get_id_from_freelist(unsigned *head,
					   union skb_entry *list)
{
	unsigned int id = *head;
	*head = list[id].link;
	return id;
}
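
/*
 * Worked example of the freelist encoding (illustrative, not from the
 * original source): if entries 5 and 9 are free and tx_skb_freelist == 5,
 * then tx_skbs[5].link == 9. get_id_from_freelist() pops and returns 5,
 * leaving tx_skb_freelist == 9; add_id_to_freelist(&head, list, 5) pushes
 * it back. Indices and skb pointers can share the union because kernel
 * pointers are always >= PAGE_OFFSET while ring indices never are.
 */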

static int xennet_rxidx(RING_IDX idx)
{
	return idx & (NET_RX_RING_SIZE - 1);
}

static struct sk_buff *xennet_get_rx_skb(struct netfront_queue *queue,
					 RING_IDX ri)
{
	int i = xennet_rxidx(ri);
	struct sk_buff *skb = queue->rx_skbs[i];
	queue->rx_skbs[i] = NULL;
	return skb;
}

static grant_ref_t xennet_get_rx_ref(struct netfront_queue *queue,
				     RING_IDX ri)
{
	int i = xennet_rxidx(ri);
	grant_ref_t ref = queue->grant_rx_ref[i];
	queue->grant_rx_ref[i] = GRANT_INVALID_REF;
	return ref;
}

#ifdef CONFIG_SYSFS
static const struct attribute_group xennet_dev_group;
#endif

static bool xennet_can_sg(struct net_device *dev)
{
	return dev->features & NETIF_F_SG;
}


static void rx_refill_timeout(unsigned long data)
{
	struct netfront_queue *queue = (struct netfront_queue *)data;
	napi_schedule(&queue->napi);
}

static int netfront_tx_slot_available(struct netfront_queue *queue)
{
	return (queue->tx.req_prod_pvt - queue->tx.rsp_cons) <
		(NET_TX_RING_SIZE - MAX_SKB_FRAGS - 2);
}

static void xennet_maybe_wake_tx(struct netfront_queue *queue)
{
	struct net_device *dev = queue->info->netdev;
	struct netdev_queue *dev_queue = netdev_get_tx_queue(dev, queue->id);

	if (unlikely(netif_tx_queue_stopped(dev_queue)) &&
	    netfront_tx_slot_available(queue) &&
	    likely(netif_running(dev)))
		netif_tx_wake_queue(netdev_get_tx_queue(dev, queue->id));
}
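
/*
 * RX buffer layout (descriptive note): each receive buffer is a small skb
 * head of RX_COPY_THRESHOLD bytes plus one freshly allocated page attached
 * as frag 0. The page is what gets granted to the backend; headers are
 * pulled into the linear area only after the packet has been received
 * (see handle_incoming_queue()).
 */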

static struct sk_buff *xennet_alloc_one_rx_buffer(struct netfront_queue *queue)
{
	struct sk_buff *skb;
	struct page *page;

	skb = __netdev_alloc_skb(queue->info->netdev,
				 RX_COPY_THRESHOLD + NET_IP_ALIGN,
				 GFP_ATOMIC | __GFP_NOWARN);
	if (unlikely(!skb))
		return NULL;

	page = alloc_page(GFP_ATOMIC | __GFP_NOWARN);
	if (!page) {
		kfree_skb(skb);
		return NULL;
	}
	skb_add_rx_frag(skb, 0, page, 0, 0, PAGE_SIZE);

	/* Align ip header to a 16-byte boundary */
	skb_reserve(skb, NET_IP_ALIGN);
	skb->dev = queue->info->netdev;

	return skb;
}


static void xennet_alloc_rx_buffers(struct netfront_queue *queue)
{
	RING_IDX req_prod = queue->rx.req_prod_pvt;
	int notify;

	if (unlikely(!netif_carrier_ok(queue->info->netdev)))
		return;

	for (req_prod = queue->rx.req_prod_pvt;
	     req_prod - queue->rx.rsp_cons < NET_RX_RING_SIZE;
	     req_prod++) {
		struct sk_buff *skb;
		unsigned short id;
		grant_ref_t ref;
		unsigned long pfn;
		struct xen_netif_rx_request *req;

		skb = xennet_alloc_one_rx_buffer(queue);
		if (!skb)
			break;

		id = xennet_rxidx(req_prod);

		BUG_ON(queue->rx_skbs[id]);
		queue->rx_skbs[id] = skb;

		ref = gnttab_claim_grant_reference(&queue->gref_rx_head);
		BUG_ON((signed short)ref < 0);
		queue->grant_rx_ref[id] = ref;

		pfn = page_to_pfn(skb_frag_page(&skb_shinfo(skb)->frags[0]));

		req = RING_GET_REQUEST(&queue->rx, req_prod);
		gnttab_grant_foreign_access_ref(ref,
						queue->info->xbdev->otherend_id,
						pfn_to_mfn(pfn),
						0);

		req->id = id;
		req->gref = ref;
	}

	queue->rx.req_prod_pvt = req_prod;

	/* Not enough requests? Try again later. */
	if (req_prod - queue->rx.rsp_cons < NET_RX_SLOTS_MIN) {
		mod_timer(&queue->rx_refill_timer, jiffies + (HZ/10));
		return;
	}

	wmb();		/* barrier so backend sees requests */

	RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&queue->rx, notify);
	if (notify)
		notify_remote_via_irq(queue->rx_irq);
}

static int xennet_open(struct net_device *dev)
{
	struct netfront_info *np = netdev_priv(dev);
	unsigned int num_queues = dev->real_num_tx_queues;
	unsigned int i = 0;
	struct netfront_queue *queue = NULL;

	for (i = 0; i < num_queues; ++i) {
		queue = &np->queues[i];
		napi_enable(&queue->napi);

		spin_lock_bh(&queue->rx_lock);
		if (netif_carrier_ok(dev)) {
			xennet_alloc_rx_buffers(queue);
			queue->rx.sring->rsp_event = queue->rx.rsp_cons + 1;
			if (RING_HAS_UNCONSUMED_RESPONSES(&queue->rx))
				napi_schedule(&queue->napi);
		}
		spin_unlock_bh(&queue->rx_lock);
	}

	netif_tx_start_all_queues(dev);

	return 0;
}
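
/*
 * Reclaim TX resources for requests the backend has responded to: end the
 * grant, return the grant reference and the tx_skbs[] slot to their
 * freelists, and free the skb. Runs under tx_lock, from both
 * xennet_start_xmit() and the TX interrupt handler.
 */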
static void xennet_tx_buf_gc(struct netfront_queue *queue)
{
	RING_IDX cons, prod;
	unsigned short id;
	struct sk_buff *skb;

	BUG_ON(!netif_carrier_ok(queue->info->netdev));

	do {
		prod = queue->tx.sring->rsp_prod;
		rmb(); /* Ensure we see responses up to 'prod'. */

		for (cons = queue->tx.rsp_cons; cons != prod; cons++) {
			struct xen_netif_tx_response *txrsp;

			txrsp = RING_GET_RESPONSE(&queue->tx, cons);
			if (txrsp->status == XEN_NETIF_RSP_NULL)
				continue;

			id  = txrsp->id;
			skb = queue->tx_skbs[id].skb;
			if (unlikely(gnttab_query_foreign_access(
				queue->grant_tx_ref[id]) != 0)) {
				pr_alert("%s: warning -- grant still in use by backend domain\n",
					 __func__);
				BUG();
			}
			gnttab_end_foreign_access_ref(
				queue->grant_tx_ref[id], GNTMAP_readonly);
			gnttab_release_grant_reference(
				&queue->gref_tx_head, queue->grant_tx_ref[id]);
			queue->grant_tx_ref[id] = GRANT_INVALID_REF;
			queue->grant_tx_page[id] = NULL;
			add_id_to_freelist(&queue->tx_skb_freelist, queue->tx_skbs, id);
			dev_kfree_skb_irq(skb);
		}

		queue->tx.rsp_cons = prod;

		/*
		 * Set a new event, then check for race with update of tx_cons.
		 * Note that it is essential to schedule a callback, no matter
		 * how few buffers are pending. Even if there is space in the
		 * transmit ring, higher layers may be blocked because too much
		 * data is outstanding: in such cases notification from Xen is
		 * likely to be the only kick that we'll get.
		 */
		queue->tx.sring->rsp_event =
			prod + ((queue->tx.sring->req_prod - prod) >> 1) + 1;
		mb();		/* update shared area */
	} while ((cons == prod) && (prod != queue->tx.sring->rsp_prod));

	xennet_maybe_wake_tx(queue);
}

static struct xen_netif_tx_request *xennet_make_one_txreq(
	struct netfront_queue *queue, struct sk_buff *skb,
	struct page *page, unsigned int offset, unsigned int len)
{
	unsigned int id;
	struct xen_netif_tx_request *tx;
	grant_ref_t ref;

	len = min_t(unsigned int, PAGE_SIZE - offset, len);

	id = get_id_from_freelist(&queue->tx_skb_freelist, queue->tx_skbs);
	tx = RING_GET_REQUEST(&queue->tx, queue->tx.req_prod_pvt++);
	ref = gnttab_claim_grant_reference(&queue->gref_tx_head);
	BUG_ON((signed short)ref < 0);

	gnttab_grant_foreign_access_ref(ref, queue->info->xbdev->otherend_id,
					page_to_mfn(page), GNTMAP_readonly);

	queue->tx_skbs[id].skb = skb;
	queue->grant_tx_page[id] = page;
	queue->grant_tx_ref[id] = ref;

	tx->id = id;
	tx->gref = ref;
	tx->offset = offset;
	tx->size = len;
	tx->flags = 0;

	return tx;
}

static struct xen_netif_tx_request *xennet_make_txreqs(
	struct netfront_queue *queue, struct xen_netif_tx_request *tx,
	struct sk_buff *skb, struct page *page,
	unsigned int offset, unsigned int len)
{
	/* Skip unused frames from start of page */
	page += offset >> PAGE_SHIFT;
	offset &= ~PAGE_MASK;

	while (len) {
		tx->flags |= XEN_NETTXF_more_data;
		tx = xennet_make_one_txreq(queue, skb_get(skb),
					   page, offset, len);
		page++;
		offset = 0;
		len -= tx->size;
	}

	return tx;
}
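
/*
 * Note: each ring slot can carry at most one page, so the helpers above
 * emit one request per PAGE_SIZE chunk. The counting helper below must
 * agree with that, including for frags that sit in compound pages.
 */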
/*
 * Count how many ring slots are required to send this skb. Each frag
 * might be a compound page.
 */
static int xennet_count_skb_slots(struct sk_buff *skb)
{
	int i, frags = skb_shinfo(skb)->nr_frags;
	int pages;

	pages = PFN_UP(offset_in_page(skb->data) + skb_headlen(skb));

	for (i = 0; i < frags; i++) {
		skb_frag_t *frag = skb_shinfo(skb)->frags + i;
		unsigned long size = skb_frag_size(frag);
		unsigned long offset = frag->page_offset;

		/* Skip unused frames from start of page */
		offset &= ~PAGE_MASK;

		pages += PFN_UP(offset + size);
	}

	return pages;
}

static u16 xennet_select_queue(struct net_device *dev, struct sk_buff *skb,
			       void *accel_priv, select_queue_fallback_t fallback)
{
	unsigned int num_queues = dev->real_num_tx_queues;
	u32 hash;
	u16 queue_idx;

	/* First, check if there is only one queue */
	if (num_queues == 1) {
		queue_idx = 0;
	} else {
		hash = skb_get_hash(skb);
		queue_idx = hash % num_queues;
	}

	return queue_idx;
}
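
/*
 * On-ring layout produced by xennet_start_xmit() below (descriptive
 * note): the first request covers the start of the linear area and, once
 * all requests are queued, is rewritten to carry the total packet length;
 * an optional GSO extra-info slot follows it; further requests cover the
 * rest of the linear area and then each frag. All but the last request
 * have XEN_NETTXF_more_data set.
 */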

static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct netfront_info *np = netdev_priv(dev);
	struct netfront_stats *tx_stats = this_cpu_ptr(np->tx_stats);
	struct xen_netif_tx_request *tx, *first_tx;
	unsigned int i;
	int notify;
	int slots;
	struct page *page;
	unsigned int offset;
	unsigned int len;
	unsigned long flags;
	struct netfront_queue *queue = NULL;
	unsigned int num_queues = dev->real_num_tx_queues;
	u16 queue_index;

	/* Drop the packet if no queues are set up */
	if (num_queues < 1)
		goto drop;
	/* Determine which queue to transmit this SKB on */
	queue_index = skb_get_queue_mapping(skb);
	queue = &np->queues[queue_index];

	/* If skb->len is too big for wire format, drop skb and alert
	 * user about misconfiguration.
	 */
	if (unlikely(skb->len > XEN_NETIF_MAX_TX_SIZE)) {
		net_alert_ratelimited(
			"xennet: skb->len = %u, too big for wire format\n",
			skb->len);
		goto drop;
	}

	slots = xennet_count_skb_slots(skb);
	if (unlikely(slots > MAX_SKB_FRAGS + 1)) {
		net_dbg_ratelimited("xennet: skb rides the rocket: %d slots, %d bytes\n",
				    slots, skb->len);
		if (skb_linearize(skb))
			goto drop;
	}

	page = virt_to_page(skb->data);
	offset = offset_in_page(skb->data);
	len = skb_headlen(skb);

	spin_lock_irqsave(&queue->tx_lock, flags);

	if (unlikely(!netif_carrier_ok(dev) ||
		     (slots > 1 && !xennet_can_sg(dev)) ||
		     netif_needs_gso(skb, netif_skb_features(skb)))) {
		spin_unlock_irqrestore(&queue->tx_lock, flags);
		goto drop;
	}

	/* First request for the linear area. */
	first_tx = tx = xennet_make_one_txreq(queue, skb,
					      page, offset, len);
	page++;
	offset = 0;
	len -= tx->size;

	if (skb->ip_summed == CHECKSUM_PARTIAL)
		/* local packet? */
		tx->flags |= XEN_NETTXF_csum_blank | XEN_NETTXF_data_validated;
	else if (skb->ip_summed == CHECKSUM_UNNECESSARY)
		/* remote but checksummed. */
		tx->flags |= XEN_NETTXF_data_validated;

	/* Optional extra info after the first request. */
	if (skb_shinfo(skb)->gso_size) {
		struct xen_netif_extra_info *gso;

		gso = (struct xen_netif_extra_info *)
			RING_GET_REQUEST(&queue->tx, queue->tx.req_prod_pvt++);

		tx->flags |= XEN_NETTXF_extra_info;

		gso->u.gso.size = skb_shinfo(skb)->gso_size;
		gso->u.gso.type = (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) ?
			XEN_NETIF_GSO_TYPE_TCPV6 :
			XEN_NETIF_GSO_TYPE_TCPV4;
		gso->u.gso.pad = 0;
		gso->u.gso.features = 0;

		gso->type = XEN_NETIF_EXTRA_TYPE_GSO;
		gso->flags = 0;
	}

	/* Requests for the rest of the linear area. */
	tx = xennet_make_txreqs(queue, tx, skb, page, offset, len);

	/* Requests for all the frags. */
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		tx = xennet_make_txreqs(queue, tx, skb,
					skb_frag_page(frag), frag->page_offset,
					skb_frag_size(frag));
	}

	/* First request has the packet length. */
	first_tx->size = skb->len;

	RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&queue->tx, notify);
	if (notify)
		notify_remote_via_irq(queue->tx_irq);

	u64_stats_update_begin(&tx_stats->syncp);
	tx_stats->bytes += skb->len;
	tx_stats->packets++;
	u64_stats_update_end(&tx_stats->syncp);

	/* Note: It is not safe to access skb after xennet_tx_buf_gc()! */
	xennet_tx_buf_gc(queue);

	if (!netfront_tx_slot_available(queue))
		netif_tx_stop_queue(netdev_get_tx_queue(dev, queue->id));

	spin_unlock_irqrestore(&queue->tx_lock, flags);

	return NETDEV_TX_OK;

 drop:
	dev->stats.tx_dropped++;
	dev_kfree_skb_any(skb);
	return NETDEV_TX_OK;
}

static int xennet_close(struct net_device *dev)
{
	struct netfront_info *np = netdev_priv(dev);
	unsigned int num_queues = dev->real_num_tx_queues;
	unsigned int i;
	struct netfront_queue *queue;
	netif_tx_stop_all_queues(np->netdev);
	for (i = 0; i < num_queues; ++i) {
		queue = &np->queues[i];
		napi_disable(&queue->napi);
	}
	return 0;
}
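
/*
 * Descriptive note: the RX error and extra-info paths below recycle a
 * slot instead of freeing it -- the skb and its grant are re-posted at
 * the next free request index, so the buffer stays available to the
 * backend.
 */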
static void xennet_move_rx_slot(struct netfront_queue *queue, struct sk_buff *skb,
				grant_ref_t ref)
{
	int new = xennet_rxidx(queue->rx.req_prod_pvt);

	BUG_ON(queue->rx_skbs[new]);
	queue->rx_skbs[new] = skb;
	queue->grant_rx_ref[new] = ref;
	RING_GET_REQUEST(&queue->rx, queue->rx.req_prod_pvt)->id = new;
	RING_GET_REQUEST(&queue->rx, queue->rx.req_prod_pvt)->gref = ref;
	queue->rx.req_prod_pvt++;
}

static int xennet_get_extras(struct netfront_queue *queue,
			     struct xen_netif_extra_info *extras,
			     RING_IDX rp)
{
	struct xen_netif_extra_info *extra;
	struct device *dev = &queue->info->netdev->dev;
	RING_IDX cons = queue->rx.rsp_cons;
	int err = 0;

	do {
		struct sk_buff *skb;
		grant_ref_t ref;

		if (unlikely(cons + 1 == rp)) {
			if (net_ratelimit())
				dev_warn(dev, "Missing extra info\n");
			err = -EBADR;
			break;
		}

		extra = (struct xen_netif_extra_info *)
			RING_GET_RESPONSE(&queue->rx, ++cons);

		if (unlikely(!extra->type ||
			     extra->type >= XEN_NETIF_EXTRA_TYPE_MAX)) {
			if (net_ratelimit())
				dev_warn(dev, "Invalid extra type: %d\n",
					 extra->type);
			err = -EINVAL;
		} else {
			memcpy(&extras[extra->type - 1], extra,
			       sizeof(*extra));
		}

		skb = xennet_get_rx_skb(queue, cons);
		ref = xennet_get_rx_ref(queue, cons);
		xennet_move_rx_slot(queue, skb, ref);
	} while (extra->flags & XEN_NETIF_EXTRA_FLAG_MORE);

	queue->rx.rsp_cons = cons;
	return err;
}
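
/*
 * Descriptive note: a single packet may span several RX responses chained
 * with XEN_NETRXF_more_data. xennet_get_responses() collects the per-slot
 * skbs on 'list'; xennet_fill_frags() later folds them into the head skb
 * as page frags.
 */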
static int xennet_get_responses(struct netfront_queue *queue,
				struct netfront_rx_info *rinfo, RING_IDX rp,
				struct sk_buff_head *list)
{
	struct xen_netif_rx_response *rx = &rinfo->rx;
	struct xen_netif_extra_info *extras = rinfo->extras;
	struct device *dev = &queue->info->netdev->dev;
	RING_IDX cons = queue->rx.rsp_cons;
	struct sk_buff *skb = xennet_get_rx_skb(queue, cons);
	grant_ref_t ref = xennet_get_rx_ref(queue, cons);
	int max = MAX_SKB_FRAGS + (rx->status <= RX_COPY_THRESHOLD);
	int slots = 1;
	int err = 0;
	unsigned long ret;

	if (rx->flags & XEN_NETRXF_extra_info) {
		err = xennet_get_extras(queue, extras, rp);
		cons = queue->rx.rsp_cons;
	}

	for (;;) {
		if (unlikely(rx->status < 0 ||
			     rx->offset + rx->status > PAGE_SIZE)) {
			if (net_ratelimit())
				dev_warn(dev, "rx->offset: %u, size: %d\n",
					 rx->offset, rx->status);
			xennet_move_rx_slot(queue, skb, ref);
			err = -EINVAL;
			goto next;
		}

		/*
		 * This definitely indicates a bug, either in this driver or in
		 * the backend driver. In future this should flag the bad
		 * situation to the system controller to reboot the backend.
		 */
		if (ref == GRANT_INVALID_REF) {
			if (net_ratelimit())
				dev_warn(dev, "Bad rx response id %d.\n",
					 rx->id);
			err = -EINVAL;
			goto next;
		}

		ret = gnttab_end_foreign_access_ref(ref, 0);
		BUG_ON(!ret);

		gnttab_release_grant_reference(&queue->gref_rx_head, ref);

		__skb_queue_tail(list, skb);

next:
		if (!(rx->flags & XEN_NETRXF_more_data))
			break;

		if (cons + slots == rp) {
			if (net_ratelimit())
				dev_warn(dev, "Need more slots\n");
			err = -ENOENT;
			break;
		}

		rx = RING_GET_RESPONSE(&queue->rx, cons + slots);
		skb = xennet_get_rx_skb(queue, cons + slots);
		ref = xennet_get_rx_ref(queue, cons + slots);
		slots++;
	}

	if (unlikely(slots > max)) {
		if (net_ratelimit())
			dev_warn(dev, "Too many slots\n");
		err = -E2BIG;
	}

	if (unlikely(err))
		queue->rx.rsp_cons = cons + slots;

	return err;
}

static int xennet_set_skb_gso(struct sk_buff *skb,
			      struct xen_netif_extra_info *gso)
{
	if (!gso->u.gso.size) {
		if (net_ratelimit())
			pr_warn("GSO size must not be zero\n");
		return -EINVAL;
	}

	if (gso->u.gso.type != XEN_NETIF_GSO_TYPE_TCPV4 &&
	    gso->u.gso.type != XEN_NETIF_GSO_TYPE_TCPV6) {
		if (net_ratelimit())
			pr_warn("Bad GSO type %d\n", gso->u.gso.type);
		return -EINVAL;
	}

	skb_shinfo(skb)->gso_size = gso->u.gso.size;
	skb_shinfo(skb)->gso_type =
		(gso->u.gso.type == XEN_NETIF_GSO_TYPE_TCPV4) ?
		SKB_GSO_TCPV4 :
		SKB_GSO_TCPV6;

	/* Header must be checked, and gso_segs computed. */
	skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY;
	skb_shinfo(skb)->gso_segs = 0;

	return 0;
}
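
/*
 * Note: each skb queued on 'list' owns exactly one page in frag 0 (see
 * xennet_alloc_one_rx_buffer()); below, that page is moved into the head
 * skb as a new frag and the now-empty skb is freed.
 */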
static RING_IDX xennet_fill_frags(struct netfront_queue *queue,
				  struct sk_buff *skb,
				  struct sk_buff_head *list)
{
	struct skb_shared_info *shinfo = skb_shinfo(skb);
	RING_IDX cons = queue->rx.rsp_cons;
	struct sk_buff *nskb;

	while ((nskb = __skb_dequeue(list))) {
		struct xen_netif_rx_response *rx =
			RING_GET_RESPONSE(&queue->rx, ++cons);
		skb_frag_t *nfrag = &skb_shinfo(nskb)->frags[0];

		if (shinfo->nr_frags == MAX_SKB_FRAGS) {
			unsigned int pull_to = NETFRONT_SKB_CB(skb)->pull_to;

			BUG_ON(pull_to <= skb_headlen(skb));
			__pskb_pull_tail(skb, pull_to - skb_headlen(skb));
		}
		BUG_ON(shinfo->nr_frags >= MAX_SKB_FRAGS);

		skb_add_rx_frag(skb, shinfo->nr_frags, skb_frag_page(nfrag),
				rx->offset, rx->status, PAGE_SIZE);

		skb_shinfo(nskb)->nr_frags = 0;
		kfree_skb(nskb);
	}

	return cons;
}

static int checksum_setup(struct net_device *dev, struct sk_buff *skb)
{
	bool recalculate_partial_csum = false;

	/*
	 * A GSO SKB must be CHECKSUM_PARTIAL. However some buggy
	 * peers can fail to set NETRXF_csum_blank when sending a GSO
	 * frame. In this case force the SKB to CHECKSUM_PARTIAL and
	 * recalculate the partial checksum.
	 */
	if (skb->ip_summed != CHECKSUM_PARTIAL && skb_is_gso(skb)) {
		struct netfront_info *np = netdev_priv(dev);
		atomic_inc(&np->rx_gso_checksum_fixup);
		skb->ip_summed = CHECKSUM_PARTIAL;
		recalculate_partial_csum = true;
	}

	/* A non-CHECKSUM_PARTIAL SKB does not require setup. */
	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return 0;

	return skb_checksum_setup(skb, recalculate_partial_csum);
}
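
/*
 * Final per-packet work before handing skbs to the stack (descriptive
 * note): pull up to pull_to bytes of header into the linear area, set the
 * protocol, fix up checksum state and pass the packet to GRO.
 */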
static int handle_incoming_queue(struct netfront_queue *queue,
				 struct sk_buff_head *rxq)
{
	struct netfront_stats *rx_stats = this_cpu_ptr(queue->info->rx_stats);
	int packets_dropped = 0;
	struct sk_buff *skb;

	while ((skb = __skb_dequeue(rxq)) != NULL) {
		int pull_to = NETFRONT_SKB_CB(skb)->pull_to;

		if (pull_to > skb_headlen(skb))
			__pskb_pull_tail(skb, pull_to - skb_headlen(skb));

		/* Ethernet work: Delayed to here as it peeks the header. */
		skb->protocol = eth_type_trans(skb, queue->info->netdev);
		skb_reset_network_header(skb);

		if (checksum_setup(queue->info->netdev, skb)) {
			kfree_skb(skb);
			packets_dropped++;
			queue->info->netdev->stats.rx_errors++;
			continue;
		}

		u64_stats_update_begin(&rx_stats->syncp);
		rx_stats->packets++;
		rx_stats->bytes += skb->len;
		u64_stats_update_end(&rx_stats->syncp);

		/* Pass it up. */
		napi_gro_receive(&queue->napi, skb);
	}

	return packets_dropped;
}
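
/*
 * NAPI poll (descriptive note): drain up to 'budget' packets from the RX
 * ring. Packets that fail validation are collected on errq and freed;
 * good ones are assembled and delivered via handle_incoming_queue(). The
 * ring is refilled before deciding whether to re-arm or reschedule.
 */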
static int xennet_poll(struct napi_struct *napi, int budget)
{
	struct netfront_queue *queue = container_of(napi, struct netfront_queue, napi);
	struct net_device *dev = queue->info->netdev;
	struct sk_buff *skb;
	struct netfront_rx_info rinfo;
	struct xen_netif_rx_response *rx = &rinfo.rx;
	struct xen_netif_extra_info *extras = rinfo.extras;
	RING_IDX i, rp;
	int work_done;
	struct sk_buff_head rxq;
	struct sk_buff_head errq;
	struct sk_buff_head tmpq;
	int err;

	spin_lock(&queue->rx_lock);

	skb_queue_head_init(&rxq);
	skb_queue_head_init(&errq);
	skb_queue_head_init(&tmpq);

	rp = queue->rx.sring->rsp_prod;
	rmb(); /* Ensure we see queued responses up to 'rp'. */

	i = queue->rx.rsp_cons;
	work_done = 0;
	while ((i != rp) && (work_done < budget)) {
		memcpy(rx, RING_GET_RESPONSE(&queue->rx, i), sizeof(*rx));
		memset(extras, 0, sizeof(rinfo.extras));

		err = xennet_get_responses(queue, &rinfo, rp, &tmpq);

		if (unlikely(err)) {
err:
			while ((skb = __skb_dequeue(&tmpq)))
				__skb_queue_tail(&errq, skb);
			dev->stats.rx_errors++;
			i = queue->rx.rsp_cons;
			continue;
		}

		skb = __skb_dequeue(&tmpq);

		if (extras[XEN_NETIF_EXTRA_TYPE_GSO - 1].type) {
			struct xen_netif_extra_info *gso;
			gso = &extras[XEN_NETIF_EXTRA_TYPE_GSO - 1];

			if (unlikely(xennet_set_skb_gso(skb, gso))) {
				__skb_queue_head(&tmpq, skb);
				queue->rx.rsp_cons += skb_queue_len(&tmpq);
				goto err;
			}
		}

		NETFRONT_SKB_CB(skb)->pull_to = rx->status;
		if (NETFRONT_SKB_CB(skb)->pull_to > RX_COPY_THRESHOLD)
			NETFRONT_SKB_CB(skb)->pull_to = RX_COPY_THRESHOLD;

		skb_shinfo(skb)->frags[0].page_offset = rx->offset;
		skb_frag_size_set(&skb_shinfo(skb)->frags[0], rx->status);
		skb->data_len = rx->status;
		skb->len += rx->status;

		i = xennet_fill_frags(queue, skb, &tmpq);

		if (rx->flags & XEN_NETRXF_csum_blank)
			skb->ip_summed = CHECKSUM_PARTIAL;
		else if (rx->flags & XEN_NETRXF_data_validated)
			skb->ip_summed = CHECKSUM_UNNECESSARY;

		__skb_queue_tail(&rxq, skb);

		queue->rx.rsp_cons = ++i;
		work_done++;
	}

	__skb_queue_purge(&errq);

	work_done -= handle_incoming_queue(queue, &rxq);

	xennet_alloc_rx_buffers(queue);

	if (work_done < budget) {
		int more_to_do = 0;

		napi_complete(napi);

		RING_FINAL_CHECK_FOR_RESPONSES(&queue->rx, more_to_do);
		if (more_to_do)
			napi_schedule(napi);
	}

	spin_unlock(&queue->rx_lock);

	return work_done;
}
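
/*
 * Descriptive note: the MTU ceiling depends on scatter-gather. Without SG
 * a packet must fit the default Ethernet payload; with SG it may be up to
 * XEN_NETIF_MAX_TX_SIZE, the wire-format limit also checked in
 * xennet_start_xmit().
 */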
static int xennet_change_mtu(struct net_device *dev, int mtu)
{
	int max = xennet_can_sg(dev) ? XEN_NETIF_MAX_TX_SIZE : ETH_DATA_LEN;

	if (mtu > max)
		return -EINVAL;
	dev->mtu = mtu;
	return 0;
}

static struct rtnl_link_stats64 *xennet_get_stats64(struct net_device *dev,
						    struct rtnl_link_stats64 *tot)
{
	struct netfront_info *np = netdev_priv(dev);
	int cpu;

	for_each_possible_cpu(cpu) {
		struct netfront_stats *rx_stats = per_cpu_ptr(np->rx_stats, cpu);
		struct netfront_stats *tx_stats = per_cpu_ptr(np->tx_stats, cpu);
		u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
		unsigned int start;

		do {
			start = u64_stats_fetch_begin_irq(&tx_stats->syncp);
			tx_packets = tx_stats->packets;
			tx_bytes = tx_stats->bytes;
		} while (u64_stats_fetch_retry_irq(&tx_stats->syncp, start));

		do {
			start = u64_stats_fetch_begin_irq(&rx_stats->syncp);
			rx_packets = rx_stats->packets;
			rx_bytes = rx_stats->bytes;
		} while (u64_stats_fetch_retry_irq(&rx_stats->syncp, start));

		tot->rx_packets += rx_packets;
		tot->tx_packets += tx_packets;
		tot->rx_bytes += rx_bytes;
		tot->tx_bytes += tx_bytes;
	}

	tot->rx_errors = dev->stats.rx_errors;
	tot->tx_dropped = dev->stats.tx_dropped;

	return tot;
}
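
/*
 * Teardown helpers (descriptive note): outstanding grants must be revoked
 * before the pages backing them can be freed. A page reference is taken
 * first because gnttab_end_foreign_access() may defer the free if the
 * backend still holds the grant.
 */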
static struct rtnl_link_stats64 *xennet_get_stats64(struct net_device *dev,
						    struct rtnl_link_stats64 *tot)
{
	struct netfront_info *np = netdev_priv(dev);
	int cpu;

	for_each_possible_cpu(cpu) {
		struct netfront_stats *rx_stats = per_cpu_ptr(np->rx_stats, cpu);
		struct netfront_stats *tx_stats = per_cpu_ptr(np->tx_stats, cpu);
		u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
		unsigned int start;

		do {
			start = u64_stats_fetch_begin_irq(&tx_stats->syncp);
			tx_packets = tx_stats->packets;
			tx_bytes = tx_stats->bytes;
		} while (u64_stats_fetch_retry_irq(&tx_stats->syncp, start));

		do {
			start = u64_stats_fetch_begin_irq(&rx_stats->syncp);
			rx_packets = rx_stats->packets;
			rx_bytes = rx_stats->bytes;
		} while (u64_stats_fetch_retry_irq(&rx_stats->syncp, start));

		tot->rx_packets += rx_packets;
		tot->tx_packets += tx_packets;
		tot->rx_bytes   += rx_bytes;
		tot->tx_bytes   += tx_bytes;
	}

	tot->rx_errors  = dev->stats.rx_errors;
	tot->tx_dropped = dev->stats.tx_dropped;

	return tot;
}

static void xennet_release_tx_bufs(struct netfront_queue *queue)
{
	struct sk_buff *skb;
	int i;

	for (i = 0; i < NET_TX_RING_SIZE; i++) {
		/* Skip over entries which are actually freelist references */
		if (skb_entry_is_link(&queue->tx_skbs[i]))
			continue;

		skb = queue->tx_skbs[i].skb;
		get_page(queue->grant_tx_page[i]);
		gnttab_end_foreign_access(queue->grant_tx_ref[i],
					  GNTMAP_readonly,
					  (unsigned long)page_address(queue->grant_tx_page[i]));
		queue->grant_tx_page[i] = NULL;
		queue->grant_tx_ref[i] = GRANT_INVALID_REF;
		add_id_to_freelist(&queue->tx_skb_freelist, queue->tx_skbs, i);
		dev_kfree_skb_irq(skb);
	}
}

static void xennet_release_rx_bufs(struct netfront_queue *queue)
{
	int id, ref;

	spin_lock_bh(&queue->rx_lock);

	for (id = 0; id < NET_RX_RING_SIZE; id++) {
		struct sk_buff *skb;
		struct page *page;

		skb = queue->rx_skbs[id];
		if (!skb)
			continue;

		ref = queue->grant_rx_ref[id];
		if (ref == GRANT_INVALID_REF)
			continue;

		page = skb_frag_page(&skb_shinfo(skb)->frags[0]);

		/* gnttab_end_foreign_access() needs a page ref until
		 * foreign access is ended (which may be deferred).
		 */
		get_page(page);
		gnttab_end_foreign_access(ref, 0,
					  (unsigned long)page_address(page));
		queue->grant_rx_ref[id] = GRANT_INVALID_REF;

		kfree_skb(skb);
	}

	spin_unlock_bh(&queue->rx_lock);
}
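/* Offload features are only advertised to the stack if the backend has
 * published the matching "feature-*" node in XenStore; anything the
 * backend does not claim to support is masked out here.
 */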
static netdev_features_t xennet_fix_features(struct net_device *dev,
					     netdev_features_t features)
{
	struct netfront_info *np = netdev_priv(dev);
	int val;

	if (features & NETIF_F_SG) {
		if (xenbus_scanf(XBT_NIL, np->xbdev->otherend, "feature-sg",
				 "%d", &val) < 0)
			val = 0;

		if (!val)
			features &= ~NETIF_F_SG;
	}

	if (features & NETIF_F_IPV6_CSUM) {
		if (xenbus_scanf(XBT_NIL, np->xbdev->otherend,
				 "feature-ipv6-csum-offload", "%d", &val) < 0)
			val = 0;

		if (!val)
			features &= ~NETIF_F_IPV6_CSUM;
	}

	if (features & NETIF_F_TSO) {
		if (xenbus_scanf(XBT_NIL, np->xbdev->otherend,
				 "feature-gso-tcpv4", "%d", &val) < 0)
			val = 0;

		if (!val)
			features &= ~NETIF_F_TSO;
	}

	if (features & NETIF_F_TSO6) {
		if (xenbus_scanf(XBT_NIL, np->xbdev->otherend,
				 "feature-gso-tcpv6", "%d", &val) < 0)
			val = 0;

		if (!val)
			features &= ~NETIF_F_TSO6;
	}

	return features;
}

static int xennet_set_features(struct net_device *dev,
			       netdev_features_t features)
{
	if (!(features & NETIF_F_SG) && dev->mtu > ETH_DATA_LEN) {
		netdev_info(dev, "Reducing MTU because no SG offload\n");
		dev->mtu = ETH_DATA_LEN;
	}

	return 0;
}
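/* Interrupt handlers: with split event channels the backend signals tx
 * completions and rx traffic on separate channels, so each direction
 * gets its own handler; with a single shared channel,
 * xennet_interrupt() simply runs both in turn.
 */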
static irqreturn_t xennet_tx_interrupt(int irq, void *dev_id)
{
	struct netfront_queue *queue = dev_id;
	unsigned long flags;

	spin_lock_irqsave(&queue->tx_lock, flags);
	xennet_tx_buf_gc(queue);
	spin_unlock_irqrestore(&queue->tx_lock, flags);

	return IRQ_HANDLED;
}

static irqreturn_t xennet_rx_interrupt(int irq, void *dev_id)
{
	struct netfront_queue *queue = dev_id;
	struct net_device *dev = queue->info->netdev;

	if (likely(netif_carrier_ok(dev) &&
		   RING_HAS_UNCONSUMED_RESPONSES(&queue->rx)))
		napi_schedule(&queue->napi);

	return IRQ_HANDLED;
}

static irqreturn_t xennet_interrupt(int irq, void *dev_id)
{
	xennet_tx_interrupt(irq, dev_id);
	xennet_rx_interrupt(irq, dev_id);
	return IRQ_HANDLED;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void xennet_poll_controller(struct net_device *dev)
{
	/* Poll each queue */
	struct netfront_info *info = netdev_priv(dev);
	unsigned int num_queues = dev->real_num_tx_queues;
	unsigned int i;

	for (i = 0; i < num_queues; ++i)
		xennet_interrupt(0, &info->queues[i]);
}
#endif

static const struct net_device_ops xennet_netdev_ops = {
	.ndo_open            = xennet_open,
	.ndo_stop            = xennet_close,
	.ndo_start_xmit      = xennet_start_xmit,
	.ndo_change_mtu	     = xennet_change_mtu,
	.ndo_get_stats64     = xennet_get_stats64,
	.ndo_set_mac_address = eth_mac_addr,
	.ndo_validate_addr   = eth_validate_addr,
	.ndo_fix_features    = xennet_fix_features,
	.ndo_set_features    = xennet_set_features,
	.ndo_select_queue    = xennet_select_queue,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = xennet_poll_controller,
#endif
};

static void xennet_free_netdev(struct net_device *netdev)
{
	struct netfront_info *np = netdev_priv(netdev);

	free_percpu(np->rx_stats);
	free_percpu(np->tx_stats);
	free_netdev(netdev);
}

static struct net_device *xennet_create_dev(struct xenbus_device *dev)
{
	int err;
	struct net_device *netdev;
	struct netfront_info *np;

	netdev = alloc_etherdev_mq(sizeof(struct netfront_info), xennet_max_queues);
	if (!netdev)
		return ERR_PTR(-ENOMEM);

	np = netdev_priv(netdev);
	np->xbdev = dev;

	np->queues = NULL;

	err = -ENOMEM;
	np->rx_stats = netdev_alloc_pcpu_stats(struct netfront_stats);
	if (np->rx_stats == NULL)
		goto exit;
	np->tx_stats = netdev_alloc_pcpu_stats(struct netfront_stats);
	if (np->tx_stats == NULL)
		goto exit;

	netdev->netdev_ops = &xennet_netdev_ops;

	netdev->features = NETIF_F_IP_CSUM | NETIF_F_RXCSUM |
			   NETIF_F_GSO_ROBUST;
	netdev->hw_features = NETIF_F_SG |
			      NETIF_F_IPV6_CSUM |
			      NETIF_F_TSO | NETIF_F_TSO6;

	/*
	 * Assume that all hw features are available for now. This set
	 * will be adjusted by the call to netdev_update_features() in
	 * xennet_connect() which is the earliest point where we can
	 * negotiate with the backend regarding supported features.
	 */
	netdev->features |= netdev->hw_features;

	netdev->ethtool_ops = &xennet_ethtool_ops;
	SET_NETDEV_DEV(netdev, &dev->dev);

	np->netdev = netdev;

	netif_carrier_off(netdev);

	return netdev;

 exit:
	xennet_free_netdev(netdev);
	return ERR_PTR(err);
}

/**
 * Entry point to this code when a new device is created.  Allocate the basic
 * structures and the ring buffers for communication with the backend, and
 * inform the backend of the appropriate details for those.
 */
static int netfront_probe(struct xenbus_device *dev,
			  const struct xenbus_device_id *id)
{
	int err;
	struct net_device *netdev;
	struct netfront_info *info;

	netdev = xennet_create_dev(dev);
	if (IS_ERR(netdev)) {
		err = PTR_ERR(netdev);
		xenbus_dev_fatal(dev, err, "creating netdev");
		return err;
	}

	info = netdev_priv(netdev);
	dev_set_drvdata(&dev->dev, info);
#ifdef CONFIG_SYSFS
	info->netdev->sysfs_groups[0] = &xennet_dev_group;
#endif
	err = register_netdev(info->netdev);
	if (err) {
		pr_warn("%s: register_netdev err=%d\n", __func__, err);
		goto fail;
	}

	return 0;

 fail:
	xennet_free_netdev(netdev);
	dev_set_drvdata(&dev->dev, NULL);
	return err;
}

static void xennet_end_access(int ref, void *page)
{
	/* This frees the page as a side-effect */
	if (ref != GRANT_INVALID_REF)
		gnttab_end_foreign_access(ref, 0, (unsigned long)page);
}
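/* Tear down the connection to the backend in a safe order: first quiesce
 * the interrupt sources and NAPI, then reclaim any buffers still granted
 * to the backend, and finally revoke and free the shared ring pages.
 */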
static void xennet_disconnect_backend(struct netfront_info *info)
{
	unsigned int i = 0;
	unsigned int num_queues = info->netdev->real_num_tx_queues;

	netif_carrier_off(info->netdev);

	for (i = 0; i < num_queues && info->queues; ++i) {
		struct netfront_queue *queue = &info->queues[i];

		if (queue->tx_irq && (queue->tx_irq == queue->rx_irq))
			unbind_from_irqhandler(queue->tx_irq, queue);
		if (queue->tx_irq && (queue->tx_irq != queue->rx_irq)) {
			unbind_from_irqhandler(queue->tx_irq, queue);
			unbind_from_irqhandler(queue->rx_irq, queue);
		}
		queue->tx_evtchn = queue->rx_evtchn = 0;
		queue->tx_irq = queue->rx_irq = 0;

		napi_synchronize(&queue->napi);

		xennet_release_tx_bufs(queue);
		xennet_release_rx_bufs(queue);
		gnttab_free_grant_references(queue->gref_tx_head);
		gnttab_free_grant_references(queue->gref_rx_head);

		/* End access and free the pages */
		xennet_end_access(queue->tx_ring_ref, queue->tx.sring);
		xennet_end_access(queue->rx_ring_ref, queue->rx.sring);

		queue->tx_ring_ref = GRANT_INVALID_REF;
		queue->rx_ring_ref = GRANT_INVALID_REF;
		queue->tx.sring = NULL;
		queue->rx.sring = NULL;
	}
}

/**
 * We are reconnecting to the backend, due to a suspend/resume, or a backend
 * driver restart.  We tear down our netif structure and recreate it, but
 * leave the device-layer structures intact so that this is transparent to the
 * rest of the kernel.
 */
static int netfront_resume(struct xenbus_device *dev)
{
	struct netfront_info *info = dev_get_drvdata(&dev->dev);

	dev_dbg(&dev->dev, "%s\n", dev->nodename);

	xennet_disconnect_backend(info);
	return 0;
}
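/* Parse the "mac" node the toolstack writes for this vif: six
 * colon-separated hex octets, e.g. "00:16:3e:01:02:03" (the 00:16:3e
 * prefix is the OUI conventionally used for Xen guest NICs; the example
 * address is illustrative).
 */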
static int xen_net_read_mac(struct xenbus_device *dev, u8 mac[])
{
	char *s, *e, *macstr;
	int i;

	macstr = s = xenbus_read(XBT_NIL, dev->nodename, "mac", NULL);
	if (IS_ERR(macstr))
		return PTR_ERR(macstr);

	for (i = 0; i < ETH_ALEN; i++) {
		mac[i] = simple_strtoul(s, &e, 16);
		if ((s == e) || (*e != ((i == ETH_ALEN-1) ? '\0' : ':'))) {
			kfree(macstr);
			return -ENOENT;
		}
		s = e+1;
	}

	kfree(macstr);
	return 0;
}
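/* Event-channel setup.  The backend may offer a single channel shared by
 * tx and rx, or (if it advertises feature-split-event-channels) a
 * dedicated channel per direction, so tx completions and rx traffic can
 * be handled and steered to CPUs independently.
 */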
static int setup_netfront_single(struct netfront_queue *queue)
{
	int err;

	err = xenbus_alloc_evtchn(queue->info->xbdev, &queue->tx_evtchn);
	if (err < 0)
		goto fail;

	err = bind_evtchn_to_irqhandler(queue->tx_evtchn,
					xennet_interrupt,
					0, queue->info->netdev->name, queue);
	if (err < 0)
		goto bind_fail;
	queue->rx_evtchn = queue->tx_evtchn;
	queue->rx_irq = queue->tx_irq = err;

	return 0;

bind_fail:
	xenbus_free_evtchn(queue->info->xbdev, queue->tx_evtchn);
	queue->tx_evtchn = 0;
fail:
	return err;
}

static int setup_netfront_split(struct netfront_queue *queue)
{
	int err;

	err = xenbus_alloc_evtchn(queue->info->xbdev, &queue->tx_evtchn);
	if (err < 0)
		goto fail;
	err = xenbus_alloc_evtchn(queue->info->xbdev, &queue->rx_evtchn);
	if (err < 0)
		goto alloc_rx_evtchn_fail;

	snprintf(queue->tx_irq_name, sizeof(queue->tx_irq_name),
		 "%s-tx", queue->name);
	err = bind_evtchn_to_irqhandler(queue->tx_evtchn,
					xennet_tx_interrupt,
					0, queue->tx_irq_name, queue);
	if (err < 0)
		goto bind_tx_fail;
	queue->tx_irq = err;

	snprintf(queue->rx_irq_name, sizeof(queue->rx_irq_name),
		 "%s-rx", queue->name);
	err = bind_evtchn_to_irqhandler(queue->rx_evtchn,
					xennet_rx_interrupt,
					0, queue->rx_irq_name, queue);
	if (err < 0)
		goto bind_rx_fail;
	queue->rx_irq = err;

	return 0;

bind_rx_fail:
	unbind_from_irqhandler(queue->tx_irq, queue);
	queue->tx_irq = 0;
bind_tx_fail:
	xenbus_free_evtchn(queue->info->xbdev, queue->rx_evtchn);
	queue->rx_evtchn = 0;
alloc_rx_evtchn_fail:
	xenbus_free_evtchn(queue->info->xbdev, queue->tx_evtchn);
	queue->tx_evtchn = 0;
fail:
	return err;
}
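/* Allocate the shared tx and rx ring pages, grant the backend access to
 * them, and bind the event channel(s).  On failure the grants are simply
 * revoked and the pages freed, since the backend is not yet using them.
 */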
static int setup_netfront(struct xenbus_device *dev,
			  struct netfront_queue *queue, unsigned int feature_split_evtchn)
{
	struct xen_netif_tx_sring *txs;
	struct xen_netif_rx_sring *rxs;
	grant_ref_t gref;
	int err;

	queue->tx_ring_ref = GRANT_INVALID_REF;
	queue->rx_ring_ref = GRANT_INVALID_REF;
	queue->rx.sring = NULL;
	queue->tx.sring = NULL;

	txs = (struct xen_netif_tx_sring *)get_zeroed_page(GFP_NOIO | __GFP_HIGH);
	if (!txs) {
		err = -ENOMEM;
		xenbus_dev_fatal(dev, err, "allocating tx ring page");
		goto fail;
	}
	SHARED_RING_INIT(txs);
	FRONT_RING_INIT(&queue->tx, txs, PAGE_SIZE);

	err = xenbus_grant_ring(dev, txs, 1, &gref);
	if (err < 0)
		goto grant_tx_ring_fail;
	queue->tx_ring_ref = gref;

	rxs = (struct xen_netif_rx_sring *)get_zeroed_page(GFP_NOIO | __GFP_HIGH);
	if (!rxs) {
		err = -ENOMEM;
		xenbus_dev_fatal(dev, err, "allocating rx ring page");
		goto alloc_rx_ring_fail;
	}
	SHARED_RING_INIT(rxs);
	FRONT_RING_INIT(&queue->rx, rxs, PAGE_SIZE);

	err = xenbus_grant_ring(dev, rxs, 1, &gref);
	if (err < 0)
		goto grant_rx_ring_fail;
	queue->rx_ring_ref = gref;

	if (feature_split_evtchn)
		err = setup_netfront_split(queue);
	/* setup single event channel if
	 *  a) feature-split-event-channels == 0
	 *  b) feature-split-event-channels == 1 but failed to setup
	 */
	if (!feature_split_evtchn || (feature_split_evtchn && err))
		err = setup_netfront_single(queue);

	if (err)
		goto alloc_evtchn_fail;

	return 0;

	/* If we fail to setup netfront, it is safe to just revoke access to
	 * granted pages because backend is not accessing it at this point.
	 */
alloc_evtchn_fail:
	gnttab_end_foreign_access_ref(queue->rx_ring_ref, 0);
grant_rx_ring_fail:
	free_page((unsigned long)rxs);
alloc_rx_ring_fail:
	gnttab_end_foreign_access_ref(queue->tx_ring_ref, 0);
grant_tx_ring_fail:
	free_page((unsigned long)txs);
fail:
	return err;
}

/* Queue-specific initialisation
 * This used to be done in xennet_create_dev() but must now
 * be run per-queue.
 */
static int xennet_init_queue(struct netfront_queue *queue)
{
	unsigned short i;
	int err = 0;

	spin_lock_init(&queue->tx_lock);
	spin_lock_init(&queue->rx_lock);

	setup_timer(&queue->rx_refill_timer, rx_refill_timeout,
		    (unsigned long)queue);

	snprintf(queue->name, sizeof(queue->name), "%s-q%u",
		 queue->info->netdev->name, queue->id);

	/* Initialise tx_skbs as a free chain containing every entry. */
	queue->tx_skb_freelist = 0;
	for (i = 0; i < NET_TX_RING_SIZE; i++) {
		skb_entry_set_link(&queue->tx_skbs[i], i+1);
		queue->grant_tx_ref[i] = GRANT_INVALID_REF;
		queue->grant_tx_page[i] = NULL;
	}

	/* Clear out rx_skbs */
	for (i = 0; i < NET_RX_RING_SIZE; i++) {
		queue->rx_skbs[i] = NULL;
		queue->grant_rx_ref[i] = GRANT_INVALID_REF;
	}

	/* A grant for every tx ring slot */
	if (gnttab_alloc_grant_references(NET_TX_RING_SIZE,
					  &queue->gref_tx_head) < 0) {
		pr_alert("can't alloc tx grant refs\n");
		err = -ENOMEM;
		goto exit;
	}

	/* A grant for every rx ring slot */
	if (gnttab_alloc_grant_references(NET_RX_RING_SIZE,
					  &queue->gref_rx_head) < 0) {
		pr_alert("can't alloc rx grant refs\n");
		err = -ENOMEM;
		goto exit_free_tx;
	}

	return 0;

 exit_free_tx:
	gnttab_free_grant_references(queue->gref_tx_head);
 exit:
	return err;
}
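/* For a single queue the keys live directly under the device node; with
 * multiple queues each queue gets its own subdirectory, e.g.:
 *
 *	device/vif/0/tx-ring-ref	device/vif/0/queue-0/tx-ring-ref
 *	device/vif/0/rx-ring-ref	device/vif/0/queue-0/rx-ring-ref
 *	device/vif/0/event-channel	device/vif/0/queue-0/event-channel-tx
 *					device/vif/0/queue-0/event-channel-rx
 *
 * (illustrative paths; the actual prefix is dev->nodename).
 */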
static int write_queue_xenstore_keys(struct netfront_queue *queue,
				     struct xenbus_transaction *xbt, int write_hierarchical)
{
	/* Write the queue-specific keys into XenStore in the traditional
	 * way for a single queue, or in queue-specific subkeys for
	 * multiple queues.
	 */
	struct xenbus_device *dev = queue->info->xbdev;
	int err;
	const char *message;
	char *path;
	size_t pathsize;

	/* Choose the correct place to write the keys */
	if (write_hierarchical) {
		pathsize = strlen(dev->nodename) + 10;
		path = kzalloc(pathsize, GFP_KERNEL);
		if (!path) {
			err = -ENOMEM;
			message = "out of memory while writing ring references";
			goto error;
		}
		snprintf(path, pathsize, "%s/queue-%u",
			 dev->nodename, queue->id);
	} else {
		path = (char *)dev->nodename;
	}

	/* Write ring references */
	err = xenbus_printf(*xbt, path, "tx-ring-ref", "%u",
			    queue->tx_ring_ref);
	if (err) {
		message = "writing tx-ring-ref";
		goto error;
	}
	err = xenbus_printf(*xbt, path, "rx-ring-ref", "%u",
			    queue->rx_ring_ref);
	if (err) {
		message = "writing rx-ring-ref";
		goto error;
	}

	/* Write event channels; taking into account both shared
	 * and split event channel scenarios.
	 */
	if (queue->tx_evtchn == queue->rx_evtchn) {
		/* Shared event channel */
		err = xenbus_printf(*xbt, path,
				    "event-channel", "%u", queue->tx_evtchn);
		if (err) {
			message = "writing event-channel";
			goto error;
		}
	} else {
		/* Split event channels */
		err = xenbus_printf(*xbt, path,
				    "event-channel-tx", "%u", queue->tx_evtchn);
		if (err) {
			message = "writing event-channel-tx";
			goto error;
		}

		err = xenbus_printf(*xbt, path,
				    "event-channel-rx", "%u", queue->rx_evtchn);
		if (err) {
			message = "writing event-channel-rx";
			goto error;
		}
	}

	if (write_hierarchical)
		kfree(path);
	return 0;

error:
	if (write_hierarchical)
		kfree(path);
	xenbus_dev_fatal(dev, err, "%s", message);
	return err;
}
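/* Disable and delete each queue's NAPI context and its refill timer
 * under the RTNL lock before freeing the queue array.
 */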
static void xennet_destroy_queues(struct netfront_info *info)
{
	unsigned int i;

	rtnl_lock();

	for (i = 0; i < info->netdev->real_num_tx_queues; i++) {
		struct netfront_queue *queue = &info->queues[i];

		if (netif_running(info->netdev))
			napi_disable(&queue->napi);
		del_timer_sync(&queue->rx_refill_timer);
		netif_napi_del(&queue->napi);
	}

	rtnl_unlock();

	kfree(info->queues);
	info->queues = NULL;
}

static int xennet_create_queues(struct netfront_info *info,
				unsigned int num_queues)
{
	unsigned int i;
	int ret;

	info->queues = kcalloc(num_queues, sizeof(struct netfront_queue),
			       GFP_KERNEL);
	if (!info->queues)
		return -ENOMEM;

	rtnl_lock();

	for (i = 0; i < num_queues; i++) {
		struct netfront_queue *queue = &info->queues[i];

		queue->id = i;
		queue->info = info;

		ret = xennet_init_queue(queue);
		if (ret < 0) {
			dev_warn(&info->netdev->dev,
				 "only created %u queues\n", i);
			num_queues = i;
			break;
		}

		netif_napi_add(queue->info->netdev, &queue->napi,
			       xennet_poll, 64);
		if (netif_running(info->netdev))
			napi_enable(&queue->napi);
	}

	netif_set_real_num_tx_queues(info->netdev, num_queues);

	rtnl_unlock();

	if (num_queues == 0) {
		dev_err(&info->netdev->dev, "no queues\n");
		return -EINVAL;
	}
	return 0;
}

/* Common code used when first setting up, and when resuming. */
static int talk_to_netback(struct xenbus_device *dev,
			   struct netfront_info *info)
{
	const char *message;
	struct xenbus_transaction xbt;
	int err;
	unsigned int feature_split_evtchn;
	unsigned int i = 0;
	unsigned int max_queues = 0;
	struct netfront_queue *queue = NULL;
	unsigned int num_queues = 1;

	info->netdev->irq = 0;

	/* Check if backend supports multiple queues */
	err = xenbus_scanf(XBT_NIL, info->xbdev->otherend,
			   "multi-queue-max-queues", "%u", &max_queues);
	if (err < 0)
		max_queues = 1;
	num_queues = min(max_queues, xennet_max_queues);

	/* Check feature-split-event-channels */
	err = xenbus_scanf(XBT_NIL, info->xbdev->otherend,
			   "feature-split-event-channels", "%u",
			   &feature_split_evtchn);
	if (err < 0)
		feature_split_evtchn = 0;

	/* Read mac addr. */
	err = xen_net_read_mac(dev, info->netdev->dev_addr);
	if (err) {
		xenbus_dev_fatal(dev, err, "parsing %s/mac", dev->nodename);
		goto out;
	}

	if (info->queues)
		xennet_destroy_queues(info);

	err = xennet_create_queues(info, num_queues);
	if (err < 0)
		goto destroy_ring;

	/* Create shared ring, alloc event channel -- for each queue */
	for (i = 0; i < num_queues; ++i) {
		queue = &info->queues[i];
		err = setup_netfront(dev, queue, feature_split_evtchn);
		if (err) {
			/* setup_netfront() will tidy up the current
			 * queue on error, but we need to clean up
			 * those already allocated.
			 */
			if (i > 0) {
				rtnl_lock();
				netif_set_real_num_tx_queues(info->netdev, i);
				rtnl_unlock();
				goto destroy_ring;
			} else {
				goto out;
			}
		}
	}

again:
	err = xenbus_transaction_start(&xbt);
	if (err) {
		xenbus_dev_fatal(dev, err, "starting transaction");
		goto destroy_ring;
	}
	if (num_queues == 1) {
		err = write_queue_xenstore_keys(&info->queues[0], &xbt, 0); /* flat */
		if (err)
			goto abort_transaction_no_dev_fatal;
	} else {
		/* Write the number of queues */
		err = xenbus_printf(xbt, dev->nodename, "multi-queue-num-queues",
				    "%u", num_queues);
		if (err) {
			message = "writing multi-queue-num-queues";
			goto abort_transaction_no_dev_fatal;
		}

		/* Write the keys for each queue */
		for (i = 0; i < num_queues; ++i) {
			queue = &info->queues[i];
			err = write_queue_xenstore_keys(queue, &xbt, 1); /* hierarchical */
			if (err)
				goto abort_transaction_no_dev_fatal;
		}
	}

	/* The remaining keys are not queue-specific */
	err = xenbus_printf(xbt, dev->nodename, "request-rx-copy", "%u", 1);
	if (err) {
		message = "writing request-rx-copy";
		goto abort_transaction;
	}

	err = xenbus_printf(xbt, dev->nodename, "feature-rx-notify", "%d", 1);
	if (err) {
		message = "writing feature-rx-notify";
		goto abort_transaction;
	}

	err = xenbus_printf(xbt, dev->nodename, "feature-sg", "%d", 1);
	if (err) {
		message = "writing feature-sg";
		goto abort_transaction;
	}

	err = xenbus_printf(xbt, dev->nodename, "feature-gso-tcpv4", "%d", 1);
	if (err) {
		message = "writing feature-gso-tcpv4";
		goto abort_transaction;
	}

	err = xenbus_write(xbt, dev->nodename, "feature-gso-tcpv6", "1");
	if (err) {
		message = "writing feature-gso-tcpv6";
		goto abort_transaction;
	}

	err = xenbus_write(xbt, dev->nodename, "feature-ipv6-csum-offload",
			   "1");
	if (err) {
		message = "writing feature-ipv6-csum-offload";
		goto abort_transaction;
	}

	err = xenbus_transaction_end(xbt, 0);
	if (err) {
		if (err == -EAGAIN)
			goto again;
		xenbus_dev_fatal(dev, err, "completing transaction");
		goto destroy_ring;
	}

	return 0;

 abort_transaction:
	xenbus_dev_fatal(dev, err, "%s", message);
abort_transaction_no_dev_fatal:
	xenbus_transaction_end(xbt, 1);
 destroy_ring:
	xennet_disconnect_backend(info);
	kfree(info->queues);
	info->queues = NULL;
 out:
	return err;
}
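/* (Re)connect to the backend: renegotiate features, then kick every
 * queue's event channel(s), since packets may have been queued while we
 * were disconnected.
 */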
static int xennet_connect(struct net_device *dev)
{
	struct netfront_info *np = netdev_priv(dev);
	unsigned int num_queues = 0;
	int err;
	unsigned int feature_rx_copy;
	unsigned int j = 0;
	struct netfront_queue *queue = NULL;

	err = xenbus_scanf(XBT_NIL, np->xbdev->otherend,
			   "feature-rx-copy", "%u", &feature_rx_copy);
	if (err != 1)
		feature_rx_copy = 0;

	if (!feature_rx_copy) {
		dev_info(&dev->dev,
			 "backend does not support copying receive path\n");
		return -ENODEV;
	}

	err = talk_to_netback(np->xbdev, np);
	if (err)
		return err;

	/* talk_to_netback() sets the correct number of queues */
	num_queues = dev->real_num_tx_queues;

	rtnl_lock();
	netdev_update_features(dev);
	rtnl_unlock();

	/*
	 * All public and private state should now be sane.  Get
	 * ready to start sending and receiving packets and give the driver
	 * domain a kick because we've probably just requeued some
	 * packets.
	 */
	netif_carrier_on(np->netdev);
	for (j = 0; j < num_queues; ++j) {
		queue = &np->queues[j];

		notify_remote_via_irq(queue->tx_irq);
		if (queue->tx_irq != queue->rx_irq)
			notify_remote_via_irq(queue->rx_irq);

		spin_lock_irq(&queue->tx_lock);
		xennet_tx_buf_gc(queue);
		spin_unlock_irq(&queue->tx_lock);

		spin_lock_bh(&queue->rx_lock);
		xennet_alloc_rx_buffers(queue);
		spin_unlock_bh(&queue->rx_lock);
	}

	return 0;
}
/**
 * Callback received when the backend's state changes.
 */
static void netback_changed(struct xenbus_device *dev,
			    enum xenbus_state backend_state)
{
	struct netfront_info *np = dev_get_drvdata(&dev->dev);
	struct net_device *netdev = np->netdev;

	dev_dbg(&dev->dev, "%s\n", xenbus_strstate(backend_state));

	switch (backend_state) {
	case XenbusStateInitialising:
	case XenbusStateInitialised:
	case XenbusStateReconfiguring:
	case XenbusStateReconfigured:
	case XenbusStateUnknown:
		break;

	case XenbusStateInitWait:
		if (dev->state != XenbusStateInitialising)
			break;
		if (xennet_connect(netdev) != 0)
			break;
		xenbus_switch_state(dev, XenbusStateConnected);
		break;

	case XenbusStateConnected:
		netdev_notify_peers(netdev);
		break;

	case XenbusStateClosed:
		if (dev->state == XenbusStateClosed)
			break;
		/* Missed the backend's CLOSING state -- fallthrough */
	case XenbusStateClosing:
		xenbus_frontend_closed(dev);
		break;
	}
}

static const struct xennet_stat {
	char name[ETH_GSTRING_LEN];
	u16 offset;
} xennet_stats[] = {
	{
		"rx_gso_checksum_fixup",
		offsetof(struct netfront_info, rx_gso_checksum_fixup)
	},
};

static int xennet_get_sset_count(struct net_device *dev, int string_set)
{
	switch (string_set) {
	case ETH_SS_STATS:
		return ARRAY_SIZE(xennet_stats);
	default:
		return -EINVAL;
	}
}

static void xennet_get_ethtool_stats(struct net_device *dev,
				     struct ethtool_stats *stats, u64 *data)
{
	void *np = netdev_priv(dev);
	int i;

	for (i = 0; i < ARRAY_SIZE(xennet_stats); i++)
		data[i] = atomic_read((atomic_t *)(np + xennet_stats[i].offset));
}

static void xennet_get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
	int i;

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0; i < ARRAY_SIZE(xennet_stats); i++)
			memcpy(data + i * ETH_GSTRING_LEN,
			       xennet_stats[i].name, ETH_GSTRING_LEN);
		break;
	}
}
static const struct ethtool_ops xennet_ethtool_ops =
{
	.get_link = ethtool_op_get_link,

	.get_sset_count = xennet_get_sset_count,
	.get_ethtool_stats = xennet_get_ethtool_stats,
	.get_strings = xennet_get_strings,
};
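/* Legacy sysfs attributes.  Receive buffering is no longer tunable, but
 * the rxbuf_min/rxbuf_max/rxbuf_cur nodes are kept for compatibility;
 * reads report the fixed rx ring size and writes are validated but
 * otherwise ignored.
 */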
#ifdef CONFIG_SYSFS
static ssize_t show_rxbuf(struct device *dev,
			  struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%lu\n", NET_RX_RING_SIZE);
}

static ssize_t store_rxbuf(struct device *dev,
			   struct device_attribute *attr,
			   const char *buf, size_t len)
{
	char *endp;
	unsigned long target;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	target = simple_strtoul(buf, &endp, 0);
	if (endp == buf)
		return -EBADMSG;

	/* rxbuf_min and rxbuf_max are no longer configurable. */

	return len;
}

static DEVICE_ATTR(rxbuf_min, S_IRUGO|S_IWUSR, show_rxbuf, store_rxbuf);
static DEVICE_ATTR(rxbuf_max, S_IRUGO|S_IWUSR, show_rxbuf, store_rxbuf);
static DEVICE_ATTR(rxbuf_cur, S_IRUGO, show_rxbuf, NULL);

static struct attribute *xennet_dev_attrs[] = {
	&dev_attr_rxbuf_min.attr,
	&dev_attr_rxbuf_max.attr,
	&dev_attr_rxbuf_cur.attr,
	NULL
};

static const struct attribute_group xennet_dev_group = {
	.attrs = xennet_dev_attrs
};
#endif /* CONFIG_SYSFS */

static int xennet_remove(struct xenbus_device *dev)
{
	struct netfront_info *info = dev_get_drvdata(&dev->dev);

	dev_dbg(&dev->dev, "%s\n", dev->nodename);

	xennet_disconnect_backend(info);

	unregister_netdev(info->netdev);

	if (info->queues)
		xennet_destroy_queues(info);
	xennet_free_netdev(info->netdev);

	return 0;
}

static const struct xenbus_device_id netfront_ids[] = {
	{ "vif" },
	{ "" }
};

static struct xenbus_driver netfront_driver = {
	.ids = netfront_ids,
	.probe = netfront_probe,
	.remove = xennet_remove,
	.resume = netfront_resume,
	.otherend_changed = netback_changed,
};
static int __init netif_init(void)
{
	if (!xen_domain())
		return -ENODEV;

	if (!xen_has_pv_nic_devices())
		return -ENODEV;

	pr_info("Initialising Xen virtual ethernet driver\n");

	/* Allow as many queues as there are CPUs, by default */
	xennet_max_queues = num_online_cpus();

	return xenbus_register_frontend(&netfront_driver);
}
module_init(netif_init);

static void __exit netif_exit(void)
{
	xenbus_unregister_driver(&netfront_driver);
}
module_exit(netif_exit);

MODULE_DESCRIPTION("Xen virtual network device frontend");
MODULE_LICENSE("GPL");
MODULE_ALIAS("xen:vif");
MODULE_ALIAS("xennet");