10d160211SJeremy Fitzhardinge /* 20d160211SJeremy Fitzhardinge * Virtual network driver for conversing with remote driver backends. 30d160211SJeremy Fitzhardinge * 40d160211SJeremy Fitzhardinge * Copyright (c) 2002-2005, K A Fraser 50d160211SJeremy Fitzhardinge * Copyright (c) 2005, XenSource Ltd 60d160211SJeremy Fitzhardinge * 70d160211SJeremy Fitzhardinge * This program is free software; you can redistribute it and/or 80d160211SJeremy Fitzhardinge * modify it under the terms of the GNU General Public License version 2 90d160211SJeremy Fitzhardinge * as published by the Free Software Foundation; or, when distributed 100d160211SJeremy Fitzhardinge * separately from the Linux kernel or incorporated into other 110d160211SJeremy Fitzhardinge * software packages, subject to the following license: 120d160211SJeremy Fitzhardinge * 130d160211SJeremy Fitzhardinge * Permission is hereby granted, free of charge, to any person obtaining a copy 140d160211SJeremy Fitzhardinge * of this source file (the "Software"), to deal in the Software without 150d160211SJeremy Fitzhardinge * restriction, including without limitation the rights to use, copy, modify, 160d160211SJeremy Fitzhardinge * merge, publish, distribute, sublicense, and/or sell copies of the Software, 170d160211SJeremy Fitzhardinge * and to permit persons to whom the Software is furnished to do so, subject to 180d160211SJeremy Fitzhardinge * the following conditions: 190d160211SJeremy Fitzhardinge * 200d160211SJeremy Fitzhardinge * The above copyright notice and this permission notice shall be included in 210d160211SJeremy Fitzhardinge * all copies or substantial portions of the Software. 220d160211SJeremy Fitzhardinge * 230d160211SJeremy Fitzhardinge * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 240d160211SJeremy Fitzhardinge * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 250d160211SJeremy Fitzhardinge * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 260d160211SJeremy Fitzhardinge * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 270d160211SJeremy Fitzhardinge * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 280d160211SJeremy Fitzhardinge * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS 290d160211SJeremy Fitzhardinge * IN THE SOFTWARE. 300d160211SJeremy Fitzhardinge */ 310d160211SJeremy Fitzhardinge 32383eda32SJoe Perches #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 33383eda32SJoe Perches 340d160211SJeremy Fitzhardinge #include <linux/module.h> 350d160211SJeremy Fitzhardinge #include <linux/kernel.h> 360d160211SJeremy Fitzhardinge #include <linux/netdevice.h> 370d160211SJeremy Fitzhardinge #include <linux/etherdevice.h> 380d160211SJeremy Fitzhardinge #include <linux/skbuff.h> 390d160211SJeremy Fitzhardinge #include <linux/ethtool.h> 400d160211SJeremy Fitzhardinge #include <linux/if_ether.h> 419ecd1a75SWei Liu #include <net/tcp.h> 420d160211SJeremy Fitzhardinge #include <linux/udp.h> 430d160211SJeremy Fitzhardinge #include <linux/moduleparam.h> 440d160211SJeremy Fitzhardinge #include <linux/mm.h> 455a0e3ad6STejun Heo #include <linux/slab.h> 460d160211SJeremy Fitzhardinge #include <net/ip.h> 470d160211SJeremy Fitzhardinge 481ccbf534SJeremy Fitzhardinge #include <xen/xen.h> 490d160211SJeremy Fitzhardinge #include <xen/xenbus.h> 500d160211SJeremy Fitzhardinge #include <xen/events.h> 510d160211SJeremy Fitzhardinge #include <xen/page.h> 52b9136d20SIgor Mammedov #include <xen/platform_pci.h> 530d160211SJeremy Fitzhardinge #include <xen/grant_table.h> 540d160211SJeremy Fitzhardinge 550d160211SJeremy Fitzhardinge #include <xen/interface/io/netif.h> 560d160211SJeremy Fitzhardinge #include <xen/interface/memory.h> 570d160211SJeremy Fitzhardinge #include <xen/interface/grant_table.h> 580d160211SJeremy Fitzhardinge 5950ee6061SAndrew J. Bennieston /* Module parameters */ 60034702a6SJuergen Gross #define MAX_QUEUES_DEFAULT 8 6150ee6061SAndrew J. Bennieston static unsigned int xennet_max_queues; 6250ee6061SAndrew J. Bennieston module_param_named(max_queues, xennet_max_queues, uint, 0644); 6350ee6061SAndrew J. Bennieston MODULE_PARM_DESC(max_queues, 6450ee6061SAndrew J. Bennieston "Maximum number of queues per virtual interface"); 6550ee6061SAndrew J. Bennieston 660fc0b732SStephen Hemminger static const struct ethtool_ops xennet_ethtool_ops; 670d160211SJeremy Fitzhardinge 680d160211SJeremy Fitzhardinge struct netfront_cb { 693683243bSIan Campbell int pull_to; 700d160211SJeremy Fitzhardinge }; 710d160211SJeremy Fitzhardinge 720d160211SJeremy Fitzhardinge #define NETFRONT_SKB_CB(skb) ((struct netfront_cb *)((skb)->cb)) 730d160211SJeremy Fitzhardinge 740d160211SJeremy Fitzhardinge #define RX_COPY_THRESHOLD 256 750d160211SJeremy Fitzhardinge 760d160211SJeremy Fitzhardinge #define GRANT_INVALID_REF 0 770d160211SJeremy Fitzhardinge 7830c5d7f0SJulien Grall #define NET_TX_RING_SIZE __CONST_RING_SIZE(xen_netif_tx, XEN_PAGE_SIZE) 7930c5d7f0SJulien Grall #define NET_RX_RING_SIZE __CONST_RING_SIZE(xen_netif_rx, XEN_PAGE_SIZE) 801f3c2ebaSDavid Vrabel 811f3c2ebaSDavid Vrabel /* Minimum number of Rx slots (includes slot for GSO metadata). */ 821f3c2ebaSDavid Vrabel #define NET_RX_SLOTS_MIN (XEN_NETIF_NR_SLOTS_MIN + 1) 830d160211SJeremy Fitzhardinge 842688fcb7SAndrew J. Bennieston /* Queue name is interface name with "-qNNN" appended */ 852688fcb7SAndrew J. Bennieston #define QUEUE_NAME_SIZE (IFNAMSIZ + 6) 862688fcb7SAndrew J. Bennieston 872688fcb7SAndrew J. Bennieston /* IRQ name is queue name with "-tx" or "-rx" appended */ 882688fcb7SAndrew J. Bennieston #define IRQ_NAME_SIZE (QUEUE_NAME_SIZE + 3) 892688fcb7SAndrew J. Bennieston 908edfe2e9SJuergen Gross static DECLARE_WAIT_QUEUE_HEAD(module_wq); 915b5971dfSEduardo Otubo 92e00f85beSstephen hemminger struct netfront_stats { 93900e1833SDavid Vrabel u64 packets; 94900e1833SDavid Vrabel u64 bytes; 95e00f85beSstephen hemminger struct u64_stats_sync syncp; 96e00f85beSstephen hemminger }; 97e00f85beSstephen hemminger 982688fcb7SAndrew J. Bennieston struct netfront_info; 992688fcb7SAndrew J. Bennieston 1002688fcb7SAndrew J. Bennieston struct netfront_queue { 1012688fcb7SAndrew J. Bennieston unsigned int id; /* Queue ID, 0-based */ 1022688fcb7SAndrew J. Bennieston char name[QUEUE_NAME_SIZE]; /* DEVNAME-qN */ 1032688fcb7SAndrew J. Bennieston struct netfront_info *info; 1040d160211SJeremy Fitzhardinge 105bea3348eSStephen Hemminger struct napi_struct napi; 1060d160211SJeremy Fitzhardinge 107d634bf2cSWei Liu /* Split event channels support, tx_* == rx_* when using 108d634bf2cSWei Liu * single event channel. 109d634bf2cSWei Liu */ 110d634bf2cSWei Liu unsigned int tx_evtchn, rx_evtchn; 111d634bf2cSWei Liu unsigned int tx_irq, rx_irq; 112d634bf2cSWei Liu /* Only used when split event channels support is enabled */ 1132688fcb7SAndrew J. Bennieston char tx_irq_name[IRQ_NAME_SIZE]; /* DEVNAME-qN-tx */ 1142688fcb7SAndrew J. Bennieston char rx_irq_name[IRQ_NAME_SIZE]; /* DEVNAME-qN-rx */ 1150d160211SJeremy Fitzhardinge 1160d160211SJeremy Fitzhardinge spinlock_t tx_lock; 11784284d3cSJeremy Fitzhardinge struct xen_netif_tx_front_ring tx; 11884284d3cSJeremy Fitzhardinge int tx_ring_ref; 1190d160211SJeremy Fitzhardinge 1200d160211SJeremy Fitzhardinge /* 1210d160211SJeremy Fitzhardinge * {tx,rx}_skbs store outstanding skbuffs. Free tx_skb entries 1220d160211SJeremy Fitzhardinge * are linked from tx_skb_freelist through skb_entry.link. 1230d160211SJeremy Fitzhardinge * 1240d160211SJeremy Fitzhardinge * NB. Freelist index entries are always going to be less than 1250d160211SJeremy Fitzhardinge * PAGE_OFFSET, whereas pointers to skbs will always be equal or 1260d160211SJeremy Fitzhardinge * greater than PAGE_OFFSET: we use this property to distinguish 1270d160211SJeremy Fitzhardinge * them. 1280d160211SJeremy Fitzhardinge */ 1290d160211SJeremy Fitzhardinge union skb_entry { 1300d160211SJeremy Fitzhardinge struct sk_buff *skb; 1311ffb40b8SIsaku Yamahata unsigned long link; 1320d160211SJeremy Fitzhardinge } tx_skbs[NET_TX_RING_SIZE]; 1330d160211SJeremy Fitzhardinge grant_ref_t gref_tx_head; 1340d160211SJeremy Fitzhardinge grant_ref_t grant_tx_ref[NET_TX_RING_SIZE]; 135cefe0078SAnnie Li struct page *grant_tx_page[NET_TX_RING_SIZE]; 1360d160211SJeremy Fitzhardinge unsigned tx_skb_freelist; 1370d160211SJeremy Fitzhardinge 13884284d3cSJeremy Fitzhardinge spinlock_t rx_lock ____cacheline_aligned_in_smp; 13984284d3cSJeremy Fitzhardinge struct xen_netif_rx_front_ring rx; 14084284d3cSJeremy Fitzhardinge int rx_ring_ref; 14184284d3cSJeremy Fitzhardinge 14284284d3cSJeremy Fitzhardinge struct timer_list rx_refill_timer; 14384284d3cSJeremy Fitzhardinge 1440d160211SJeremy Fitzhardinge struct sk_buff *rx_skbs[NET_RX_RING_SIZE]; 1450d160211SJeremy Fitzhardinge grant_ref_t gref_rx_head; 1460d160211SJeremy Fitzhardinge grant_ref_t grant_rx_ref[NET_RX_RING_SIZE]; 1472688fcb7SAndrew J. Bennieston }; 1482688fcb7SAndrew J. Bennieston 1492688fcb7SAndrew J. Bennieston struct netfront_info { 1502688fcb7SAndrew J. Bennieston struct list_head list; 1512688fcb7SAndrew J. Bennieston struct net_device *netdev; 1522688fcb7SAndrew J. Bennieston 1532688fcb7SAndrew J. Bennieston struct xenbus_device *xbdev; 1542688fcb7SAndrew J. Bennieston 1552688fcb7SAndrew J. Bennieston /* Multi-queue support */ 1562688fcb7SAndrew J. Bennieston struct netfront_queue *queues; 157e0ce4af9SIan Campbell 158e0ce4af9SIan Campbell /* Statistics */ 159900e1833SDavid Vrabel struct netfront_stats __percpu *rx_stats; 160900e1833SDavid Vrabel struct netfront_stats __percpu *tx_stats; 161e00f85beSstephen hemminger 1622688fcb7SAndrew J. Bennieston atomic_t rx_gso_checksum_fixup; 1630d160211SJeremy Fitzhardinge }; 1640d160211SJeremy Fitzhardinge 1650d160211SJeremy Fitzhardinge struct netfront_rx_info { 1660d160211SJeremy Fitzhardinge struct xen_netif_rx_response rx; 1670d160211SJeremy Fitzhardinge struct xen_netif_extra_info extras[XEN_NETIF_EXTRA_TYPE_MAX - 1]; 1680d160211SJeremy Fitzhardinge }; 1690d160211SJeremy Fitzhardinge 1701ffb40b8SIsaku Yamahata static void skb_entry_set_link(union skb_entry *list, unsigned short id) 1711ffb40b8SIsaku Yamahata { 1721ffb40b8SIsaku Yamahata list->link = id; 1731ffb40b8SIsaku Yamahata } 1741ffb40b8SIsaku Yamahata 1751ffb40b8SIsaku Yamahata static int skb_entry_is_link(const union skb_entry *list) 1761ffb40b8SIsaku Yamahata { 1771ffb40b8SIsaku Yamahata BUILD_BUG_ON(sizeof(list->skb) != sizeof(list->link)); 178807540baSEric Dumazet return (unsigned long)list->skb < PAGE_OFFSET; 1791ffb40b8SIsaku Yamahata } 1801ffb40b8SIsaku Yamahata 1810d160211SJeremy Fitzhardinge /* 1820d160211SJeremy Fitzhardinge * Access macros for acquiring freeing slots in tx_skbs[]. 1830d160211SJeremy Fitzhardinge */ 1840d160211SJeremy Fitzhardinge 1850d160211SJeremy Fitzhardinge static void add_id_to_freelist(unsigned *head, union skb_entry *list, 1860d160211SJeremy Fitzhardinge unsigned short id) 1870d160211SJeremy Fitzhardinge { 1881ffb40b8SIsaku Yamahata skb_entry_set_link(&list[id], *head); 1890d160211SJeremy Fitzhardinge *head = id; 1900d160211SJeremy Fitzhardinge } 1910d160211SJeremy Fitzhardinge 1920d160211SJeremy Fitzhardinge static unsigned short get_id_from_freelist(unsigned *head, 1930d160211SJeremy Fitzhardinge union skb_entry *list) 1940d160211SJeremy Fitzhardinge { 1950d160211SJeremy Fitzhardinge unsigned int id = *head; 1960d160211SJeremy Fitzhardinge *head = list[id].link; 1970d160211SJeremy Fitzhardinge return id; 1980d160211SJeremy Fitzhardinge } 1990d160211SJeremy Fitzhardinge 2000d160211SJeremy Fitzhardinge static int xennet_rxidx(RING_IDX idx) 2010d160211SJeremy Fitzhardinge { 2020d160211SJeremy Fitzhardinge return idx & (NET_RX_RING_SIZE - 1); 2030d160211SJeremy Fitzhardinge } 2040d160211SJeremy Fitzhardinge 2052688fcb7SAndrew J. Bennieston static struct sk_buff *xennet_get_rx_skb(struct netfront_queue *queue, 2060d160211SJeremy Fitzhardinge RING_IDX ri) 2070d160211SJeremy Fitzhardinge { 2080d160211SJeremy Fitzhardinge int i = xennet_rxidx(ri); 2092688fcb7SAndrew J. Bennieston struct sk_buff *skb = queue->rx_skbs[i]; 2102688fcb7SAndrew J. Bennieston queue->rx_skbs[i] = NULL; 2110d160211SJeremy Fitzhardinge return skb; 2120d160211SJeremy Fitzhardinge } 2130d160211SJeremy Fitzhardinge 2142688fcb7SAndrew J. Bennieston static grant_ref_t xennet_get_rx_ref(struct netfront_queue *queue, 2150d160211SJeremy Fitzhardinge RING_IDX ri) 2160d160211SJeremy Fitzhardinge { 2170d160211SJeremy Fitzhardinge int i = xennet_rxidx(ri); 2182688fcb7SAndrew J. Bennieston grant_ref_t ref = queue->grant_rx_ref[i]; 2192688fcb7SAndrew J. Bennieston queue->grant_rx_ref[i] = GRANT_INVALID_REF; 2200d160211SJeremy Fitzhardinge return ref; 2210d160211SJeremy Fitzhardinge } 2220d160211SJeremy Fitzhardinge 2230d160211SJeremy Fitzhardinge #ifdef CONFIG_SYSFS 22427b917e5STakashi Iwai static const struct attribute_group xennet_dev_group; 2250d160211SJeremy Fitzhardinge #endif 2260d160211SJeremy Fitzhardinge 2273ad9b358SMichał Mirosław static bool xennet_can_sg(struct net_device *dev) 2280d160211SJeremy Fitzhardinge { 2293ad9b358SMichał Mirosław return dev->features & NETIF_F_SG; 2300d160211SJeremy Fitzhardinge } 2310d160211SJeremy Fitzhardinge 2320d160211SJeremy Fitzhardinge 233e99e88a9SKees Cook static void rx_refill_timeout(struct timer_list *t) 2340d160211SJeremy Fitzhardinge { 235e99e88a9SKees Cook struct netfront_queue *queue = from_timer(queue, t, rx_refill_timer); 2362688fcb7SAndrew J. Bennieston napi_schedule(&queue->napi); 2370d160211SJeremy Fitzhardinge } 2380d160211SJeremy Fitzhardinge 2392688fcb7SAndrew J. Bennieston static int netfront_tx_slot_available(struct netfront_queue *queue) 2400d160211SJeremy Fitzhardinge { 2412688fcb7SAndrew J. Bennieston return (queue->tx.req_prod_pvt - queue->tx.rsp_cons) < 24257f230abSJuergen Gross (NET_TX_RING_SIZE - XEN_NETIF_NR_SLOTS_MIN - 1); 2430d160211SJeremy Fitzhardinge } 2440d160211SJeremy Fitzhardinge 2452688fcb7SAndrew J. Bennieston static void xennet_maybe_wake_tx(struct netfront_queue *queue) 2460d160211SJeremy Fitzhardinge { 2472688fcb7SAndrew J. Bennieston struct net_device *dev = queue->info->netdev; 2482688fcb7SAndrew J. Bennieston struct netdev_queue *dev_queue = netdev_get_tx_queue(dev, queue->id); 2490d160211SJeremy Fitzhardinge 2502688fcb7SAndrew J. Bennieston if (unlikely(netif_tx_queue_stopped(dev_queue)) && 2512688fcb7SAndrew J. Bennieston netfront_tx_slot_available(queue) && 2520d160211SJeremy Fitzhardinge likely(netif_running(dev))) 2532688fcb7SAndrew J. Bennieston netif_tx_wake_queue(netdev_get_tx_queue(dev, queue->id)); 2540d160211SJeremy Fitzhardinge } 2550d160211SJeremy Fitzhardinge 2561f3c2ebaSDavid Vrabel 2571f3c2ebaSDavid Vrabel static struct sk_buff *xennet_alloc_one_rx_buffer(struct netfront_queue *queue) 2580d160211SJeremy Fitzhardinge { 2590d160211SJeremy Fitzhardinge struct sk_buff *skb; 2600d160211SJeremy Fitzhardinge struct page *page; 2610d160211SJeremy Fitzhardinge 2622688fcb7SAndrew J. Bennieston skb = __netdev_alloc_skb(queue->info->netdev, 2632688fcb7SAndrew J. Bennieston RX_COPY_THRESHOLD + NET_IP_ALIGN, 2640d160211SJeremy Fitzhardinge GFP_ATOMIC | __GFP_NOWARN); 2650d160211SJeremy Fitzhardinge if (unlikely(!skb)) 2661f3c2ebaSDavid Vrabel return NULL; 267617a20bbSIsaku Yamahata 2680d160211SJeremy Fitzhardinge page = alloc_page(GFP_ATOMIC | __GFP_NOWARN); 2690d160211SJeremy Fitzhardinge if (!page) { 2700d160211SJeremy Fitzhardinge kfree_skb(skb); 2711f3c2ebaSDavid Vrabel return NULL; 2720d160211SJeremy Fitzhardinge } 273093b9c71SJan Beulich skb_add_rx_frag(skb, 0, page, 0, 0, PAGE_SIZE); 2740d160211SJeremy Fitzhardinge 2751f3c2ebaSDavid Vrabel /* Align ip header to a 16 bytes boundary */ 2761f3c2ebaSDavid Vrabel skb_reserve(skb, NET_IP_ALIGN); 2772688fcb7SAndrew J. Bennieston skb->dev = queue->info->netdev; 2780d160211SJeremy Fitzhardinge 2791f3c2ebaSDavid Vrabel return skb; 2801f3c2ebaSDavid Vrabel } 2811f3c2ebaSDavid Vrabel 2821f3c2ebaSDavid Vrabel 2831f3c2ebaSDavid Vrabel static void xennet_alloc_rx_buffers(struct netfront_queue *queue) 2841f3c2ebaSDavid Vrabel { 2851f3c2ebaSDavid Vrabel RING_IDX req_prod = queue->rx.req_prod_pvt; 2861f3c2ebaSDavid Vrabel int notify; 287538d9291SVineeth Remanan Pillai int err = 0; 2881f3c2ebaSDavid Vrabel 2891f3c2ebaSDavid Vrabel if (unlikely(!netif_carrier_ok(queue->info->netdev))) 2901f3c2ebaSDavid Vrabel return; 2911f3c2ebaSDavid Vrabel 2921f3c2ebaSDavid Vrabel for (req_prod = queue->rx.req_prod_pvt; 2931f3c2ebaSDavid Vrabel req_prod - queue->rx.rsp_cons < NET_RX_RING_SIZE; 2941f3c2ebaSDavid Vrabel req_prod++) { 2951f3c2ebaSDavid Vrabel struct sk_buff *skb; 2961f3c2ebaSDavid Vrabel unsigned short id; 2971f3c2ebaSDavid Vrabel grant_ref_t ref; 29830c5d7f0SJulien Grall struct page *page; 2991f3c2ebaSDavid Vrabel struct xen_netif_rx_request *req; 3001f3c2ebaSDavid Vrabel 3011f3c2ebaSDavid Vrabel skb = xennet_alloc_one_rx_buffer(queue); 302538d9291SVineeth Remanan Pillai if (!skb) { 303538d9291SVineeth Remanan Pillai err = -ENOMEM; 3041f3c2ebaSDavid Vrabel break; 305538d9291SVineeth Remanan Pillai } 3061f3c2ebaSDavid Vrabel 3071f3c2ebaSDavid Vrabel id = xennet_rxidx(req_prod); 3080d160211SJeremy Fitzhardinge 3092688fcb7SAndrew J. Bennieston BUG_ON(queue->rx_skbs[id]); 3102688fcb7SAndrew J. Bennieston queue->rx_skbs[id] = skb; 3110d160211SJeremy Fitzhardinge 3122688fcb7SAndrew J. Bennieston ref = gnttab_claim_grant_reference(&queue->gref_rx_head); 313269ebce4SDongli Zhang WARN_ON_ONCE(IS_ERR_VALUE((unsigned long)(int)ref)); 3142688fcb7SAndrew J. Bennieston queue->grant_rx_ref[id] = ref; 3150d160211SJeremy Fitzhardinge 31630c5d7f0SJulien Grall page = skb_frag_page(&skb_shinfo(skb)->frags[0]); 3170d160211SJeremy Fitzhardinge 3181f3c2ebaSDavid Vrabel req = RING_GET_REQUEST(&queue->rx, req_prod); 31930c5d7f0SJulien Grall gnttab_page_grant_foreign_access_ref_one(ref, 3202688fcb7SAndrew J. Bennieston queue->info->xbdev->otherend_id, 32130c5d7f0SJulien Grall page, 3220d160211SJeremy Fitzhardinge 0); 3230d160211SJeremy Fitzhardinge req->id = id; 3240d160211SJeremy Fitzhardinge req->gref = ref; 3250d160211SJeremy Fitzhardinge } 3260d160211SJeremy Fitzhardinge 3271f3c2ebaSDavid Vrabel queue->rx.req_prod_pvt = req_prod; 3281f3c2ebaSDavid Vrabel 329538d9291SVineeth Remanan Pillai /* Try again later if there are not enough requests or skb allocation 330538d9291SVineeth Remanan Pillai * failed. 331538d9291SVineeth Remanan Pillai * Enough requests is quantified as the sum of newly created slots and 332538d9291SVineeth Remanan Pillai * the unconsumed slots at the backend. 333538d9291SVineeth Remanan Pillai */ 334538d9291SVineeth Remanan Pillai if (req_prod - queue->rx.rsp_cons < NET_RX_SLOTS_MIN || 335538d9291SVineeth Remanan Pillai unlikely(err)) { 3361f3c2ebaSDavid Vrabel mod_timer(&queue->rx_refill_timer, jiffies + (HZ/10)); 3371f3c2ebaSDavid Vrabel return; 3381f3c2ebaSDavid Vrabel } 3391f3c2ebaSDavid Vrabel 3402688fcb7SAndrew J. Bennieston RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&queue->rx, notify); 3410d160211SJeremy Fitzhardinge if (notify) 3422688fcb7SAndrew J. Bennieston notify_remote_via_irq(queue->rx_irq); 3430d160211SJeremy Fitzhardinge } 3440d160211SJeremy Fitzhardinge 3450d160211SJeremy Fitzhardinge static int xennet_open(struct net_device *dev) 3460d160211SJeremy Fitzhardinge { 3470d160211SJeremy Fitzhardinge struct netfront_info *np = netdev_priv(dev); 3482688fcb7SAndrew J. Bennieston unsigned int num_queues = dev->real_num_tx_queues; 3492688fcb7SAndrew J. Bennieston unsigned int i = 0; 3502688fcb7SAndrew J. Bennieston struct netfront_queue *queue = NULL; 3510d160211SJeremy Fitzhardinge 352f599c64fSRoss Lagerwall if (!np->queues) 353f599c64fSRoss Lagerwall return -ENODEV; 354f599c64fSRoss Lagerwall 3552688fcb7SAndrew J. Bennieston for (i = 0; i < num_queues; ++i) { 3562688fcb7SAndrew J. Bennieston queue = &np->queues[i]; 3572688fcb7SAndrew J. Bennieston napi_enable(&queue->napi); 358bea3348eSStephen Hemminger 3592688fcb7SAndrew J. Bennieston spin_lock_bh(&queue->rx_lock); 3600d160211SJeremy Fitzhardinge if (netif_carrier_ok(dev)) { 3612688fcb7SAndrew J. Bennieston xennet_alloc_rx_buffers(queue); 3622688fcb7SAndrew J. Bennieston queue->rx.sring->rsp_event = queue->rx.rsp_cons + 1; 3632688fcb7SAndrew J. Bennieston if (RING_HAS_UNCONSUMED_RESPONSES(&queue->rx)) 3642688fcb7SAndrew J. Bennieston napi_schedule(&queue->napi); 3650d160211SJeremy Fitzhardinge } 3662688fcb7SAndrew J. Bennieston spin_unlock_bh(&queue->rx_lock); 3672688fcb7SAndrew J. Bennieston } 3680d160211SJeremy Fitzhardinge 3692688fcb7SAndrew J. Bennieston netif_tx_start_all_queues(dev); 3700d160211SJeremy Fitzhardinge 3710d160211SJeremy Fitzhardinge return 0; 3720d160211SJeremy Fitzhardinge } 3730d160211SJeremy Fitzhardinge 3742688fcb7SAndrew J. Bennieston static void xennet_tx_buf_gc(struct netfront_queue *queue) 3750d160211SJeremy Fitzhardinge { 3760d160211SJeremy Fitzhardinge RING_IDX cons, prod; 3770d160211SJeremy Fitzhardinge unsigned short id; 3780d160211SJeremy Fitzhardinge struct sk_buff *skb; 3797d0105b5SMalcolm Crossley bool more_to_do; 3800d160211SJeremy Fitzhardinge 3812688fcb7SAndrew J. Bennieston BUG_ON(!netif_carrier_ok(queue->info->netdev)); 3820d160211SJeremy Fitzhardinge 3830d160211SJeremy Fitzhardinge do { 3842688fcb7SAndrew J. Bennieston prod = queue->tx.sring->rsp_prod; 3850d160211SJeremy Fitzhardinge rmb(); /* Ensure we see responses up to 'rp'. */ 3860d160211SJeremy Fitzhardinge 3872688fcb7SAndrew J. Bennieston for (cons = queue->tx.rsp_cons; cons != prod; cons++) { 3880d160211SJeremy Fitzhardinge struct xen_netif_tx_response *txrsp; 3890d160211SJeremy Fitzhardinge 3902688fcb7SAndrew J. Bennieston txrsp = RING_GET_RESPONSE(&queue->tx, cons); 391f942dc25SIan Campbell if (txrsp->status == XEN_NETIF_RSP_NULL) 3920d160211SJeremy Fitzhardinge continue; 3930d160211SJeremy Fitzhardinge 3940d160211SJeremy Fitzhardinge id = txrsp->id; 3952688fcb7SAndrew J. Bennieston skb = queue->tx_skbs[id].skb; 3960d160211SJeremy Fitzhardinge if (unlikely(gnttab_query_foreign_access( 3972688fcb7SAndrew J. Bennieston queue->grant_tx_ref[id]) != 0)) { 398383eda32SJoe Perches pr_alert("%s: warning -- grant still in use by backend domain\n", 399383eda32SJoe Perches __func__); 4000d160211SJeremy Fitzhardinge BUG(); 4010d160211SJeremy Fitzhardinge } 4020d160211SJeremy Fitzhardinge gnttab_end_foreign_access_ref( 4032688fcb7SAndrew J. Bennieston queue->grant_tx_ref[id], GNTMAP_readonly); 4040d160211SJeremy Fitzhardinge gnttab_release_grant_reference( 4052688fcb7SAndrew J. Bennieston &queue->gref_tx_head, queue->grant_tx_ref[id]); 4062688fcb7SAndrew J. Bennieston queue->grant_tx_ref[id] = GRANT_INVALID_REF; 4072688fcb7SAndrew J. Bennieston queue->grant_tx_page[id] = NULL; 4082688fcb7SAndrew J. Bennieston add_id_to_freelist(&queue->tx_skb_freelist, queue->tx_skbs, id); 4090d160211SJeremy Fitzhardinge dev_kfree_skb_irq(skb); 4100d160211SJeremy Fitzhardinge } 4110d160211SJeremy Fitzhardinge 4122688fcb7SAndrew J. Bennieston queue->tx.rsp_cons = prod; 4130d160211SJeremy Fitzhardinge 4147d0105b5SMalcolm Crossley RING_FINAL_CHECK_FOR_RESPONSES(&queue->tx, more_to_do); 4157d0105b5SMalcolm Crossley } while (more_to_do); 4160d160211SJeremy Fitzhardinge 4172688fcb7SAndrew J. Bennieston xennet_maybe_wake_tx(queue); 4180d160211SJeremy Fitzhardinge } 4190d160211SJeremy Fitzhardinge 42030c5d7f0SJulien Grall struct xennet_gnttab_make_txreq { 42130c5d7f0SJulien Grall struct netfront_queue *queue; 42230c5d7f0SJulien Grall struct sk_buff *skb; 42330c5d7f0SJulien Grall struct page *page; 42430c5d7f0SJulien Grall struct xen_netif_tx_request *tx; /* Last request */ 42530c5d7f0SJulien Grall unsigned int size; 42630c5d7f0SJulien Grall }; 42730c5d7f0SJulien Grall 42830c5d7f0SJulien Grall static void xennet_tx_setup_grant(unsigned long gfn, unsigned int offset, 42930c5d7f0SJulien Grall unsigned int len, void *data) 4300d160211SJeremy Fitzhardinge { 43130c5d7f0SJulien Grall struct xennet_gnttab_make_txreq *info = data; 4320d160211SJeremy Fitzhardinge unsigned int id; 433a55e8bb8SDavid Vrabel struct xen_netif_tx_request *tx; 4340d160211SJeremy Fitzhardinge grant_ref_t ref; 43530c5d7f0SJulien Grall /* convenient aliases */ 43630c5d7f0SJulien Grall struct page *page = info->page; 43730c5d7f0SJulien Grall struct netfront_queue *queue = info->queue; 43830c5d7f0SJulien Grall struct sk_buff *skb = info->skb; 4390d160211SJeremy Fitzhardinge 4402688fcb7SAndrew J. Bennieston id = get_id_from_freelist(&queue->tx_skb_freelist, queue->tx_skbs); 441a55e8bb8SDavid Vrabel tx = RING_GET_REQUEST(&queue->tx, queue->tx.req_prod_pvt++); 4422688fcb7SAndrew J. Bennieston ref = gnttab_claim_grant_reference(&queue->gref_tx_head); 443269ebce4SDongli Zhang WARN_ON_ONCE(IS_ERR_VALUE((unsigned long)(int)ref)); 4440d160211SJeremy Fitzhardinge 44530c5d7f0SJulien Grall gnttab_grant_foreign_access_ref(ref, queue->info->xbdev->otherend_id, 44630c5d7f0SJulien Grall gfn, GNTMAP_readonly); 4470d160211SJeremy Fitzhardinge 448a55e8bb8SDavid Vrabel queue->tx_skbs[id].skb = skb; 449a55e8bb8SDavid Vrabel queue->grant_tx_page[id] = page; 450a55e8bb8SDavid Vrabel queue->grant_tx_ref[id] = ref; 451a55e8bb8SDavid Vrabel 452a55e8bb8SDavid Vrabel tx->id = id; 453a55e8bb8SDavid Vrabel tx->gref = ref; 4540d160211SJeremy Fitzhardinge tx->offset = offset; 4550d160211SJeremy Fitzhardinge tx->size = len; 4560d160211SJeremy Fitzhardinge tx->flags = 0; 457a55e8bb8SDavid Vrabel 45830c5d7f0SJulien Grall info->tx = tx; 45930c5d7f0SJulien Grall info->size += tx->size; 46030c5d7f0SJulien Grall } 46130c5d7f0SJulien Grall 46230c5d7f0SJulien Grall static struct xen_netif_tx_request *xennet_make_first_txreq( 46330c5d7f0SJulien Grall struct netfront_queue *queue, struct sk_buff *skb, 46430c5d7f0SJulien Grall struct page *page, unsigned int offset, unsigned int len) 46530c5d7f0SJulien Grall { 46630c5d7f0SJulien Grall struct xennet_gnttab_make_txreq info = { 46730c5d7f0SJulien Grall .queue = queue, 46830c5d7f0SJulien Grall .skb = skb, 46930c5d7f0SJulien Grall .page = page, 47030c5d7f0SJulien Grall .size = 0, 47130c5d7f0SJulien Grall }; 47230c5d7f0SJulien Grall 47330c5d7f0SJulien Grall gnttab_for_one_grant(page, offset, len, xennet_tx_setup_grant, &info); 47430c5d7f0SJulien Grall 47530c5d7f0SJulien Grall return info.tx; 47630c5d7f0SJulien Grall } 47730c5d7f0SJulien Grall 47830c5d7f0SJulien Grall static void xennet_make_one_txreq(unsigned long gfn, unsigned int offset, 47930c5d7f0SJulien Grall unsigned int len, void *data) 48030c5d7f0SJulien Grall { 48130c5d7f0SJulien Grall struct xennet_gnttab_make_txreq *info = data; 48230c5d7f0SJulien Grall 48330c5d7f0SJulien Grall info->tx->flags |= XEN_NETTXF_more_data; 48430c5d7f0SJulien Grall skb_get(info->skb); 48530c5d7f0SJulien Grall xennet_tx_setup_grant(gfn, offset, len, data); 4860d160211SJeremy Fitzhardinge } 4870d160211SJeremy Fitzhardinge 488a55e8bb8SDavid Vrabel static struct xen_netif_tx_request *xennet_make_txreqs( 489a55e8bb8SDavid Vrabel struct netfront_queue *queue, struct xen_netif_tx_request *tx, 490a55e8bb8SDavid Vrabel struct sk_buff *skb, struct page *page, 491a55e8bb8SDavid Vrabel unsigned int offset, unsigned int len) 492a55e8bb8SDavid Vrabel { 49330c5d7f0SJulien Grall struct xennet_gnttab_make_txreq info = { 49430c5d7f0SJulien Grall .queue = queue, 49530c5d7f0SJulien Grall .skb = skb, 49630c5d7f0SJulien Grall .tx = tx, 49730c5d7f0SJulien Grall }; 49830c5d7f0SJulien Grall 499f36c3747SIan Campbell /* Skip unused frames from start of page */ 500f36c3747SIan Campbell page += offset >> PAGE_SHIFT; 501f36c3747SIan Campbell offset &= ~PAGE_MASK; 502f36c3747SIan Campbell 503a55e8bb8SDavid Vrabel while (len) { 50430c5d7f0SJulien Grall info.page = page; 50530c5d7f0SJulien Grall info.size = 0; 50630c5d7f0SJulien Grall 50730c5d7f0SJulien Grall gnttab_foreach_grant_in_range(page, offset, len, 50830c5d7f0SJulien Grall xennet_make_one_txreq, 50930c5d7f0SJulien Grall &info); 51030c5d7f0SJulien Grall 511f36c3747SIan Campbell page++; 512f36c3747SIan Campbell offset = 0; 51330c5d7f0SJulien Grall len -= info.size; 5140d160211SJeremy Fitzhardinge } 5150d160211SJeremy Fitzhardinge 51630c5d7f0SJulien Grall return info.tx; 5170d160211SJeremy Fitzhardinge } 5180d160211SJeremy Fitzhardinge 519f36c3747SIan Campbell /* 520e84448d5SDavid Vrabel * Count how many ring slots are required to send this skb. Each frag 521e84448d5SDavid Vrabel * might be a compound page. 522f36c3747SIan Campbell */ 523e84448d5SDavid Vrabel static int xennet_count_skb_slots(struct sk_buff *skb) 524f36c3747SIan Campbell { 525f36c3747SIan Campbell int i, frags = skb_shinfo(skb)->nr_frags; 52630c5d7f0SJulien Grall int slots; 527e84448d5SDavid Vrabel 52830c5d7f0SJulien Grall slots = gnttab_count_grant(offset_in_page(skb->data), 52930c5d7f0SJulien Grall skb_headlen(skb)); 530f36c3747SIan Campbell 531f36c3747SIan Campbell for (i = 0; i < frags; i++) { 532f36c3747SIan Campbell skb_frag_t *frag = skb_shinfo(skb)->frags + i; 533f36c3747SIan Campbell unsigned long size = skb_frag_size(frag); 534f36c3747SIan Campbell unsigned long offset = frag->page_offset; 535f36c3747SIan Campbell 536f36c3747SIan Campbell /* Skip unused frames from start of page */ 537f36c3747SIan Campbell offset &= ~PAGE_MASK; 538f36c3747SIan Campbell 53930c5d7f0SJulien Grall slots += gnttab_count_grant(offset, size); 540f36c3747SIan Campbell } 541f36c3747SIan Campbell 54230c5d7f0SJulien Grall return slots; 543f36c3747SIan Campbell } 544f36c3747SIan Campbell 54550ee6061SAndrew J. Bennieston static u16 xennet_select_queue(struct net_device *dev, struct sk_buff *skb, 546a350ecceSPaolo Abeni struct net_device *sb_dev) 5472688fcb7SAndrew J. Bennieston { 54850ee6061SAndrew J. Bennieston unsigned int num_queues = dev->real_num_tx_queues; 54950ee6061SAndrew J. Bennieston u32 hash; 55050ee6061SAndrew J. Bennieston u16 queue_idx; 55150ee6061SAndrew J. Bennieston 55250ee6061SAndrew J. Bennieston /* First, check if there is only one queue */ 55350ee6061SAndrew J. Bennieston if (num_queues == 1) { 55450ee6061SAndrew J. Bennieston queue_idx = 0; 55550ee6061SAndrew J. Bennieston } else { 55650ee6061SAndrew J. Bennieston hash = skb_get_hash(skb); 55750ee6061SAndrew J. Bennieston queue_idx = hash % num_queues; 55850ee6061SAndrew J. Bennieston } 55950ee6061SAndrew J. Bennieston 56050ee6061SAndrew J. Bennieston return queue_idx; 5612688fcb7SAndrew J. Bennieston } 5622688fcb7SAndrew J. Bennieston 56330c5d7f0SJulien Grall #define MAX_XEN_SKB_FRAGS (65536 / XEN_PAGE_SIZE + 1) 56430c5d7f0SJulien Grall 56524a94b3cSLuc Van Oostenryck static netdev_tx_t xennet_start_xmit(struct sk_buff *skb, struct net_device *dev) 5660d160211SJeremy Fitzhardinge { 5670d160211SJeremy Fitzhardinge struct netfront_info *np = netdev_priv(dev); 568900e1833SDavid Vrabel struct netfront_stats *tx_stats = this_cpu_ptr(np->tx_stats); 569a55e8bb8SDavid Vrabel struct xen_netif_tx_request *tx, *first_tx; 570a55e8bb8SDavid Vrabel unsigned int i; 5710d160211SJeremy Fitzhardinge int notify; 572f36c3747SIan Campbell int slots; 573a55e8bb8SDavid Vrabel struct page *page; 574a55e8bb8SDavid Vrabel unsigned int offset; 575a55e8bb8SDavid Vrabel unsigned int len; 576cf66f9d4SKonrad Rzeszutek Wilk unsigned long flags; 5772688fcb7SAndrew J. Bennieston struct netfront_queue *queue = NULL; 5782688fcb7SAndrew J. Bennieston unsigned int num_queues = dev->real_num_tx_queues; 5792688fcb7SAndrew J. Bennieston u16 queue_index; 580fd07160bSVitaly Kuznetsov struct sk_buff *nskb; 5812688fcb7SAndrew J. Bennieston 5822688fcb7SAndrew J. Bennieston /* Drop the packet if no queues are set up */ 5832688fcb7SAndrew J. Bennieston if (num_queues < 1) 5842688fcb7SAndrew J. Bennieston goto drop; 5852688fcb7SAndrew J. Bennieston /* Determine which queue to transmit this SKB on */ 5862688fcb7SAndrew J. Bennieston queue_index = skb_get_queue_mapping(skb); 5872688fcb7SAndrew J. Bennieston queue = &np->queues[queue_index]; 5880d160211SJeremy Fitzhardinge 5899ecd1a75SWei Liu /* If skb->len is too big for wire format, drop skb and alert 5909ecd1a75SWei Liu * user about misconfiguration. 5919ecd1a75SWei Liu */ 5929ecd1a75SWei Liu if (unlikely(skb->len > XEN_NETIF_MAX_TX_SIZE)) { 5939ecd1a75SWei Liu net_alert_ratelimited( 5949ecd1a75SWei Liu "xennet: skb->len = %u, too big for wire format\n", 5959ecd1a75SWei Liu skb->len); 5969ecd1a75SWei Liu goto drop; 5979ecd1a75SWei Liu } 5989ecd1a75SWei Liu 599e84448d5SDavid Vrabel slots = xennet_count_skb_slots(skb); 60030c5d7f0SJulien Grall if (unlikely(slots > MAX_XEN_SKB_FRAGS + 1)) { 60197a6d1bbSZoltan Kiss net_dbg_ratelimited("xennet: skb rides the rocket: %d slots, %d bytes\n", 60297a6d1bbSZoltan Kiss slots, skb->len); 60397a6d1bbSZoltan Kiss if (skb_linearize(skb)) 6040d160211SJeremy Fitzhardinge goto drop; 6050d160211SJeremy Fitzhardinge } 6060d160211SJeremy Fitzhardinge 607a55e8bb8SDavid Vrabel page = virt_to_page(skb->data); 608a55e8bb8SDavid Vrabel offset = offset_in_page(skb->data); 609fd07160bSVitaly Kuznetsov 610fd07160bSVitaly Kuznetsov /* The first req should be at least ETH_HLEN size or the packet will be 611fd07160bSVitaly Kuznetsov * dropped by netback. 612fd07160bSVitaly Kuznetsov */ 613fd07160bSVitaly Kuznetsov if (unlikely(PAGE_SIZE - offset < ETH_HLEN)) { 614fd07160bSVitaly Kuznetsov nskb = skb_copy(skb, GFP_ATOMIC); 615fd07160bSVitaly Kuznetsov if (!nskb) 616fd07160bSVitaly Kuznetsov goto drop; 61762f3250fSEric Dumazet dev_consume_skb_any(skb); 618fd07160bSVitaly Kuznetsov skb = nskb; 619fd07160bSVitaly Kuznetsov page = virt_to_page(skb->data); 620fd07160bSVitaly Kuznetsov offset = offset_in_page(skb->data); 621fd07160bSVitaly Kuznetsov } 622fd07160bSVitaly Kuznetsov 623a55e8bb8SDavid Vrabel len = skb_headlen(skb); 624a55e8bb8SDavid Vrabel 6252688fcb7SAndrew J. Bennieston spin_lock_irqsave(&queue->tx_lock, flags); 6260d160211SJeremy Fitzhardinge 6270d160211SJeremy Fitzhardinge if (unlikely(!netif_carrier_ok(dev) || 628f36c3747SIan Campbell (slots > 1 && !xennet_can_sg(dev)) || 6298b86a61dSJohannes Berg netif_needs_gso(skb, netif_skb_features(skb)))) { 6302688fcb7SAndrew J. Bennieston spin_unlock_irqrestore(&queue->tx_lock, flags); 6310d160211SJeremy Fitzhardinge goto drop; 6320d160211SJeremy Fitzhardinge } 6330d160211SJeremy Fitzhardinge 634a55e8bb8SDavid Vrabel /* First request for the linear area. */ 63530c5d7f0SJulien Grall first_tx = tx = xennet_make_first_txreq(queue, skb, 636a55e8bb8SDavid Vrabel page, offset, len); 63730c5d7f0SJulien Grall offset += tx->size; 63830c5d7f0SJulien Grall if (offset == PAGE_SIZE) { 639a55e8bb8SDavid Vrabel page++; 640a55e8bb8SDavid Vrabel offset = 0; 64130c5d7f0SJulien Grall } 642a55e8bb8SDavid Vrabel len -= tx->size; 6430d160211SJeremy Fitzhardinge 6440d160211SJeremy Fitzhardinge if (skb->ip_summed == CHECKSUM_PARTIAL) 6450d160211SJeremy Fitzhardinge /* local packet? */ 646f942dc25SIan Campbell tx->flags |= XEN_NETTXF_csum_blank | XEN_NETTXF_data_validated; 6470d160211SJeremy Fitzhardinge else if (skb->ip_summed == CHECKSUM_UNNECESSARY) 6480d160211SJeremy Fitzhardinge /* remote but checksummed. */ 649f942dc25SIan Campbell tx->flags |= XEN_NETTXF_data_validated; 6500d160211SJeremy Fitzhardinge 651a55e8bb8SDavid Vrabel /* Optional extra info after the first request. */ 6520d160211SJeremy Fitzhardinge if (skb_shinfo(skb)->gso_size) { 6530d160211SJeremy Fitzhardinge struct xen_netif_extra_info *gso; 6540d160211SJeremy Fitzhardinge 6550d160211SJeremy Fitzhardinge gso = (struct xen_netif_extra_info *) 656a55e8bb8SDavid Vrabel RING_GET_REQUEST(&queue->tx, queue->tx.req_prod_pvt++); 6570d160211SJeremy Fitzhardinge 658f942dc25SIan Campbell tx->flags |= XEN_NETTXF_extra_info; 6590d160211SJeremy Fitzhardinge 6600d160211SJeremy Fitzhardinge gso->u.gso.size = skb_shinfo(skb)->gso_size; 6612c0057deSPaul Durrant gso->u.gso.type = (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) ? 6622c0057deSPaul Durrant XEN_NETIF_GSO_TYPE_TCPV6 : 6632c0057deSPaul Durrant XEN_NETIF_GSO_TYPE_TCPV4; 6640d160211SJeremy Fitzhardinge gso->u.gso.pad = 0; 6650d160211SJeremy Fitzhardinge gso->u.gso.features = 0; 6660d160211SJeremy Fitzhardinge 6670d160211SJeremy Fitzhardinge gso->type = XEN_NETIF_EXTRA_TYPE_GSO; 6680d160211SJeremy Fitzhardinge gso->flags = 0; 6690d160211SJeremy Fitzhardinge } 6700d160211SJeremy Fitzhardinge 671a55e8bb8SDavid Vrabel /* Requests for the rest of the linear area. */ 672a55e8bb8SDavid Vrabel tx = xennet_make_txreqs(queue, tx, skb, page, offset, len); 6730d160211SJeremy Fitzhardinge 674a55e8bb8SDavid Vrabel /* Requests for all the frags. */ 675a55e8bb8SDavid Vrabel for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 676a55e8bb8SDavid Vrabel skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 677a55e8bb8SDavid Vrabel tx = xennet_make_txreqs(queue, tx, skb, 678a55e8bb8SDavid Vrabel skb_frag_page(frag), frag->page_offset, 679a55e8bb8SDavid Vrabel skb_frag_size(frag)); 680a55e8bb8SDavid Vrabel } 681a55e8bb8SDavid Vrabel 682a55e8bb8SDavid Vrabel /* First request has the packet length. */ 683a55e8bb8SDavid Vrabel first_tx->size = skb->len; 6840d160211SJeremy Fitzhardinge 6852688fcb7SAndrew J. Bennieston RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&queue->tx, notify); 6860d160211SJeremy Fitzhardinge if (notify) 6872688fcb7SAndrew J. Bennieston notify_remote_via_irq(queue->tx_irq); 6880d160211SJeremy Fitzhardinge 689900e1833SDavid Vrabel u64_stats_update_begin(&tx_stats->syncp); 690900e1833SDavid Vrabel tx_stats->bytes += skb->len; 691900e1833SDavid Vrabel tx_stats->packets++; 692900e1833SDavid Vrabel u64_stats_update_end(&tx_stats->syncp); 69310a273a6SJeremy Fitzhardinge 69410a273a6SJeremy Fitzhardinge /* Note: It is not safe to access skb after xennet_tx_buf_gc()! */ 6952688fcb7SAndrew J. Bennieston xennet_tx_buf_gc(queue); 6960d160211SJeremy Fitzhardinge 6972688fcb7SAndrew J. Bennieston if (!netfront_tx_slot_available(queue)) 6982688fcb7SAndrew J. Bennieston netif_tx_stop_queue(netdev_get_tx_queue(dev, queue->id)); 6990d160211SJeremy Fitzhardinge 7002688fcb7SAndrew J. Bennieston spin_unlock_irqrestore(&queue->tx_lock, flags); 7010d160211SJeremy Fitzhardinge 7026ed10654SPatrick McHardy return NETDEV_TX_OK; 7030d160211SJeremy Fitzhardinge 7040d160211SJeremy Fitzhardinge drop: 70509f75cd7SJeff Garzik dev->stats.tx_dropped++; 706979de8a0SEric W. Biederman dev_kfree_skb_any(skb); 7076ed10654SPatrick McHardy return NETDEV_TX_OK; 7080d160211SJeremy Fitzhardinge } 7090d160211SJeremy Fitzhardinge 7100d160211SJeremy Fitzhardinge static int xennet_close(struct net_device *dev) 7110d160211SJeremy Fitzhardinge { 7120d160211SJeremy Fitzhardinge struct netfront_info *np = netdev_priv(dev); 7132688fcb7SAndrew J. Bennieston unsigned int num_queues = dev->real_num_tx_queues; 7142688fcb7SAndrew J. Bennieston unsigned int i; 7152688fcb7SAndrew J. Bennieston struct netfront_queue *queue; 7162688fcb7SAndrew J. Bennieston netif_tx_stop_all_queues(np->netdev); 7172688fcb7SAndrew J. Bennieston for (i = 0; i < num_queues; ++i) { 7182688fcb7SAndrew J. Bennieston queue = &np->queues[i]; 7192688fcb7SAndrew J. Bennieston napi_disable(&queue->napi); 7202688fcb7SAndrew J. Bennieston } 7210d160211SJeremy Fitzhardinge return 0; 7220d160211SJeremy Fitzhardinge } 7230d160211SJeremy Fitzhardinge 7242688fcb7SAndrew J. Bennieston static void xennet_move_rx_slot(struct netfront_queue *queue, struct sk_buff *skb, 7250d160211SJeremy Fitzhardinge grant_ref_t ref) 7260d160211SJeremy Fitzhardinge { 7272688fcb7SAndrew J. Bennieston int new = xennet_rxidx(queue->rx.req_prod_pvt); 7280d160211SJeremy Fitzhardinge 7292688fcb7SAndrew J. Bennieston BUG_ON(queue->rx_skbs[new]); 7302688fcb7SAndrew J. Bennieston queue->rx_skbs[new] = skb; 7312688fcb7SAndrew J. Bennieston queue->grant_rx_ref[new] = ref; 7322688fcb7SAndrew J. Bennieston RING_GET_REQUEST(&queue->rx, queue->rx.req_prod_pvt)->id = new; 7332688fcb7SAndrew J. Bennieston RING_GET_REQUEST(&queue->rx, queue->rx.req_prod_pvt)->gref = ref; 7342688fcb7SAndrew J. Bennieston queue->rx.req_prod_pvt++; 7350d160211SJeremy Fitzhardinge } 7360d160211SJeremy Fitzhardinge 7372688fcb7SAndrew J. Bennieston static int xennet_get_extras(struct netfront_queue *queue, 7380d160211SJeremy Fitzhardinge struct xen_netif_extra_info *extras, 7390d160211SJeremy Fitzhardinge RING_IDX rp) 7400d160211SJeremy Fitzhardinge 7410d160211SJeremy Fitzhardinge { 7420d160211SJeremy Fitzhardinge struct xen_netif_extra_info *extra; 7432688fcb7SAndrew J. Bennieston struct device *dev = &queue->info->netdev->dev; 7442688fcb7SAndrew J. Bennieston RING_IDX cons = queue->rx.rsp_cons; 7450d160211SJeremy Fitzhardinge int err = 0; 7460d160211SJeremy Fitzhardinge 7470d160211SJeremy Fitzhardinge do { 7480d160211SJeremy Fitzhardinge struct sk_buff *skb; 7490d160211SJeremy Fitzhardinge grant_ref_t ref; 7500d160211SJeremy Fitzhardinge 7510d160211SJeremy Fitzhardinge if (unlikely(cons + 1 == rp)) { 7520d160211SJeremy Fitzhardinge if (net_ratelimit()) 7530d160211SJeremy Fitzhardinge dev_warn(dev, "Missing extra info\n"); 7540d160211SJeremy Fitzhardinge err = -EBADR; 7550d160211SJeremy Fitzhardinge break; 7560d160211SJeremy Fitzhardinge } 7570d160211SJeremy Fitzhardinge 7580d160211SJeremy Fitzhardinge extra = (struct xen_netif_extra_info *) 7592688fcb7SAndrew J. Bennieston RING_GET_RESPONSE(&queue->rx, ++cons); 7600d160211SJeremy Fitzhardinge 7610d160211SJeremy Fitzhardinge if (unlikely(!extra->type || 7620d160211SJeremy Fitzhardinge extra->type >= XEN_NETIF_EXTRA_TYPE_MAX)) { 7630d160211SJeremy Fitzhardinge if (net_ratelimit()) 7640d160211SJeremy Fitzhardinge dev_warn(dev, "Invalid extra type: %d\n", 7650d160211SJeremy Fitzhardinge extra->type); 7660d160211SJeremy Fitzhardinge err = -EINVAL; 7670d160211SJeremy Fitzhardinge } else { 7680d160211SJeremy Fitzhardinge memcpy(&extras[extra->type - 1], extra, 7690d160211SJeremy Fitzhardinge sizeof(*extra)); 7700d160211SJeremy Fitzhardinge } 7710d160211SJeremy Fitzhardinge 7722688fcb7SAndrew J. Bennieston skb = xennet_get_rx_skb(queue, cons); 7732688fcb7SAndrew J. Bennieston ref = xennet_get_rx_ref(queue, cons); 7742688fcb7SAndrew J. Bennieston xennet_move_rx_slot(queue, skb, ref); 7750d160211SJeremy Fitzhardinge } while (extra->flags & XEN_NETIF_EXTRA_FLAG_MORE); 7760d160211SJeremy Fitzhardinge 7772688fcb7SAndrew J. Bennieston queue->rx.rsp_cons = cons; 7780d160211SJeremy Fitzhardinge return err; 7790d160211SJeremy Fitzhardinge } 7800d160211SJeremy Fitzhardinge 7812688fcb7SAndrew J. Bennieston static int xennet_get_responses(struct netfront_queue *queue, 7820d160211SJeremy Fitzhardinge struct netfront_rx_info *rinfo, RING_IDX rp, 7830d160211SJeremy Fitzhardinge struct sk_buff_head *list) 7840d160211SJeremy Fitzhardinge { 7850d160211SJeremy Fitzhardinge struct xen_netif_rx_response *rx = &rinfo->rx; 7860d160211SJeremy Fitzhardinge struct xen_netif_extra_info *extras = rinfo->extras; 7872688fcb7SAndrew J. Bennieston struct device *dev = &queue->info->netdev->dev; 7882688fcb7SAndrew J. Bennieston RING_IDX cons = queue->rx.rsp_cons; 7892688fcb7SAndrew J. Bennieston struct sk_buff *skb = xennet_get_rx_skb(queue, cons); 7902688fcb7SAndrew J. Bennieston grant_ref_t ref = xennet_get_rx_ref(queue, cons); 79157f230abSJuergen Gross int max = XEN_NETIF_NR_SLOTS_MIN + (rx->status <= RX_COPY_THRESHOLD); 7927158ff6dSWei Liu int slots = 1; 7930d160211SJeremy Fitzhardinge int err = 0; 7940d160211SJeremy Fitzhardinge unsigned long ret; 7950d160211SJeremy Fitzhardinge 796f942dc25SIan Campbell if (rx->flags & XEN_NETRXF_extra_info) { 7972688fcb7SAndrew J. Bennieston err = xennet_get_extras(queue, extras, rp); 7982688fcb7SAndrew J. Bennieston cons = queue->rx.rsp_cons; 7990d160211SJeremy Fitzhardinge } 8000d160211SJeremy Fitzhardinge 8010d160211SJeremy Fitzhardinge for (;;) { 8020d160211SJeremy Fitzhardinge if (unlikely(rx->status < 0 || 80330c5d7f0SJulien Grall rx->offset + rx->status > XEN_PAGE_SIZE)) { 8040d160211SJeremy Fitzhardinge if (net_ratelimit()) 8056c10127dSJulien Grall dev_warn(dev, "rx->offset: %u, size: %d\n", 8060d160211SJeremy Fitzhardinge rx->offset, rx->status); 8072688fcb7SAndrew J. Bennieston xennet_move_rx_slot(queue, skb, ref); 8080d160211SJeremy Fitzhardinge err = -EINVAL; 8090d160211SJeremy Fitzhardinge goto next; 8100d160211SJeremy Fitzhardinge } 8110d160211SJeremy Fitzhardinge 8120d160211SJeremy Fitzhardinge /* 8130d160211SJeremy Fitzhardinge * This definitely indicates a bug, either in this driver or in 8140d160211SJeremy Fitzhardinge * the backend driver. In future this should flag the bad 815697089dcSWei Liu * situation to the system controller to reboot the backend. 8160d160211SJeremy Fitzhardinge */ 8170d160211SJeremy Fitzhardinge if (ref == GRANT_INVALID_REF) { 8180d160211SJeremy Fitzhardinge if (net_ratelimit()) 8190d160211SJeremy Fitzhardinge dev_warn(dev, "Bad rx response id %d.\n", 8200d160211SJeremy Fitzhardinge rx->id); 8210d160211SJeremy Fitzhardinge err = -EINVAL; 8220d160211SJeremy Fitzhardinge goto next; 8230d160211SJeremy Fitzhardinge } 8240d160211SJeremy Fitzhardinge 8250d160211SJeremy Fitzhardinge ret = gnttab_end_foreign_access_ref(ref, 0); 8260d160211SJeremy Fitzhardinge BUG_ON(!ret); 8270d160211SJeremy Fitzhardinge 8282688fcb7SAndrew J. Bennieston gnttab_release_grant_reference(&queue->gref_rx_head, ref); 8290d160211SJeremy Fitzhardinge 8300d160211SJeremy Fitzhardinge __skb_queue_tail(list, skb); 8310d160211SJeremy Fitzhardinge 8320d160211SJeremy Fitzhardinge next: 833f942dc25SIan Campbell if (!(rx->flags & XEN_NETRXF_more_data)) 8340d160211SJeremy Fitzhardinge break; 8350d160211SJeremy Fitzhardinge 8367158ff6dSWei Liu if (cons + slots == rp) { 8370d160211SJeremy Fitzhardinge if (net_ratelimit()) 8387158ff6dSWei Liu dev_warn(dev, "Need more slots\n"); 8390d160211SJeremy Fitzhardinge err = -ENOENT; 8400d160211SJeremy Fitzhardinge break; 8410d160211SJeremy Fitzhardinge } 8420d160211SJeremy Fitzhardinge 8432688fcb7SAndrew J. Bennieston rx = RING_GET_RESPONSE(&queue->rx, cons + slots); 8442688fcb7SAndrew J. Bennieston skb = xennet_get_rx_skb(queue, cons + slots); 8452688fcb7SAndrew J. Bennieston ref = xennet_get_rx_ref(queue, cons + slots); 8467158ff6dSWei Liu slots++; 8470d160211SJeremy Fitzhardinge } 8480d160211SJeremy Fitzhardinge 8497158ff6dSWei Liu if (unlikely(slots > max)) { 8500d160211SJeremy Fitzhardinge if (net_ratelimit()) 851697089dcSWei Liu dev_warn(dev, "Too many slots\n"); 8520d160211SJeremy Fitzhardinge err = -E2BIG; 8530d160211SJeremy Fitzhardinge } 8540d160211SJeremy Fitzhardinge 8550d160211SJeremy Fitzhardinge if (unlikely(err)) 8562688fcb7SAndrew J. Bennieston queue->rx.rsp_cons = cons + slots; 8570d160211SJeremy Fitzhardinge 8580d160211SJeremy Fitzhardinge return err; 8590d160211SJeremy Fitzhardinge } 8600d160211SJeremy Fitzhardinge 8610d160211SJeremy Fitzhardinge static int xennet_set_skb_gso(struct sk_buff *skb, 8620d160211SJeremy Fitzhardinge struct xen_netif_extra_info *gso) 8630d160211SJeremy Fitzhardinge { 8640d160211SJeremy Fitzhardinge if (!gso->u.gso.size) { 8650d160211SJeremy Fitzhardinge if (net_ratelimit()) 866383eda32SJoe Perches pr_warn("GSO size must not be zero\n"); 8670d160211SJeremy Fitzhardinge return -EINVAL; 8680d160211SJeremy Fitzhardinge } 8690d160211SJeremy Fitzhardinge 8702c0057deSPaul Durrant if (gso->u.gso.type != XEN_NETIF_GSO_TYPE_TCPV4 && 8712c0057deSPaul Durrant gso->u.gso.type != XEN_NETIF_GSO_TYPE_TCPV6) { 8720d160211SJeremy Fitzhardinge if (net_ratelimit()) 873383eda32SJoe Perches pr_warn("Bad GSO type %d\n", gso->u.gso.type); 8740d160211SJeremy Fitzhardinge return -EINVAL; 8750d160211SJeremy Fitzhardinge } 8760d160211SJeremy Fitzhardinge 8770d160211SJeremy Fitzhardinge skb_shinfo(skb)->gso_size = gso->u.gso.size; 8782c0057deSPaul Durrant skb_shinfo(skb)->gso_type = 8792c0057deSPaul Durrant (gso->u.gso.type == XEN_NETIF_GSO_TYPE_TCPV4) ? 8802c0057deSPaul Durrant SKB_GSO_TCPV4 : 8812c0057deSPaul Durrant SKB_GSO_TCPV6; 8820d160211SJeremy Fitzhardinge 8830d160211SJeremy Fitzhardinge /* Header must be checked, and gso_segs computed. */ 8840d160211SJeremy Fitzhardinge skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY; 8850d160211SJeremy Fitzhardinge skb_shinfo(skb)->gso_segs = 0; 8860d160211SJeremy Fitzhardinge 8870d160211SJeremy Fitzhardinge return 0; 8880d160211SJeremy Fitzhardinge } 8890d160211SJeremy Fitzhardinge 8902688fcb7SAndrew J. Bennieston static RING_IDX xennet_fill_frags(struct netfront_queue *queue, 8910d160211SJeremy Fitzhardinge struct sk_buff *skb, 8920d160211SJeremy Fitzhardinge struct sk_buff_head *list) 8930d160211SJeremy Fitzhardinge { 8942688fcb7SAndrew J. Bennieston RING_IDX cons = queue->rx.rsp_cons; 8950d160211SJeremy Fitzhardinge struct sk_buff *nskb; 8960d160211SJeremy Fitzhardinge 8970d160211SJeremy Fitzhardinge while ((nskb = __skb_dequeue(list))) { 8980d160211SJeremy Fitzhardinge struct xen_netif_rx_response *rx = 8992688fcb7SAndrew J. Bennieston RING_GET_RESPONSE(&queue->rx, ++cons); 90001c68026SIan Campbell skb_frag_t *nfrag = &skb_shinfo(nskb)->frags[0]; 9010d160211SJeremy Fitzhardinge 902d472b3a6SJuergen Gross if (skb_shinfo(skb)->nr_frags == MAX_SKB_FRAGS) { 903093b9c71SJan Beulich unsigned int pull_to = NETFRONT_SKB_CB(skb)->pull_to; 9040d160211SJeremy Fitzhardinge 905d81c5054SJuergen Gross BUG_ON(pull_to < skb_headlen(skb)); 906093b9c71SJan Beulich __pskb_pull_tail(skb, pull_to - skb_headlen(skb)); 907093b9c71SJan Beulich } 908ad4f15dcSJuergen Gross if (unlikely(skb_shinfo(skb)->nr_frags >= MAX_SKB_FRAGS)) { 90900b36850SDongli Zhang queue->rx.rsp_cons = ++cons + skb_queue_len(list); 910ad4f15dcSJuergen Gross kfree_skb(nskb); 911ad4f15dcSJuergen Gross return ~0U; 912ad4f15dcSJuergen Gross } 913093b9c71SJan Beulich 914d472b3a6SJuergen Gross skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, 915d472b3a6SJuergen Gross skb_frag_page(nfrag), 916093b9c71SJan Beulich rx->offset, rx->status, PAGE_SIZE); 9170d160211SJeremy Fitzhardinge 9180d160211SJeremy Fitzhardinge skb_shinfo(nskb)->nr_frags = 0; 9190d160211SJeremy Fitzhardinge kfree_skb(nskb); 9200d160211SJeremy Fitzhardinge } 9210d160211SJeremy Fitzhardinge 9220d160211SJeremy Fitzhardinge return cons; 9230d160211SJeremy Fitzhardinge } 9240d160211SJeremy Fitzhardinge 925e0ce4af9SIan Campbell static int checksum_setup(struct net_device *dev, struct sk_buff *skb) 9260d160211SJeremy Fitzhardinge { 927b5cf66cdSPaul Durrant bool recalculate_partial_csum = false; 928e0ce4af9SIan Campbell 929e0ce4af9SIan Campbell /* 930e0ce4af9SIan Campbell * A GSO SKB must be CHECKSUM_PARTIAL. However some buggy 931e0ce4af9SIan Campbell * peers can fail to set NETRXF_csum_blank when sending a GSO 932e0ce4af9SIan Campbell * frame. In this case force the SKB to CHECKSUM_PARTIAL and 933e0ce4af9SIan Campbell * recalculate the partial checksum. 934e0ce4af9SIan Campbell */ 935e0ce4af9SIan Campbell if (skb->ip_summed != CHECKSUM_PARTIAL && skb_is_gso(skb)) { 936e0ce4af9SIan Campbell struct netfront_info *np = netdev_priv(dev); 9372688fcb7SAndrew J. Bennieston atomic_inc(&np->rx_gso_checksum_fixup); 938e0ce4af9SIan Campbell skb->ip_summed = CHECKSUM_PARTIAL; 939b5cf66cdSPaul Durrant recalculate_partial_csum = true; 940e0ce4af9SIan Campbell } 941e0ce4af9SIan Campbell 942e0ce4af9SIan Campbell /* A non-CHECKSUM_PARTIAL SKB does not require setup. */ 943e0ce4af9SIan Campbell if (skb->ip_summed != CHECKSUM_PARTIAL) 944e0ce4af9SIan Campbell return 0; 9450d160211SJeremy Fitzhardinge 946b5cf66cdSPaul Durrant return skb_checksum_setup(skb, recalculate_partial_csum); 9470d160211SJeremy Fitzhardinge } 9480d160211SJeremy Fitzhardinge 9492688fcb7SAndrew J. Bennieston static int handle_incoming_queue(struct netfront_queue *queue, 9500d160211SJeremy Fitzhardinge struct sk_buff_head *rxq) 9510d160211SJeremy Fitzhardinge { 952900e1833SDavid Vrabel struct netfront_stats *rx_stats = this_cpu_ptr(queue->info->rx_stats); 9530d160211SJeremy Fitzhardinge int packets_dropped = 0; 9540d160211SJeremy Fitzhardinge struct sk_buff *skb; 9550d160211SJeremy Fitzhardinge 9560d160211SJeremy Fitzhardinge while ((skb = __skb_dequeue(rxq)) != NULL) { 9573683243bSIan Campbell int pull_to = NETFRONT_SKB_CB(skb)->pull_to; 9580d160211SJeremy Fitzhardinge 959093b9c71SJan Beulich if (pull_to > skb_headlen(skb)) 9603683243bSIan Campbell __pskb_pull_tail(skb, pull_to - skb_headlen(skb)); 9610d160211SJeremy Fitzhardinge 9620d160211SJeremy Fitzhardinge /* Ethernet work: Delayed to here as it peeks the header. */ 9632688fcb7SAndrew J. Bennieston skb->protocol = eth_type_trans(skb, queue->info->netdev); 964d554f73dSWei Liu skb_reset_network_header(skb); 9650d160211SJeremy Fitzhardinge 9662688fcb7SAndrew J. Bennieston if (checksum_setup(queue->info->netdev, skb)) { 9670d160211SJeremy Fitzhardinge kfree_skb(skb); 9680d160211SJeremy Fitzhardinge packets_dropped++; 9692688fcb7SAndrew J. Bennieston queue->info->netdev->stats.rx_errors++; 9700d160211SJeremy Fitzhardinge continue; 9710d160211SJeremy Fitzhardinge } 9720d160211SJeremy Fitzhardinge 973900e1833SDavid Vrabel u64_stats_update_begin(&rx_stats->syncp); 974900e1833SDavid Vrabel rx_stats->packets++; 975900e1833SDavid Vrabel rx_stats->bytes += skb->len; 976900e1833SDavid Vrabel u64_stats_update_end(&rx_stats->syncp); 9770d160211SJeremy Fitzhardinge 9780d160211SJeremy Fitzhardinge /* Pass it up. */ 9792688fcb7SAndrew J. Bennieston napi_gro_receive(&queue->napi, skb); 9800d160211SJeremy Fitzhardinge } 9810d160211SJeremy Fitzhardinge 9820d160211SJeremy Fitzhardinge return packets_dropped; 9830d160211SJeremy Fitzhardinge } 9840d160211SJeremy Fitzhardinge 985bea3348eSStephen Hemminger static int xennet_poll(struct napi_struct *napi, int budget) 9860d160211SJeremy Fitzhardinge { 9872688fcb7SAndrew J. Bennieston struct netfront_queue *queue = container_of(napi, struct netfront_queue, napi); 9882688fcb7SAndrew J. Bennieston struct net_device *dev = queue->info->netdev; 9890d160211SJeremy Fitzhardinge struct sk_buff *skb; 9900d160211SJeremy Fitzhardinge struct netfront_rx_info rinfo; 9910d160211SJeremy Fitzhardinge struct xen_netif_rx_response *rx = &rinfo.rx; 9920d160211SJeremy Fitzhardinge struct xen_netif_extra_info *extras = rinfo.extras; 9930d160211SJeremy Fitzhardinge RING_IDX i, rp; 994bea3348eSStephen Hemminger int work_done; 9950d160211SJeremy Fitzhardinge struct sk_buff_head rxq; 9960d160211SJeremy Fitzhardinge struct sk_buff_head errq; 9970d160211SJeremy Fitzhardinge struct sk_buff_head tmpq; 9980d160211SJeremy Fitzhardinge int err; 9990d160211SJeremy Fitzhardinge 10002688fcb7SAndrew J. Bennieston spin_lock(&queue->rx_lock); 10010d160211SJeremy Fitzhardinge 10020d160211SJeremy Fitzhardinge skb_queue_head_init(&rxq); 10030d160211SJeremy Fitzhardinge skb_queue_head_init(&errq); 10040d160211SJeremy Fitzhardinge skb_queue_head_init(&tmpq); 10050d160211SJeremy Fitzhardinge 10062688fcb7SAndrew J. Bennieston rp = queue->rx.sring->rsp_prod; 10070d160211SJeremy Fitzhardinge rmb(); /* Ensure we see queued responses up to 'rp'. */ 10080d160211SJeremy Fitzhardinge 10092688fcb7SAndrew J. Bennieston i = queue->rx.rsp_cons; 10100d160211SJeremy Fitzhardinge work_done = 0; 10110d160211SJeremy Fitzhardinge while ((i != rp) && (work_done < budget)) { 10122688fcb7SAndrew J. Bennieston memcpy(rx, RING_GET_RESPONSE(&queue->rx, i), sizeof(*rx)); 10130d160211SJeremy Fitzhardinge memset(extras, 0, sizeof(rinfo.extras)); 10140d160211SJeremy Fitzhardinge 10152688fcb7SAndrew J. Bennieston err = xennet_get_responses(queue, &rinfo, rp, &tmpq); 10160d160211SJeremy Fitzhardinge 10170d160211SJeremy Fitzhardinge if (unlikely(err)) { 10180d160211SJeremy Fitzhardinge err: 10190d160211SJeremy Fitzhardinge while ((skb = __skb_dequeue(&tmpq))) 10200d160211SJeremy Fitzhardinge __skb_queue_tail(&errq, skb); 102109f75cd7SJeff Garzik dev->stats.rx_errors++; 10222688fcb7SAndrew J. Bennieston i = queue->rx.rsp_cons; 10230d160211SJeremy Fitzhardinge continue; 10240d160211SJeremy Fitzhardinge } 10250d160211SJeremy Fitzhardinge 10260d160211SJeremy Fitzhardinge skb = __skb_dequeue(&tmpq); 10270d160211SJeremy Fitzhardinge 10280d160211SJeremy Fitzhardinge if (extras[XEN_NETIF_EXTRA_TYPE_GSO - 1].type) { 10290d160211SJeremy Fitzhardinge struct xen_netif_extra_info *gso; 10300d160211SJeremy Fitzhardinge gso = &extras[XEN_NETIF_EXTRA_TYPE_GSO - 1]; 10310d160211SJeremy Fitzhardinge 10320d160211SJeremy Fitzhardinge if (unlikely(xennet_set_skb_gso(skb, gso))) { 10330d160211SJeremy Fitzhardinge __skb_queue_head(&tmpq, skb); 10342688fcb7SAndrew J. Bennieston queue->rx.rsp_cons += skb_queue_len(&tmpq); 10350d160211SJeremy Fitzhardinge goto err; 10360d160211SJeremy Fitzhardinge } 10370d160211SJeremy Fitzhardinge } 10380d160211SJeremy Fitzhardinge 10393683243bSIan Campbell NETFRONT_SKB_CB(skb)->pull_to = rx->status; 10403683243bSIan Campbell if (NETFRONT_SKB_CB(skb)->pull_to > RX_COPY_THRESHOLD) 10413683243bSIan Campbell NETFRONT_SKB_CB(skb)->pull_to = RX_COPY_THRESHOLD; 10420d160211SJeremy Fitzhardinge 10433683243bSIan Campbell skb_shinfo(skb)->frags[0].page_offset = rx->offset; 10443683243bSIan Campbell skb_frag_size_set(&skb_shinfo(skb)->frags[0], rx->status); 10453683243bSIan Campbell skb->data_len = rx->status; 1046093b9c71SJan Beulich skb->len += rx->status; 10470d160211SJeremy Fitzhardinge 10482688fcb7SAndrew J. Bennieston i = xennet_fill_frags(queue, skb, &tmpq); 1049ad4f15dcSJuergen Gross if (unlikely(i == ~0U)) 1050ad4f15dcSJuergen Gross goto err; 10510d160211SJeremy Fitzhardinge 1052f942dc25SIan Campbell if (rx->flags & XEN_NETRXF_csum_blank) 10530d160211SJeremy Fitzhardinge skb->ip_summed = CHECKSUM_PARTIAL; 1054f942dc25SIan Campbell else if (rx->flags & XEN_NETRXF_data_validated) 10550d160211SJeremy Fitzhardinge skb->ip_summed = CHECKSUM_UNNECESSARY; 10560d160211SJeremy Fitzhardinge 10570d160211SJeremy Fitzhardinge __skb_queue_tail(&rxq, skb); 10580d160211SJeremy Fitzhardinge 10592688fcb7SAndrew J. Bennieston queue->rx.rsp_cons = ++i; 10600d160211SJeremy Fitzhardinge work_done++; 10610d160211SJeremy Fitzhardinge } 10620d160211SJeremy Fitzhardinge 106356cfe5d0SWang Chen __skb_queue_purge(&errq); 10640d160211SJeremy Fitzhardinge 10652688fcb7SAndrew J. Bennieston work_done -= handle_incoming_queue(queue, &rxq); 10660d160211SJeremy Fitzhardinge 10672688fcb7SAndrew J. Bennieston xennet_alloc_rx_buffers(queue); 10680d160211SJeremy Fitzhardinge 10690d160211SJeremy Fitzhardinge if (work_done < budget) { 1070bea3348eSStephen Hemminger int more_to_do = 0; 1071bea3348eSStephen Hemminger 10726ad20165SEric Dumazet napi_complete_done(napi, work_done); 10730d160211SJeremy Fitzhardinge 10742688fcb7SAndrew J. Bennieston RING_FINAL_CHECK_FOR_RESPONSES(&queue->rx, more_to_do); 10756a6dc08fSDavid Vrabel if (more_to_do) 10766a6dc08fSDavid Vrabel napi_schedule(napi); 10770d160211SJeremy Fitzhardinge } 10780d160211SJeremy Fitzhardinge 10792688fcb7SAndrew J. Bennieston spin_unlock(&queue->rx_lock); 10800d160211SJeremy Fitzhardinge 1081bea3348eSStephen Hemminger return work_done; 10820d160211SJeremy Fitzhardinge } 10830d160211SJeremy Fitzhardinge 10840d160211SJeremy Fitzhardinge static int xennet_change_mtu(struct net_device *dev, int mtu) 10850d160211SJeremy Fitzhardinge { 10860c36820eSJonathan Davies int max = xennet_can_sg(dev) ? XEN_NETIF_MAX_TX_SIZE : ETH_DATA_LEN; 10870d160211SJeremy Fitzhardinge 10880d160211SJeremy Fitzhardinge if (mtu > max) 10890d160211SJeremy Fitzhardinge return -EINVAL; 10900d160211SJeremy Fitzhardinge dev->mtu = mtu; 10910d160211SJeremy Fitzhardinge return 0; 10920d160211SJeremy Fitzhardinge } 10930d160211SJeremy Fitzhardinge 1094bc1f4470Sstephen hemminger static void xennet_get_stats64(struct net_device *dev, 1095e00f85beSstephen hemminger struct rtnl_link_stats64 *tot) 1096e00f85beSstephen hemminger { 1097e00f85beSstephen hemminger struct netfront_info *np = netdev_priv(dev); 1098e00f85beSstephen hemminger int cpu; 1099e00f85beSstephen hemminger 1100e00f85beSstephen hemminger for_each_possible_cpu(cpu) { 1101900e1833SDavid Vrabel struct netfront_stats *rx_stats = per_cpu_ptr(np->rx_stats, cpu); 1102900e1833SDavid Vrabel struct netfront_stats *tx_stats = per_cpu_ptr(np->tx_stats, cpu); 1103e00f85beSstephen hemminger u64 rx_packets, rx_bytes, tx_packets, tx_bytes; 1104e00f85beSstephen hemminger unsigned int start; 1105e00f85beSstephen hemminger 1106e00f85beSstephen hemminger do { 1107900e1833SDavid Vrabel start = u64_stats_fetch_begin_irq(&tx_stats->syncp); 1108900e1833SDavid Vrabel tx_packets = tx_stats->packets; 1109900e1833SDavid Vrabel tx_bytes = tx_stats->bytes; 1110900e1833SDavid Vrabel } while (u64_stats_fetch_retry_irq(&tx_stats->syncp, start)); 1111e00f85beSstephen hemminger 1112900e1833SDavid Vrabel do { 1113900e1833SDavid Vrabel start = u64_stats_fetch_begin_irq(&rx_stats->syncp); 1114900e1833SDavid Vrabel rx_packets = rx_stats->packets; 1115900e1833SDavid Vrabel rx_bytes = rx_stats->bytes; 1116900e1833SDavid Vrabel } while (u64_stats_fetch_retry_irq(&rx_stats->syncp, start)); 1117e00f85beSstephen hemminger 1118e00f85beSstephen hemminger tot->rx_packets += rx_packets; 1119e00f85beSstephen hemminger tot->tx_packets += tx_packets; 1120e00f85beSstephen hemminger tot->rx_bytes += rx_bytes; 1121e00f85beSstephen hemminger tot->tx_bytes += tx_bytes; 1122e00f85beSstephen hemminger } 1123e00f85beSstephen hemminger 1124e00f85beSstephen hemminger tot->rx_errors = dev->stats.rx_errors; 1125e00f85beSstephen hemminger tot->tx_dropped = dev->stats.tx_dropped; 1126e00f85beSstephen hemminger } 1127e00f85beSstephen hemminger 11282688fcb7SAndrew J. Bennieston static void xennet_release_tx_bufs(struct netfront_queue *queue) 11290d160211SJeremy Fitzhardinge { 11300d160211SJeremy Fitzhardinge struct sk_buff *skb; 11310d160211SJeremy Fitzhardinge int i; 11320d160211SJeremy Fitzhardinge 11330d160211SJeremy Fitzhardinge for (i = 0; i < NET_TX_RING_SIZE; i++) { 11340d160211SJeremy Fitzhardinge /* Skip over entries which are actually freelist references */ 11352688fcb7SAndrew J. Bennieston if (skb_entry_is_link(&queue->tx_skbs[i])) 11360d160211SJeremy Fitzhardinge continue; 11370d160211SJeremy Fitzhardinge 11382688fcb7SAndrew J. Bennieston skb = queue->tx_skbs[i].skb; 11392688fcb7SAndrew J. Bennieston get_page(queue->grant_tx_page[i]); 11402688fcb7SAndrew J. Bennieston gnttab_end_foreign_access(queue->grant_tx_ref[i], 1141cefe0078SAnnie Li GNTMAP_readonly, 11422688fcb7SAndrew J. Bennieston (unsigned long)page_address(queue->grant_tx_page[i])); 11432688fcb7SAndrew J. Bennieston queue->grant_tx_page[i] = NULL; 11442688fcb7SAndrew J. Bennieston queue->grant_tx_ref[i] = GRANT_INVALID_REF; 11452688fcb7SAndrew J. Bennieston add_id_to_freelist(&queue->tx_skb_freelist, queue->tx_skbs, i); 11460d160211SJeremy Fitzhardinge dev_kfree_skb_irq(skb); 11470d160211SJeremy Fitzhardinge } 11480d160211SJeremy Fitzhardinge } 11490d160211SJeremy Fitzhardinge 11502688fcb7SAndrew J. Bennieston static void xennet_release_rx_bufs(struct netfront_queue *queue) 11510d160211SJeremy Fitzhardinge { 11520d160211SJeremy Fitzhardinge int id, ref; 11530d160211SJeremy Fitzhardinge 11542688fcb7SAndrew J. Bennieston spin_lock_bh(&queue->rx_lock); 11550d160211SJeremy Fitzhardinge 11560d160211SJeremy Fitzhardinge for (id = 0; id < NET_RX_RING_SIZE; id++) { 1157cefe0078SAnnie Li struct sk_buff *skb; 1158cefe0078SAnnie Li struct page *page; 11590d160211SJeremy Fitzhardinge 11602688fcb7SAndrew J. Bennieston skb = queue->rx_skbs[id]; 1161cefe0078SAnnie Li if (!skb) 1162cefe0078SAnnie Li continue; 1163cefe0078SAnnie Li 11642688fcb7SAndrew J. Bennieston ref = queue->grant_rx_ref[id]; 1165cefe0078SAnnie Li if (ref == GRANT_INVALID_REF) 1166cefe0078SAnnie Li continue; 1167cefe0078SAnnie Li 1168cefe0078SAnnie Li page = skb_frag_page(&skb_shinfo(skb)->frags[0]); 1169cefe0078SAnnie Li 1170cefe0078SAnnie Li /* gnttab_end_foreign_access() needs a page ref until 1171cefe0078SAnnie Li * foreign access is ended (which may be deferred). 1172cefe0078SAnnie Li */ 1173cefe0078SAnnie Li get_page(page); 1174cefe0078SAnnie Li gnttab_end_foreign_access(ref, 0, 1175cefe0078SAnnie Li (unsigned long)page_address(page)); 11762688fcb7SAndrew J. Bennieston queue->grant_rx_ref[id] = GRANT_INVALID_REF; 11770d160211SJeremy Fitzhardinge 1178cefe0078SAnnie Li kfree_skb(skb); 11790d160211SJeremy Fitzhardinge } 11800d160211SJeremy Fitzhardinge 11812688fcb7SAndrew J. Bennieston spin_unlock_bh(&queue->rx_lock); 11820d160211SJeremy Fitzhardinge } 11830d160211SJeremy Fitzhardinge 1184c8f44affSMichał Mirosław static netdev_features_t xennet_fix_features(struct net_device *dev, 1185c8f44affSMichał Mirosław netdev_features_t features) 11868f7b01a1SEric Dumazet { 11878f7b01a1SEric Dumazet struct netfront_info *np = netdev_priv(dev); 11888f7b01a1SEric Dumazet 11892890ea5cSJuergen Gross if (features & NETIF_F_SG && 11902890ea5cSJuergen Gross !xenbus_read_unsigned(np->xbdev->otherend, "feature-sg", 0)) 11918f7b01a1SEric Dumazet features &= ~NETIF_F_SG; 11928f7b01a1SEric Dumazet 11932890ea5cSJuergen Gross if (features & NETIF_F_IPV6_CSUM && 11942890ea5cSJuergen Gross !xenbus_read_unsigned(np->xbdev->otherend, 11952890ea5cSJuergen Gross "feature-ipv6-csum-offload", 0)) 11962c0057deSPaul Durrant features &= ~NETIF_F_IPV6_CSUM; 11972c0057deSPaul Durrant 11982890ea5cSJuergen Gross if (features & NETIF_F_TSO && 11992890ea5cSJuergen Gross !xenbus_read_unsigned(np->xbdev->otherend, "feature-gso-tcpv4", 0)) 12008f7b01a1SEric Dumazet features &= ~NETIF_F_TSO; 12018f7b01a1SEric Dumazet 12022890ea5cSJuergen Gross if (features & NETIF_F_TSO6 && 12032890ea5cSJuergen Gross !xenbus_read_unsigned(np->xbdev->otherend, "feature-gso-tcpv6", 0)) 12042c0057deSPaul Durrant features &= ~NETIF_F_TSO6; 12052c0057deSPaul Durrant 12068f7b01a1SEric Dumazet return features; 12078f7b01a1SEric Dumazet } 12088f7b01a1SEric Dumazet 1209c8f44affSMichał Mirosław static int xennet_set_features(struct net_device *dev, 1210c8f44affSMichał Mirosław netdev_features_t features) 12118f7b01a1SEric Dumazet { 12128f7b01a1SEric Dumazet if (!(features & NETIF_F_SG) && dev->mtu > ETH_DATA_LEN) { 12138f7b01a1SEric Dumazet netdev_info(dev, "Reducing MTU because no SG offload"); 12148f7b01a1SEric Dumazet dev->mtu = ETH_DATA_LEN; 12158f7b01a1SEric Dumazet } 12168f7b01a1SEric Dumazet 12178f7b01a1SEric Dumazet return 0; 12188f7b01a1SEric Dumazet } 12198f7b01a1SEric Dumazet 1220d634bf2cSWei Liu static irqreturn_t xennet_tx_interrupt(int irq, void *dev_id) 1221cf66f9d4SKonrad Rzeszutek Wilk { 12222688fcb7SAndrew J. Bennieston struct netfront_queue *queue = dev_id; 1223cf66f9d4SKonrad Rzeszutek Wilk unsigned long flags; 1224cf66f9d4SKonrad Rzeszutek Wilk 12252688fcb7SAndrew J. Bennieston spin_lock_irqsave(&queue->tx_lock, flags); 12262688fcb7SAndrew J. Bennieston xennet_tx_buf_gc(queue); 12272688fcb7SAndrew J. Bennieston spin_unlock_irqrestore(&queue->tx_lock, flags); 1228cf66f9d4SKonrad Rzeszutek Wilk 1229cf66f9d4SKonrad Rzeszutek Wilk return IRQ_HANDLED; 1230cf66f9d4SKonrad Rzeszutek Wilk } 1231cf66f9d4SKonrad Rzeszutek Wilk 1232d634bf2cSWei Liu static irqreturn_t xennet_rx_interrupt(int irq, void *dev_id) 1233d634bf2cSWei Liu { 12342688fcb7SAndrew J. Bennieston struct netfront_queue *queue = dev_id; 12352688fcb7SAndrew J. Bennieston struct net_device *dev = queue->info->netdev; 1236d634bf2cSWei Liu 1237d634bf2cSWei Liu if (likely(netif_carrier_ok(dev) && 12382688fcb7SAndrew J. Bennieston RING_HAS_UNCONSUMED_RESPONSES(&queue->rx))) 12392688fcb7SAndrew J. Bennieston napi_schedule(&queue->napi); 1240d634bf2cSWei Liu 1241d634bf2cSWei Liu return IRQ_HANDLED; 1242d634bf2cSWei Liu } 1243d634bf2cSWei Liu 1244d634bf2cSWei Liu static irqreturn_t xennet_interrupt(int irq, void *dev_id) 1245d634bf2cSWei Liu { 1246d634bf2cSWei Liu xennet_tx_interrupt(irq, dev_id); 1247d634bf2cSWei Liu xennet_rx_interrupt(irq, dev_id); 1248d634bf2cSWei Liu return IRQ_HANDLED; 1249d634bf2cSWei Liu } 1250d634bf2cSWei Liu 1251cf66f9d4SKonrad Rzeszutek Wilk #ifdef CONFIG_NET_POLL_CONTROLLER 1252cf66f9d4SKonrad Rzeszutek Wilk static void xennet_poll_controller(struct net_device *dev) 1253cf66f9d4SKonrad Rzeszutek Wilk { 12542688fcb7SAndrew J. Bennieston /* Poll each queue */ 12552688fcb7SAndrew J. Bennieston struct netfront_info *info = netdev_priv(dev); 12562688fcb7SAndrew J. Bennieston unsigned int num_queues = dev->real_num_tx_queues; 12572688fcb7SAndrew J. Bennieston unsigned int i; 12582688fcb7SAndrew J. Bennieston for (i = 0; i < num_queues; ++i) 12592688fcb7SAndrew J. Bennieston xennet_interrupt(0, &info->queues[i]); 1260cf66f9d4SKonrad Rzeszutek Wilk } 1261cf66f9d4SKonrad Rzeszutek Wilk #endif 1262cf66f9d4SKonrad Rzeszutek Wilk 12630a0b9d2eSStephen Hemminger static const struct net_device_ops xennet_netdev_ops = { 12640a0b9d2eSStephen Hemminger .ndo_open = xennet_open, 12650a0b9d2eSStephen Hemminger .ndo_stop = xennet_close, 12660a0b9d2eSStephen Hemminger .ndo_start_xmit = xennet_start_xmit, 12670a0b9d2eSStephen Hemminger .ndo_change_mtu = xennet_change_mtu, 1268e00f85beSstephen hemminger .ndo_get_stats64 = xennet_get_stats64, 12690a0b9d2eSStephen Hemminger .ndo_set_mac_address = eth_mac_addr, 12700a0b9d2eSStephen Hemminger .ndo_validate_addr = eth_validate_addr, 1271fb507934SMichał Mirosław .ndo_fix_features = xennet_fix_features, 1272fb507934SMichał Mirosław .ndo_set_features = xennet_set_features, 12732688fcb7SAndrew J. Bennieston .ndo_select_queue = xennet_select_queue, 1274cf66f9d4SKonrad Rzeszutek Wilk #ifdef CONFIG_NET_POLL_CONTROLLER 1275cf66f9d4SKonrad Rzeszutek Wilk .ndo_poll_controller = xennet_poll_controller, 1276cf66f9d4SKonrad Rzeszutek Wilk #endif 12770a0b9d2eSStephen Hemminger }; 12780a0b9d2eSStephen Hemminger 1279900e1833SDavid Vrabel static void xennet_free_netdev(struct net_device *netdev) 1280900e1833SDavid Vrabel { 1281900e1833SDavid Vrabel struct netfront_info *np = netdev_priv(netdev); 1282900e1833SDavid Vrabel 1283900e1833SDavid Vrabel free_percpu(np->rx_stats); 1284900e1833SDavid Vrabel free_percpu(np->tx_stats); 1285900e1833SDavid Vrabel free_netdev(netdev); 1286900e1833SDavid Vrabel } 1287900e1833SDavid Vrabel 12888e0e46bbSBill Pemberton static struct net_device *xennet_create_dev(struct xenbus_device *dev) 12890d160211SJeremy Fitzhardinge { 12902688fcb7SAndrew J. Bennieston int err; 12910d160211SJeremy Fitzhardinge struct net_device *netdev; 12920d160211SJeremy Fitzhardinge struct netfront_info *np; 12930d160211SJeremy Fitzhardinge 129450ee6061SAndrew J. Bennieston netdev = alloc_etherdev_mq(sizeof(struct netfront_info), xennet_max_queues); 129541de8d4cSJoe Perches if (!netdev) 12960d160211SJeremy Fitzhardinge return ERR_PTR(-ENOMEM); 12970d160211SJeremy Fitzhardinge 12980d160211SJeremy Fitzhardinge np = netdev_priv(netdev); 12990d160211SJeremy Fitzhardinge np->xbdev = dev; 13000d160211SJeremy Fitzhardinge 13012688fcb7SAndrew J. Bennieston np->queues = NULL; 13020d160211SJeremy Fitzhardinge 1303e00f85beSstephen hemminger err = -ENOMEM; 1304900e1833SDavid Vrabel np->rx_stats = netdev_alloc_pcpu_stats(struct netfront_stats); 1305900e1833SDavid Vrabel if (np->rx_stats == NULL) 1306900e1833SDavid Vrabel goto exit; 1307900e1833SDavid Vrabel np->tx_stats = netdev_alloc_pcpu_stats(struct netfront_stats); 1308900e1833SDavid Vrabel if (np->tx_stats == NULL) 1309e00f85beSstephen hemminger goto exit; 1310e00f85beSstephen hemminger 13110a0b9d2eSStephen Hemminger netdev->netdev_ops = &xennet_netdev_ops; 13120a0b9d2eSStephen Hemminger 1313fb507934SMichał Mirosław netdev->features = NETIF_F_IP_CSUM | NETIF_F_RXCSUM | 1314fb507934SMichał Mirosław NETIF_F_GSO_ROBUST; 13152c0057deSPaul Durrant netdev->hw_features = NETIF_F_SG | 13162c0057deSPaul Durrant NETIF_F_IPV6_CSUM | 13172c0057deSPaul Durrant NETIF_F_TSO | NETIF_F_TSO6; 13180d160211SJeremy Fitzhardinge 1319fc3e5941SIan Campbell /* 1320fc3e5941SIan Campbell * Assume that all hw features are available for now. This set 1321fc3e5941SIan Campbell * will be adjusted by the call to netdev_update_features() in 1322fc3e5941SIan Campbell * xennet_connect() which is the earliest point where we can 1323fc3e5941SIan Campbell * negotiate with the backend regarding supported features. 1324fc3e5941SIan Campbell */ 1325fc3e5941SIan Campbell netdev->features |= netdev->hw_features; 1326fc3e5941SIan Campbell 13277ad24ea4SWilfried Klaebe netdev->ethtool_ops = &xennet_ethtool_ops; 1328e1043a4bSMohammed Gamal netdev->min_mtu = ETH_MIN_MTU; 1329d0c2c997SJarod Wilson netdev->max_mtu = XEN_NETIF_MAX_TX_SIZE; 13300d160211SJeremy Fitzhardinge SET_NETDEV_DEV(netdev, &dev->dev); 13310d160211SJeremy Fitzhardinge 13320d160211SJeremy Fitzhardinge np->netdev = netdev; 13330d160211SJeremy Fitzhardinge 13340d160211SJeremy Fitzhardinge netif_carrier_off(netdev); 13350d160211SJeremy Fitzhardinge 1336b707fda2SEduardo Otubo xenbus_switch_state(dev, XenbusStateInitialising); 13378edfe2e9SJuergen Gross wait_event(module_wq, 1338822fb18aSXiao Liang xenbus_read_driver_state(dev->otherend) != 1339822fb18aSXiao Liang XenbusStateClosed && 1340822fb18aSXiao Liang xenbus_read_driver_state(dev->otherend) != 1341822fb18aSXiao Liang XenbusStateUnknown); 13420d160211SJeremy Fitzhardinge return netdev; 13430d160211SJeremy Fitzhardinge 13440d160211SJeremy Fitzhardinge exit: 1345900e1833SDavid Vrabel xennet_free_netdev(netdev); 13460d160211SJeremy Fitzhardinge return ERR_PTR(err); 13470d160211SJeremy Fitzhardinge } 13480d160211SJeremy Fitzhardinge 13490d160211SJeremy Fitzhardinge /** 13500d160211SJeremy Fitzhardinge * Entry point to this code when a new device is created. Allocate the basic 13510d160211SJeremy Fitzhardinge * structures and the ring buffers for communication with the backend, and 13520d160211SJeremy Fitzhardinge * inform the backend of the appropriate details for those. 13530d160211SJeremy Fitzhardinge */ 13548e0e46bbSBill Pemberton static int netfront_probe(struct xenbus_device *dev, 13550d160211SJeremy Fitzhardinge const struct xenbus_device_id *id) 13560d160211SJeremy Fitzhardinge { 13570d160211SJeremy Fitzhardinge int err; 13580d160211SJeremy Fitzhardinge struct net_device *netdev; 13590d160211SJeremy Fitzhardinge struct netfront_info *info; 13600d160211SJeremy Fitzhardinge 13610d160211SJeremy Fitzhardinge netdev = xennet_create_dev(dev); 13620d160211SJeremy Fitzhardinge if (IS_ERR(netdev)) { 13630d160211SJeremy Fitzhardinge err = PTR_ERR(netdev); 13640d160211SJeremy Fitzhardinge xenbus_dev_fatal(dev, err, "creating netdev"); 13650d160211SJeremy Fitzhardinge return err; 13660d160211SJeremy Fitzhardinge } 13670d160211SJeremy Fitzhardinge 13680d160211SJeremy Fitzhardinge info = netdev_priv(netdev); 13691b713e00SGreg Kroah-Hartman dev_set_drvdata(&dev->dev, info); 137027b917e5STakashi Iwai #ifdef CONFIG_SYSFS 137127b917e5STakashi Iwai info->netdev->sysfs_groups[0] = &xennet_dev_group; 137227b917e5STakashi Iwai #endif 13730d160211SJeremy Fitzhardinge 13740d160211SJeremy Fitzhardinge return 0; 13750d160211SJeremy Fitzhardinge } 13760d160211SJeremy Fitzhardinge 13770d160211SJeremy Fitzhardinge static void xennet_end_access(int ref, void *page) 13780d160211SJeremy Fitzhardinge { 13790d160211SJeremy Fitzhardinge /* This frees the page as a side-effect */ 13800d160211SJeremy Fitzhardinge if (ref != GRANT_INVALID_REF) 13810d160211SJeremy Fitzhardinge gnttab_end_foreign_access(ref, 0, (unsigned long)page); 13820d160211SJeremy Fitzhardinge } 13830d160211SJeremy Fitzhardinge 13840d160211SJeremy Fitzhardinge static void xennet_disconnect_backend(struct netfront_info *info) 13850d160211SJeremy Fitzhardinge { 13862688fcb7SAndrew J. Bennieston unsigned int i = 0; 13872688fcb7SAndrew J. Bennieston unsigned int num_queues = info->netdev->real_num_tx_queues; 13880d160211SJeremy Fitzhardinge 1389f9feb1e6SDavid Vrabel netif_carrier_off(info->netdev); 1390f9feb1e6SDavid Vrabel 13919a873c71SChas Williams for (i = 0; i < num_queues && info->queues; ++i) { 139276541869SDavid Vrabel struct netfront_queue *queue = &info->queues[i]; 139376541869SDavid Vrabel 139474470954SBoris Ostrovsky del_timer_sync(&queue->rx_refill_timer); 139574470954SBoris Ostrovsky 13962688fcb7SAndrew J. Bennieston if (queue->tx_irq && (queue->tx_irq == queue->rx_irq)) 13972688fcb7SAndrew J. Bennieston unbind_from_irqhandler(queue->tx_irq, queue); 13982688fcb7SAndrew J. Bennieston if (queue->tx_irq && (queue->tx_irq != queue->rx_irq)) { 13992688fcb7SAndrew J. Bennieston unbind_from_irqhandler(queue->tx_irq, queue); 14002688fcb7SAndrew J. Bennieston unbind_from_irqhandler(queue->rx_irq, queue); 1401d634bf2cSWei Liu } 14022688fcb7SAndrew J. Bennieston queue->tx_evtchn = queue->rx_evtchn = 0; 14032688fcb7SAndrew J. Bennieston queue->tx_irq = queue->rx_irq = 0; 14040d160211SJeremy Fitzhardinge 1405274b0455SChas Williams if (netif_running(info->netdev)) 1406f9feb1e6SDavid Vrabel napi_synchronize(&queue->napi); 1407f9feb1e6SDavid Vrabel 1408a5b5dc3cSDavid Vrabel xennet_release_tx_bufs(queue); 1409a5b5dc3cSDavid Vrabel xennet_release_rx_bufs(queue); 1410a5b5dc3cSDavid Vrabel gnttab_free_grant_references(queue->gref_tx_head); 1411a5b5dc3cSDavid Vrabel gnttab_free_grant_references(queue->gref_rx_head); 1412a5b5dc3cSDavid Vrabel 14130d160211SJeremy Fitzhardinge /* End access and free the pages */ 14142688fcb7SAndrew J. Bennieston xennet_end_access(queue->tx_ring_ref, queue->tx.sring); 14152688fcb7SAndrew J. Bennieston xennet_end_access(queue->rx_ring_ref, queue->rx.sring); 14160d160211SJeremy Fitzhardinge 14172688fcb7SAndrew J. Bennieston queue->tx_ring_ref = GRANT_INVALID_REF; 14182688fcb7SAndrew J. Bennieston queue->rx_ring_ref = GRANT_INVALID_REF; 14192688fcb7SAndrew J. Bennieston queue->tx.sring = NULL; 14202688fcb7SAndrew J. Bennieston queue->rx.sring = NULL; 14212688fcb7SAndrew J. Bennieston } 14220d160211SJeremy Fitzhardinge } 14230d160211SJeremy Fitzhardinge 14240d160211SJeremy Fitzhardinge /** 14250d160211SJeremy Fitzhardinge * We are reconnecting to the backend, due to a suspend/resume, or a backend 14260d160211SJeremy Fitzhardinge * driver restart. We tear down our netif structure and recreate it, but 14270d160211SJeremy Fitzhardinge * leave the device-layer structures intact so that this is transparent to the 14280d160211SJeremy Fitzhardinge * rest of the kernel. 14290d160211SJeremy Fitzhardinge */ 14300d160211SJeremy Fitzhardinge static int netfront_resume(struct xenbus_device *dev) 14310d160211SJeremy Fitzhardinge { 14321b713e00SGreg Kroah-Hartman struct netfront_info *info = dev_get_drvdata(&dev->dev); 14330d160211SJeremy Fitzhardinge 14340d160211SJeremy Fitzhardinge dev_dbg(&dev->dev, "%s\n", dev->nodename); 14350d160211SJeremy Fitzhardinge 14360d160211SJeremy Fitzhardinge xennet_disconnect_backend(info); 14370d160211SJeremy Fitzhardinge return 0; 14380d160211SJeremy Fitzhardinge } 14390d160211SJeremy Fitzhardinge 14400d160211SJeremy Fitzhardinge static int xen_net_read_mac(struct xenbus_device *dev, u8 mac[]) 14410d160211SJeremy Fitzhardinge { 14420d160211SJeremy Fitzhardinge char *s, *e, *macstr; 14430d160211SJeremy Fitzhardinge int i; 14440d160211SJeremy Fitzhardinge 14450d160211SJeremy Fitzhardinge macstr = s = xenbus_read(XBT_NIL, dev->nodename, "mac", NULL); 14460d160211SJeremy Fitzhardinge if (IS_ERR(macstr)) 14470d160211SJeremy Fitzhardinge return PTR_ERR(macstr); 14480d160211SJeremy Fitzhardinge 14490d160211SJeremy Fitzhardinge for (i = 0; i < ETH_ALEN; i++) { 14500d160211SJeremy Fitzhardinge mac[i] = simple_strtoul(s, &e, 16); 14510d160211SJeremy Fitzhardinge if ((s == e) || (*e != ((i == ETH_ALEN-1) ? '\0' : ':'))) { 14520d160211SJeremy Fitzhardinge kfree(macstr); 14530d160211SJeremy Fitzhardinge return -ENOENT; 14540d160211SJeremy Fitzhardinge } 14550d160211SJeremy Fitzhardinge s = e+1; 14560d160211SJeremy Fitzhardinge } 14570d160211SJeremy Fitzhardinge 14580d160211SJeremy Fitzhardinge kfree(macstr); 14590d160211SJeremy Fitzhardinge return 0; 14600d160211SJeremy Fitzhardinge } 14610d160211SJeremy Fitzhardinge 14622688fcb7SAndrew J. Bennieston static int setup_netfront_single(struct netfront_queue *queue) 1463d634bf2cSWei Liu { 1464d634bf2cSWei Liu int err; 1465d634bf2cSWei Liu 14662688fcb7SAndrew J. Bennieston err = xenbus_alloc_evtchn(queue->info->xbdev, &queue->tx_evtchn); 1467d634bf2cSWei Liu if (err < 0) 1468d634bf2cSWei Liu goto fail; 1469d634bf2cSWei Liu 14702688fcb7SAndrew J. Bennieston err = bind_evtchn_to_irqhandler(queue->tx_evtchn, 1471d634bf2cSWei Liu xennet_interrupt, 14722688fcb7SAndrew J. Bennieston 0, queue->info->netdev->name, queue); 1473d634bf2cSWei Liu if (err < 0) 1474d634bf2cSWei Liu goto bind_fail; 14752688fcb7SAndrew J. Bennieston queue->rx_evtchn = queue->tx_evtchn; 14762688fcb7SAndrew J. Bennieston queue->rx_irq = queue->tx_irq = err; 1477d634bf2cSWei Liu 1478d634bf2cSWei Liu return 0; 1479d634bf2cSWei Liu 1480d634bf2cSWei Liu bind_fail: 14812688fcb7SAndrew J. Bennieston xenbus_free_evtchn(queue->info->xbdev, queue->tx_evtchn); 14822688fcb7SAndrew J. Bennieston queue->tx_evtchn = 0; 1483d634bf2cSWei Liu fail: 1484d634bf2cSWei Liu return err; 1485d634bf2cSWei Liu } 1486d634bf2cSWei Liu 14872688fcb7SAndrew J. Bennieston static int setup_netfront_split(struct netfront_queue *queue) 1488d634bf2cSWei Liu { 1489d634bf2cSWei Liu int err; 1490d634bf2cSWei Liu 14912688fcb7SAndrew J. Bennieston err = xenbus_alloc_evtchn(queue->info->xbdev, &queue->tx_evtchn); 1492d634bf2cSWei Liu if (err < 0) 1493d634bf2cSWei Liu goto fail; 14942688fcb7SAndrew J. Bennieston err = xenbus_alloc_evtchn(queue->info->xbdev, &queue->rx_evtchn); 1495d634bf2cSWei Liu if (err < 0) 1496d634bf2cSWei Liu goto alloc_rx_evtchn_fail; 1497d634bf2cSWei Liu 14982688fcb7SAndrew J. Bennieston snprintf(queue->tx_irq_name, sizeof(queue->tx_irq_name), 14992688fcb7SAndrew J. Bennieston "%s-tx", queue->name); 15002688fcb7SAndrew J. Bennieston err = bind_evtchn_to_irqhandler(queue->tx_evtchn, 1501d634bf2cSWei Liu xennet_tx_interrupt, 15022688fcb7SAndrew J. Bennieston 0, queue->tx_irq_name, queue); 1503d634bf2cSWei Liu if (err < 0) 1504d634bf2cSWei Liu goto bind_tx_fail; 15052688fcb7SAndrew J. Bennieston queue->tx_irq = err; 1506d634bf2cSWei Liu 15072688fcb7SAndrew J. Bennieston snprintf(queue->rx_irq_name, sizeof(queue->rx_irq_name), 15082688fcb7SAndrew J. Bennieston "%s-rx", queue->name); 15092688fcb7SAndrew J. Bennieston err = bind_evtchn_to_irqhandler(queue->rx_evtchn, 1510d634bf2cSWei Liu xennet_rx_interrupt, 15112688fcb7SAndrew J. Bennieston 0, queue->rx_irq_name, queue); 1512d634bf2cSWei Liu if (err < 0) 1513d634bf2cSWei Liu goto bind_rx_fail; 15142688fcb7SAndrew J. Bennieston queue->rx_irq = err; 1515d634bf2cSWei Liu 1516d634bf2cSWei Liu return 0; 1517d634bf2cSWei Liu 1518d634bf2cSWei Liu bind_rx_fail: 15192688fcb7SAndrew J. Bennieston unbind_from_irqhandler(queue->tx_irq, queue); 15202688fcb7SAndrew J. Bennieston queue->tx_irq = 0; 1521d634bf2cSWei Liu bind_tx_fail: 15222688fcb7SAndrew J. Bennieston xenbus_free_evtchn(queue->info->xbdev, queue->rx_evtchn); 15232688fcb7SAndrew J. Bennieston queue->rx_evtchn = 0; 1524d634bf2cSWei Liu alloc_rx_evtchn_fail: 15252688fcb7SAndrew J. Bennieston xenbus_free_evtchn(queue->info->xbdev, queue->tx_evtchn); 15262688fcb7SAndrew J. Bennieston queue->tx_evtchn = 0; 1527d634bf2cSWei Liu fail: 1528d634bf2cSWei Liu return err; 1529d634bf2cSWei Liu } 1530d634bf2cSWei Liu 15312688fcb7SAndrew J. Bennieston static int setup_netfront(struct xenbus_device *dev, 15322688fcb7SAndrew J. Bennieston struct netfront_queue *queue, unsigned int feature_split_evtchn) 15330d160211SJeremy Fitzhardinge { 15340d160211SJeremy Fitzhardinge struct xen_netif_tx_sring *txs; 15350d160211SJeremy Fitzhardinge struct xen_netif_rx_sring *rxs; 1536ccc9d90aSWei Liu grant_ref_t gref; 15370d160211SJeremy Fitzhardinge int err; 15380d160211SJeremy Fitzhardinge 15392688fcb7SAndrew J. Bennieston queue->tx_ring_ref = GRANT_INVALID_REF; 15402688fcb7SAndrew J. Bennieston queue->rx_ring_ref = GRANT_INVALID_REF; 15412688fcb7SAndrew J. Bennieston queue->rx.sring = NULL; 15422688fcb7SAndrew J. Bennieston queue->tx.sring = NULL; 15430d160211SJeremy Fitzhardinge 1544a144ff09SIan Campbell txs = (struct xen_netif_tx_sring *)get_zeroed_page(GFP_NOIO | __GFP_HIGH); 15450d160211SJeremy Fitzhardinge if (!txs) { 15460d160211SJeremy Fitzhardinge err = -ENOMEM; 15470d160211SJeremy Fitzhardinge xenbus_dev_fatal(dev, err, "allocating tx ring page"); 15480d160211SJeremy Fitzhardinge goto fail; 15490d160211SJeremy Fitzhardinge } 15500d160211SJeremy Fitzhardinge SHARED_RING_INIT(txs); 155130c5d7f0SJulien Grall FRONT_RING_INIT(&queue->tx, txs, XEN_PAGE_SIZE); 15520d160211SJeremy Fitzhardinge 1553ccc9d90aSWei Liu err = xenbus_grant_ring(dev, txs, 1, &gref); 15541ca2983aSWei Liu if (err < 0) 15551ca2983aSWei Liu goto grant_tx_ring_fail; 1556ccc9d90aSWei Liu queue->tx_ring_ref = gref; 15570d160211SJeremy Fitzhardinge 1558a144ff09SIan Campbell rxs = (struct xen_netif_rx_sring *)get_zeroed_page(GFP_NOIO | __GFP_HIGH); 15590d160211SJeremy Fitzhardinge if (!rxs) { 15600d160211SJeremy Fitzhardinge err = -ENOMEM; 15610d160211SJeremy Fitzhardinge xenbus_dev_fatal(dev, err, "allocating rx ring page"); 15621ca2983aSWei Liu goto alloc_rx_ring_fail; 15630d160211SJeremy Fitzhardinge } 15640d160211SJeremy Fitzhardinge SHARED_RING_INIT(rxs); 156530c5d7f0SJulien Grall FRONT_RING_INIT(&queue->rx, rxs, XEN_PAGE_SIZE); 15660d160211SJeremy Fitzhardinge 1567ccc9d90aSWei Liu err = xenbus_grant_ring(dev, rxs, 1, &gref); 15681ca2983aSWei Liu if (err < 0) 15691ca2983aSWei Liu goto grant_rx_ring_fail; 1570ccc9d90aSWei Liu queue->rx_ring_ref = gref; 15710d160211SJeremy Fitzhardinge 1572d634bf2cSWei Liu if (feature_split_evtchn) 15732688fcb7SAndrew J. Bennieston err = setup_netfront_split(queue); 1574d634bf2cSWei Liu /* setup single event channel if 1575d634bf2cSWei Liu * a) feature-split-event-channels == 0 1576d634bf2cSWei Liu * b) feature-split-event-channels == 1 but failed to setup 1577d634bf2cSWei Liu */ 1578d634bf2cSWei Liu if (!feature_split_evtchn || (feature_split_evtchn && err)) 15792688fcb7SAndrew J. Bennieston err = setup_netfront_single(queue); 1580d634bf2cSWei Liu 15810d160211SJeremy Fitzhardinge if (err) 15821ca2983aSWei Liu goto alloc_evtchn_fail; 15830d160211SJeremy Fitzhardinge 15840d160211SJeremy Fitzhardinge return 0; 15850d160211SJeremy Fitzhardinge 15861ca2983aSWei Liu /* If we fail to setup netfront, it is safe to just revoke access to 15871ca2983aSWei Liu * granted pages because backend is not accessing it at this point. 15881ca2983aSWei Liu */ 15891ca2983aSWei Liu alloc_evtchn_fail: 15902688fcb7SAndrew J. Bennieston gnttab_end_foreign_access_ref(queue->rx_ring_ref, 0); 15911ca2983aSWei Liu grant_rx_ring_fail: 15921ca2983aSWei Liu free_page((unsigned long)rxs); 15931ca2983aSWei Liu alloc_rx_ring_fail: 15942688fcb7SAndrew J. Bennieston gnttab_end_foreign_access_ref(queue->tx_ring_ref, 0); 15951ca2983aSWei Liu grant_tx_ring_fail: 15961ca2983aSWei Liu free_page((unsigned long)txs); 15970d160211SJeremy Fitzhardinge fail: 15980d160211SJeremy Fitzhardinge return err; 15990d160211SJeremy Fitzhardinge } 16000d160211SJeremy Fitzhardinge 16012688fcb7SAndrew J. Bennieston /* Queue-specific initialisation 16022688fcb7SAndrew J. Bennieston * This used to be done in xennet_create_dev() but must now 16032688fcb7SAndrew J. Bennieston * be run per-queue. 16042688fcb7SAndrew J. Bennieston */ 16052688fcb7SAndrew J. Bennieston static int xennet_init_queue(struct netfront_queue *queue) 16062688fcb7SAndrew J. Bennieston { 16072688fcb7SAndrew J. Bennieston unsigned short i; 16082688fcb7SAndrew J. Bennieston int err = 0; 160921f2706bSXiao Liang char *devid; 16102688fcb7SAndrew J. Bennieston 16112688fcb7SAndrew J. Bennieston spin_lock_init(&queue->tx_lock); 16122688fcb7SAndrew J. Bennieston spin_lock_init(&queue->rx_lock); 16132688fcb7SAndrew J. Bennieston 1614e99e88a9SKees Cook timer_setup(&queue->rx_refill_timer, rx_refill_timeout, 0); 16152688fcb7SAndrew J. Bennieston 161621f2706bSXiao Liang devid = strrchr(queue->info->xbdev->nodename, '/') + 1; 161721f2706bSXiao Liang snprintf(queue->name, sizeof(queue->name), "vif%s-q%u", 161821f2706bSXiao Liang devid, queue->id); 16198b715010SWei Liu 16202688fcb7SAndrew J. Bennieston /* Initialise tx_skbs as a free chain containing every entry. */ 16212688fcb7SAndrew J. Bennieston queue->tx_skb_freelist = 0; 16222688fcb7SAndrew J. Bennieston for (i = 0; i < NET_TX_RING_SIZE; i++) { 16232688fcb7SAndrew J. Bennieston skb_entry_set_link(&queue->tx_skbs[i], i+1); 16242688fcb7SAndrew J. Bennieston queue->grant_tx_ref[i] = GRANT_INVALID_REF; 16252688fcb7SAndrew J. Bennieston queue->grant_tx_page[i] = NULL; 16262688fcb7SAndrew J. Bennieston } 16272688fcb7SAndrew J. Bennieston 16282688fcb7SAndrew J. Bennieston /* Clear out rx_skbs */ 16292688fcb7SAndrew J. Bennieston for (i = 0; i < NET_RX_RING_SIZE; i++) { 16302688fcb7SAndrew J. Bennieston queue->rx_skbs[i] = NULL; 16312688fcb7SAndrew J. Bennieston queue->grant_rx_ref[i] = GRANT_INVALID_REF; 16322688fcb7SAndrew J. Bennieston } 16332688fcb7SAndrew J. Bennieston 16342688fcb7SAndrew J. Bennieston /* A grant for every tx ring slot */ 16351f3c2ebaSDavid Vrabel if (gnttab_alloc_grant_references(NET_TX_RING_SIZE, 16362688fcb7SAndrew J. Bennieston &queue->gref_tx_head) < 0) { 16372688fcb7SAndrew J. Bennieston pr_alert("can't alloc tx grant refs\n"); 16382688fcb7SAndrew J. Bennieston err = -ENOMEM; 16392688fcb7SAndrew J. Bennieston goto exit; 16402688fcb7SAndrew J. Bennieston } 16412688fcb7SAndrew J. Bennieston 16422688fcb7SAndrew J. Bennieston /* A grant for every rx ring slot */ 16431f3c2ebaSDavid Vrabel if (gnttab_alloc_grant_references(NET_RX_RING_SIZE, 16442688fcb7SAndrew J. Bennieston &queue->gref_rx_head) < 0) { 16452688fcb7SAndrew J. Bennieston pr_alert("can't alloc rx grant refs\n"); 16462688fcb7SAndrew J. Bennieston err = -ENOMEM; 16472688fcb7SAndrew J. Bennieston goto exit_free_tx; 16482688fcb7SAndrew J. Bennieston } 16492688fcb7SAndrew J. Bennieston 16502688fcb7SAndrew J. Bennieston return 0; 16512688fcb7SAndrew J. Bennieston 16522688fcb7SAndrew J. Bennieston exit_free_tx: 16532688fcb7SAndrew J. Bennieston gnttab_free_grant_references(queue->gref_tx_head); 16542688fcb7SAndrew J. Bennieston exit: 16552688fcb7SAndrew J. Bennieston return err; 16562688fcb7SAndrew J. Bennieston } 16572688fcb7SAndrew J. Bennieston 165850ee6061SAndrew J. Bennieston static int write_queue_xenstore_keys(struct netfront_queue *queue, 165950ee6061SAndrew J. Bennieston struct xenbus_transaction *xbt, int write_hierarchical) 166050ee6061SAndrew J. Bennieston { 166150ee6061SAndrew J. Bennieston /* Write the queue-specific keys into XenStore in the traditional 166250ee6061SAndrew J. Bennieston * way for a single queue, or in a queue subkeys for multiple 166350ee6061SAndrew J. Bennieston * queues. 166450ee6061SAndrew J. Bennieston */ 166550ee6061SAndrew J. Bennieston struct xenbus_device *dev = queue->info->xbdev; 166650ee6061SAndrew J. Bennieston int err; 166750ee6061SAndrew J. Bennieston const char *message; 166850ee6061SAndrew J. Bennieston char *path; 166950ee6061SAndrew J. Bennieston size_t pathsize; 167050ee6061SAndrew J. Bennieston 167150ee6061SAndrew J. Bennieston /* Choose the correct place to write the keys */ 167250ee6061SAndrew J. Bennieston if (write_hierarchical) { 167350ee6061SAndrew J. Bennieston pathsize = strlen(dev->nodename) + 10; 167450ee6061SAndrew J. Bennieston path = kzalloc(pathsize, GFP_KERNEL); 167550ee6061SAndrew J. Bennieston if (!path) { 167650ee6061SAndrew J. Bennieston err = -ENOMEM; 167750ee6061SAndrew J. Bennieston message = "out of memory while writing ring references"; 167850ee6061SAndrew J. Bennieston goto error; 167950ee6061SAndrew J. Bennieston } 168050ee6061SAndrew J. Bennieston snprintf(path, pathsize, "%s/queue-%u", 168150ee6061SAndrew J. Bennieston dev->nodename, queue->id); 168250ee6061SAndrew J. Bennieston } else { 168350ee6061SAndrew J. Bennieston path = (char *)dev->nodename; 168450ee6061SAndrew J. Bennieston } 168550ee6061SAndrew J. Bennieston 168650ee6061SAndrew J. Bennieston /* Write ring references */ 168750ee6061SAndrew J. Bennieston err = xenbus_printf(*xbt, path, "tx-ring-ref", "%u", 168850ee6061SAndrew J. Bennieston queue->tx_ring_ref); 168950ee6061SAndrew J. Bennieston if (err) { 169050ee6061SAndrew J. Bennieston message = "writing tx-ring-ref"; 169150ee6061SAndrew J. Bennieston goto error; 169250ee6061SAndrew J. Bennieston } 169350ee6061SAndrew J. Bennieston 169450ee6061SAndrew J. Bennieston err = xenbus_printf(*xbt, path, "rx-ring-ref", "%u", 169550ee6061SAndrew J. Bennieston queue->rx_ring_ref); 169650ee6061SAndrew J. Bennieston if (err) { 169750ee6061SAndrew J. Bennieston message = "writing rx-ring-ref"; 169850ee6061SAndrew J. Bennieston goto error; 169950ee6061SAndrew J. Bennieston } 170050ee6061SAndrew J. Bennieston 170150ee6061SAndrew J. Bennieston /* Write event channels; taking into account both shared 170250ee6061SAndrew J. Bennieston * and split event channel scenarios. 170350ee6061SAndrew J. Bennieston */ 170450ee6061SAndrew J. Bennieston if (queue->tx_evtchn == queue->rx_evtchn) { 170550ee6061SAndrew J. Bennieston /* Shared event channel */ 170650ee6061SAndrew J. Bennieston err = xenbus_printf(*xbt, path, 170750ee6061SAndrew J. Bennieston "event-channel", "%u", queue->tx_evtchn); 170850ee6061SAndrew J. Bennieston if (err) { 170950ee6061SAndrew J. Bennieston message = "writing event-channel"; 171050ee6061SAndrew J. Bennieston goto error; 171150ee6061SAndrew J. Bennieston } 171250ee6061SAndrew J. Bennieston } else { 171350ee6061SAndrew J. Bennieston /* Split event channels */ 171450ee6061SAndrew J. Bennieston err = xenbus_printf(*xbt, path, 171550ee6061SAndrew J. Bennieston "event-channel-tx", "%u", queue->tx_evtchn); 171650ee6061SAndrew J. Bennieston if (err) { 171750ee6061SAndrew J. Bennieston message = "writing event-channel-tx"; 171850ee6061SAndrew J. Bennieston goto error; 171950ee6061SAndrew J. Bennieston } 172050ee6061SAndrew J. Bennieston 172150ee6061SAndrew J. Bennieston err = xenbus_printf(*xbt, path, 172250ee6061SAndrew J. Bennieston "event-channel-rx", "%u", queue->rx_evtchn); 172350ee6061SAndrew J. Bennieston if (err) { 172450ee6061SAndrew J. Bennieston message = "writing event-channel-rx"; 172550ee6061SAndrew J. Bennieston goto error; 172650ee6061SAndrew J. Bennieston } 172750ee6061SAndrew J. Bennieston } 172850ee6061SAndrew J. Bennieston 172950ee6061SAndrew J. Bennieston if (write_hierarchical) 173050ee6061SAndrew J. Bennieston kfree(path); 173150ee6061SAndrew J. Bennieston return 0; 173250ee6061SAndrew J. Bennieston 173350ee6061SAndrew J. Bennieston error: 173450ee6061SAndrew J. Bennieston if (write_hierarchical) 173550ee6061SAndrew J. Bennieston kfree(path); 173650ee6061SAndrew J. Bennieston xenbus_dev_fatal(dev, err, "%s", message); 173750ee6061SAndrew J. Bennieston return err; 173850ee6061SAndrew J. Bennieston } 173950ee6061SAndrew J. Bennieston 1740ce58725fSDavid Vrabel static void xennet_destroy_queues(struct netfront_info *info) 1741ce58725fSDavid Vrabel { 1742ce58725fSDavid Vrabel unsigned int i; 1743ce58725fSDavid Vrabel 1744ce58725fSDavid Vrabel for (i = 0; i < info->netdev->real_num_tx_queues; i++) { 1745ce58725fSDavid Vrabel struct netfront_queue *queue = &info->queues[i]; 1746ce58725fSDavid Vrabel 1747ce58725fSDavid Vrabel if (netif_running(info->netdev)) 1748ce58725fSDavid Vrabel napi_disable(&queue->napi); 1749ce58725fSDavid Vrabel netif_napi_del(&queue->napi); 1750ce58725fSDavid Vrabel } 1751ce58725fSDavid Vrabel 1752ce58725fSDavid Vrabel kfree(info->queues); 1753ce58725fSDavid Vrabel info->queues = NULL; 1754ce58725fSDavid Vrabel } 1755ce58725fSDavid Vrabel 1756ce58725fSDavid Vrabel static int xennet_create_queues(struct netfront_info *info, 1757ca88ea12SJoe Jin unsigned int *num_queues) 1758ce58725fSDavid Vrabel { 1759ce58725fSDavid Vrabel unsigned int i; 1760ce58725fSDavid Vrabel int ret; 1761ce58725fSDavid Vrabel 1762ca88ea12SJoe Jin info->queues = kcalloc(*num_queues, sizeof(struct netfront_queue), 1763ce58725fSDavid Vrabel GFP_KERNEL); 1764ce58725fSDavid Vrabel if (!info->queues) 1765ce58725fSDavid Vrabel return -ENOMEM; 1766ce58725fSDavid Vrabel 1767ca88ea12SJoe Jin for (i = 0; i < *num_queues; i++) { 1768ce58725fSDavid Vrabel struct netfront_queue *queue = &info->queues[i]; 1769ce58725fSDavid Vrabel 1770ce58725fSDavid Vrabel queue->id = i; 1771ce58725fSDavid Vrabel queue->info = info; 1772ce58725fSDavid Vrabel 1773ce58725fSDavid Vrabel ret = xennet_init_queue(queue); 1774ce58725fSDavid Vrabel if (ret < 0) { 1775f599c64fSRoss Lagerwall dev_warn(&info->xbdev->dev, 177669cb8524SDavid Vrabel "only created %d queues\n", i); 1777ca88ea12SJoe Jin *num_queues = i; 1778ce58725fSDavid Vrabel break; 1779ce58725fSDavid Vrabel } 1780ce58725fSDavid Vrabel 1781ce58725fSDavid Vrabel netif_napi_add(queue->info->netdev, &queue->napi, 1782ce58725fSDavid Vrabel xennet_poll, 64); 1783ce58725fSDavid Vrabel if (netif_running(info->netdev)) 1784ce58725fSDavid Vrabel napi_enable(&queue->napi); 1785ce58725fSDavid Vrabel } 1786ce58725fSDavid Vrabel 1787ca88ea12SJoe Jin netif_set_real_num_tx_queues(info->netdev, *num_queues); 1788ce58725fSDavid Vrabel 1789ca88ea12SJoe Jin if (*num_queues == 0) { 1790f599c64fSRoss Lagerwall dev_err(&info->xbdev->dev, "no queues\n"); 1791ce58725fSDavid Vrabel return -EINVAL; 1792ce58725fSDavid Vrabel } 1793ce58725fSDavid Vrabel return 0; 1794ce58725fSDavid Vrabel } 1795ce58725fSDavid Vrabel 17960d160211SJeremy Fitzhardinge /* Common code used when first setting up, and when resuming. */ 1797f502bf2bSIan Campbell static int talk_to_netback(struct xenbus_device *dev, 17980d160211SJeremy Fitzhardinge struct netfront_info *info) 17990d160211SJeremy Fitzhardinge { 18000d160211SJeremy Fitzhardinge const char *message; 18010d160211SJeremy Fitzhardinge struct xenbus_transaction xbt; 18020d160211SJeremy Fitzhardinge int err; 18032688fcb7SAndrew J. Bennieston unsigned int feature_split_evtchn; 18042688fcb7SAndrew J. Bennieston unsigned int i = 0; 180550ee6061SAndrew J. Bennieston unsigned int max_queues = 0; 18062688fcb7SAndrew J. Bennieston struct netfront_queue *queue = NULL; 18072688fcb7SAndrew J. Bennieston unsigned int num_queues = 1; 18080d160211SJeremy Fitzhardinge 18092688fcb7SAndrew J. Bennieston info->netdev->irq = 0; 18102688fcb7SAndrew J. Bennieston 181150ee6061SAndrew J. Bennieston /* Check if backend supports multiple queues */ 18122890ea5cSJuergen Gross max_queues = xenbus_read_unsigned(info->xbdev->otherend, 18132890ea5cSJuergen Gross "multi-queue-max-queues", 1); 181450ee6061SAndrew J. Bennieston num_queues = min(max_queues, xennet_max_queues); 181550ee6061SAndrew J. Bennieston 18162688fcb7SAndrew J. Bennieston /* Check feature-split-event-channels */ 18172890ea5cSJuergen Gross feature_split_evtchn = xenbus_read_unsigned(info->xbdev->otherend, 18182890ea5cSJuergen Gross "feature-split-event-channels", 0); 18192688fcb7SAndrew J. Bennieston 18202688fcb7SAndrew J. Bennieston /* Read mac addr. */ 18212688fcb7SAndrew J. Bennieston err = xen_net_read_mac(dev, info->netdev->dev_addr); 18222688fcb7SAndrew J. Bennieston if (err) { 18232688fcb7SAndrew J. Bennieston xenbus_dev_fatal(dev, err, "parsing %s/mac", dev->nodename); 1824cb257783SRoss Lagerwall goto out_unlocked; 18252688fcb7SAndrew J. Bennieston } 18262688fcb7SAndrew J. Bennieston 1827f599c64fSRoss Lagerwall rtnl_lock(); 1828ce58725fSDavid Vrabel if (info->queues) 1829ce58725fSDavid Vrabel xennet_destroy_queues(info); 1830ce58725fSDavid Vrabel 1831ca88ea12SJoe Jin err = xennet_create_queues(info, &num_queues); 1832e2e004acSRoss Lagerwall if (err < 0) { 1833e2e004acSRoss Lagerwall xenbus_dev_fatal(dev, err, "creating queues"); 1834e2e004acSRoss Lagerwall kfree(info->queues); 1835e2e004acSRoss Lagerwall info->queues = NULL; 1836e2e004acSRoss Lagerwall goto out; 1837e2e004acSRoss Lagerwall } 1838f599c64fSRoss Lagerwall rtnl_unlock(); 18392688fcb7SAndrew J. Bennieston 18402688fcb7SAndrew J. Bennieston /* Create shared ring, alloc event channel -- for each queue */ 18412688fcb7SAndrew J. Bennieston for (i = 0; i < num_queues; ++i) { 18422688fcb7SAndrew J. Bennieston queue = &info->queues[i]; 18432688fcb7SAndrew J. Bennieston err = setup_netfront(dev, queue, feature_split_evtchn); 1844e2e004acSRoss Lagerwall if (err) 18452688fcb7SAndrew J. Bennieston goto destroy_ring; 18462688fcb7SAndrew J. Bennieston } 18470d160211SJeremy Fitzhardinge 18480d160211SJeremy Fitzhardinge again: 18490d160211SJeremy Fitzhardinge err = xenbus_transaction_start(&xbt); 18500d160211SJeremy Fitzhardinge if (err) { 18510d160211SJeremy Fitzhardinge xenbus_dev_fatal(dev, err, "starting transaction"); 18520d160211SJeremy Fitzhardinge goto destroy_ring; 18530d160211SJeremy Fitzhardinge } 18540d160211SJeremy Fitzhardinge 1855812494d9Schas williams if (xenbus_exists(XBT_NIL, 1856812494d9Schas williams info->xbdev->otherend, "multi-queue-max-queues")) { 1857812494d9Schas williams /* Write the number of queues */ 1858812494d9Schas williams err = xenbus_printf(xbt, dev->nodename, 1859812494d9Schas williams "multi-queue-num-queues", "%u", num_queues); 1860812494d9Schas williams if (err) { 1861812494d9Schas williams message = "writing multi-queue-num-queues"; 1862812494d9Schas williams goto abort_transaction_no_dev_fatal; 1863812494d9Schas williams } 1864812494d9Schas williams } 1865812494d9Schas williams 186650ee6061SAndrew J. Bennieston if (num_queues == 1) { 186750ee6061SAndrew J. Bennieston err = write_queue_xenstore_keys(&info->queues[0], &xbt, 0); /* flat */ 186850ee6061SAndrew J. Bennieston if (err) 186950ee6061SAndrew J. Bennieston goto abort_transaction_no_dev_fatal; 1870d634bf2cSWei Liu } else { 187150ee6061SAndrew J. Bennieston /* Write the keys for each queue */ 187250ee6061SAndrew J. Bennieston for (i = 0; i < num_queues; ++i) { 187350ee6061SAndrew J. Bennieston queue = &info->queues[i]; 187450ee6061SAndrew J. Bennieston err = write_queue_xenstore_keys(queue, &xbt, 1); /* hierarchical */ 187550ee6061SAndrew J. Bennieston if (err) 187650ee6061SAndrew J. Bennieston goto abort_transaction_no_dev_fatal; 1877d634bf2cSWei Liu } 1878d634bf2cSWei Liu } 18790d160211SJeremy Fitzhardinge 188050ee6061SAndrew J. Bennieston /* The remaining keys are not queue-specific */ 18810d160211SJeremy Fitzhardinge err = xenbus_printf(xbt, dev->nodename, "request-rx-copy", "%u", 18820d160211SJeremy Fitzhardinge 1); 18830d160211SJeremy Fitzhardinge if (err) { 18840d160211SJeremy Fitzhardinge message = "writing request-rx-copy"; 18850d160211SJeremy Fitzhardinge goto abort_transaction; 18860d160211SJeremy Fitzhardinge } 18870d160211SJeremy Fitzhardinge 18880d160211SJeremy Fitzhardinge err = xenbus_printf(xbt, dev->nodename, "feature-rx-notify", "%d", 1); 18890d160211SJeremy Fitzhardinge if (err) { 18900d160211SJeremy Fitzhardinge message = "writing feature-rx-notify"; 18910d160211SJeremy Fitzhardinge goto abort_transaction; 18920d160211SJeremy Fitzhardinge } 18930d160211SJeremy Fitzhardinge 18940d160211SJeremy Fitzhardinge err = xenbus_printf(xbt, dev->nodename, "feature-sg", "%d", 1); 18950d160211SJeremy Fitzhardinge if (err) { 18960d160211SJeremy Fitzhardinge message = "writing feature-sg"; 18970d160211SJeremy Fitzhardinge goto abort_transaction; 18980d160211SJeremy Fitzhardinge } 18990d160211SJeremy Fitzhardinge 19000d160211SJeremy Fitzhardinge err = xenbus_printf(xbt, dev->nodename, "feature-gso-tcpv4", "%d", 1); 19010d160211SJeremy Fitzhardinge if (err) { 19020d160211SJeremy Fitzhardinge message = "writing feature-gso-tcpv4"; 19030d160211SJeremy Fitzhardinge goto abort_transaction; 19040d160211SJeremy Fitzhardinge } 19050d160211SJeremy Fitzhardinge 19062c0057deSPaul Durrant err = xenbus_write(xbt, dev->nodename, "feature-gso-tcpv6", "1"); 19072c0057deSPaul Durrant if (err) { 19082c0057deSPaul Durrant message = "writing feature-gso-tcpv6"; 19092c0057deSPaul Durrant goto abort_transaction; 19102c0057deSPaul Durrant } 19112c0057deSPaul Durrant 19122c0057deSPaul Durrant err = xenbus_write(xbt, dev->nodename, "feature-ipv6-csum-offload", 19132c0057deSPaul Durrant "1"); 19142c0057deSPaul Durrant if (err) { 19152c0057deSPaul Durrant message = "writing feature-ipv6-csum-offload"; 19162c0057deSPaul Durrant goto abort_transaction; 19172c0057deSPaul Durrant } 19182c0057deSPaul Durrant 19190d160211SJeremy Fitzhardinge err = xenbus_transaction_end(xbt, 0); 19200d160211SJeremy Fitzhardinge if (err) { 19210d160211SJeremy Fitzhardinge if (err == -EAGAIN) 19220d160211SJeremy Fitzhardinge goto again; 19230d160211SJeremy Fitzhardinge xenbus_dev_fatal(dev, err, "completing transaction"); 19240d160211SJeremy Fitzhardinge goto destroy_ring; 19250d160211SJeremy Fitzhardinge } 19260d160211SJeremy Fitzhardinge 19270d160211SJeremy Fitzhardinge return 0; 19280d160211SJeremy Fitzhardinge 19290d160211SJeremy Fitzhardinge abort_transaction: 19300d160211SJeremy Fitzhardinge xenbus_dev_fatal(dev, err, "%s", message); 193150ee6061SAndrew J. Bennieston abort_transaction_no_dev_fatal: 193250ee6061SAndrew J. Bennieston xenbus_transaction_end(xbt, 1); 19330d160211SJeremy Fitzhardinge destroy_ring: 19340d160211SJeremy Fitzhardinge xennet_disconnect_backend(info); 1935f599c64fSRoss Lagerwall rtnl_lock(); 1936e2e004acSRoss Lagerwall xennet_destroy_queues(info); 19370d160211SJeremy Fitzhardinge out: 1938f599c64fSRoss Lagerwall rtnl_unlock(); 1939cb257783SRoss Lagerwall out_unlocked: 1940d86b5672SVitaly Kuznetsov device_unregister(&dev->dev); 19410d160211SJeremy Fitzhardinge return err; 19420d160211SJeremy Fitzhardinge } 19430d160211SJeremy Fitzhardinge 19440d160211SJeremy Fitzhardinge static int xennet_connect(struct net_device *dev) 19450d160211SJeremy Fitzhardinge { 19460d160211SJeremy Fitzhardinge struct netfront_info *np = netdev_priv(dev); 19472688fcb7SAndrew J. Bennieston unsigned int num_queues = 0; 1948a5b5dc3cSDavid Vrabel int err; 19492688fcb7SAndrew J. Bennieston unsigned int j = 0; 19502688fcb7SAndrew J. Bennieston struct netfront_queue *queue = NULL; 19510d160211SJeremy Fitzhardinge 19522890ea5cSJuergen Gross if (!xenbus_read_unsigned(np->xbdev->otherend, "feature-rx-copy", 0)) { 19530d160211SJeremy Fitzhardinge dev_info(&dev->dev, 1954898eb71cSJoe Perches "backend does not support copying receive path\n"); 19550d160211SJeremy Fitzhardinge return -ENODEV; 19560d160211SJeremy Fitzhardinge } 19570d160211SJeremy Fitzhardinge 1958f502bf2bSIan Campbell err = talk_to_netback(np->xbdev, np); 19590d160211SJeremy Fitzhardinge if (err) 19600d160211SJeremy Fitzhardinge return err; 19610d160211SJeremy Fitzhardinge 19622688fcb7SAndrew J. Bennieston /* talk_to_netback() sets the correct number of queues */ 19632688fcb7SAndrew J. Bennieston num_queues = dev->real_num_tx_queues; 19642688fcb7SAndrew J. Bennieston 1965f599c64fSRoss Lagerwall if (dev->reg_state == NETREG_UNINITIALIZED) { 1966f599c64fSRoss Lagerwall err = register_netdev(dev); 1967f599c64fSRoss Lagerwall if (err) { 1968f599c64fSRoss Lagerwall pr_warn("%s: register_netdev err=%d\n", __func__, err); 1969f599c64fSRoss Lagerwall device_unregister(&np->xbdev->dev); 1970f599c64fSRoss Lagerwall return err; 1971f599c64fSRoss Lagerwall } 1972f599c64fSRoss Lagerwall } 1973f599c64fSRoss Lagerwall 197445c8184cSRoss Lagerwall rtnl_lock(); 197545c8184cSRoss Lagerwall netdev_update_features(dev); 197645c8184cSRoss Lagerwall rtnl_unlock(); 197745c8184cSRoss Lagerwall 19780d160211SJeremy Fitzhardinge /* 1979a5b5dc3cSDavid Vrabel * All public and private state should now be sane. Get 19800d160211SJeremy Fitzhardinge * ready to start sending and receiving packets and give the driver 19810d160211SJeremy Fitzhardinge * domain a kick because we've probably just requeued some 19820d160211SJeremy Fitzhardinge * packets. 19830d160211SJeremy Fitzhardinge */ 19840d160211SJeremy Fitzhardinge netif_carrier_on(np->netdev); 19852688fcb7SAndrew J. Bennieston for (j = 0; j < num_queues; ++j) { 19862688fcb7SAndrew J. Bennieston queue = &np->queues[j]; 1987f50b4076SDavid Vrabel 19882688fcb7SAndrew J. Bennieston notify_remote_via_irq(queue->tx_irq); 19892688fcb7SAndrew J. Bennieston if (queue->tx_irq != queue->rx_irq) 19902688fcb7SAndrew J. Bennieston notify_remote_via_irq(queue->rx_irq); 19910d160211SJeremy Fitzhardinge 1992f50b4076SDavid Vrabel spin_lock_irq(&queue->tx_lock); 1993f50b4076SDavid Vrabel xennet_tx_buf_gc(queue); 19942688fcb7SAndrew J. Bennieston spin_unlock_irq(&queue->tx_lock); 1995f50b4076SDavid Vrabel 1996f50b4076SDavid Vrabel spin_lock_bh(&queue->rx_lock); 1997f50b4076SDavid Vrabel xennet_alloc_rx_buffers(queue); 19982688fcb7SAndrew J. Bennieston spin_unlock_bh(&queue->rx_lock); 19992688fcb7SAndrew J. Bennieston } 20000d160211SJeremy Fitzhardinge 20010d160211SJeremy Fitzhardinge return 0; 20020d160211SJeremy Fitzhardinge } 20030d160211SJeremy Fitzhardinge 20040d160211SJeremy Fitzhardinge /** 20050d160211SJeremy Fitzhardinge * Callback received when the backend's state changes. 20060d160211SJeremy Fitzhardinge */ 2007f502bf2bSIan Campbell static void netback_changed(struct xenbus_device *dev, 20080d160211SJeremy Fitzhardinge enum xenbus_state backend_state) 20090d160211SJeremy Fitzhardinge { 20101b713e00SGreg Kroah-Hartman struct netfront_info *np = dev_get_drvdata(&dev->dev); 20110d160211SJeremy Fitzhardinge struct net_device *netdev = np->netdev; 20120d160211SJeremy Fitzhardinge 20130d160211SJeremy Fitzhardinge dev_dbg(&dev->dev, "%s\n", xenbus_strstate(backend_state)); 20140d160211SJeremy Fitzhardinge 20158edfe2e9SJuergen Gross wake_up_all(&module_wq); 20168edfe2e9SJuergen Gross 20170d160211SJeremy Fitzhardinge switch (backend_state) { 20180d160211SJeremy Fitzhardinge case XenbusStateInitialising: 20190d160211SJeremy Fitzhardinge case XenbusStateInitialised: 2020b78c9512SNoboru Iwamatsu case XenbusStateReconfiguring: 2021b78c9512SNoboru Iwamatsu case XenbusStateReconfigured: 20220d160211SJeremy Fitzhardinge case XenbusStateUnknown: 20230d160211SJeremy Fitzhardinge break; 20240d160211SJeremy Fitzhardinge 20250d160211SJeremy Fitzhardinge case XenbusStateInitWait: 20260d160211SJeremy Fitzhardinge if (dev->state != XenbusStateInitialising) 20270d160211SJeremy Fitzhardinge break; 20280d160211SJeremy Fitzhardinge if (xennet_connect(netdev) != 0) 20290d160211SJeremy Fitzhardinge break; 20300d160211SJeremy Fitzhardinge xenbus_switch_state(dev, XenbusStateConnected); 203108e34eb1SLaszlo Ersek break; 203208e34eb1SLaszlo Ersek 203308e34eb1SLaszlo Ersek case XenbusStateConnected: 2034ee89bab1SAmerigo Wang netdev_notify_peers(netdev); 20350d160211SJeremy Fitzhardinge break; 20360d160211SJeremy Fitzhardinge 2037bce3ea81SDavid Vrabel case XenbusStateClosed: 2038bce3ea81SDavid Vrabel if (dev->state == XenbusStateClosed) 2039bce3ea81SDavid Vrabel break; 2040a32b9d91SGustavo A. R. Silva /* Fall through - Missed the backend's CLOSING state. */ 20410d160211SJeremy Fitzhardinge case XenbusStateClosing: 20420d160211SJeremy Fitzhardinge xenbus_frontend_closed(dev); 20430d160211SJeremy Fitzhardinge break; 20440d160211SJeremy Fitzhardinge } 20450d160211SJeremy Fitzhardinge } 20460d160211SJeremy Fitzhardinge 2047e0ce4af9SIan Campbell static const struct xennet_stat { 2048e0ce4af9SIan Campbell char name[ETH_GSTRING_LEN]; 2049e0ce4af9SIan Campbell u16 offset; 2050e0ce4af9SIan Campbell } xennet_stats[] = { 2051e0ce4af9SIan Campbell { 2052e0ce4af9SIan Campbell "rx_gso_checksum_fixup", 2053e0ce4af9SIan Campbell offsetof(struct netfront_info, rx_gso_checksum_fixup) 2054e0ce4af9SIan Campbell }, 2055e0ce4af9SIan Campbell }; 2056e0ce4af9SIan Campbell 2057e0ce4af9SIan Campbell static int xennet_get_sset_count(struct net_device *dev, int string_set) 2058e0ce4af9SIan Campbell { 2059e0ce4af9SIan Campbell switch (string_set) { 2060e0ce4af9SIan Campbell case ETH_SS_STATS: 2061e0ce4af9SIan Campbell return ARRAY_SIZE(xennet_stats); 2062e0ce4af9SIan Campbell default: 2063e0ce4af9SIan Campbell return -EINVAL; 2064e0ce4af9SIan Campbell } 2065e0ce4af9SIan Campbell } 2066e0ce4af9SIan Campbell 2067e0ce4af9SIan Campbell static void xennet_get_ethtool_stats(struct net_device *dev, 2068e0ce4af9SIan Campbell struct ethtool_stats *stats, u64 * data) 2069e0ce4af9SIan Campbell { 2070e0ce4af9SIan Campbell void *np = netdev_priv(dev); 2071e0ce4af9SIan Campbell int i; 2072e0ce4af9SIan Campbell 2073e0ce4af9SIan Campbell for (i = 0; i < ARRAY_SIZE(xennet_stats); i++) 20742688fcb7SAndrew J. Bennieston data[i] = atomic_read((atomic_t *)(np + xennet_stats[i].offset)); 2075e0ce4af9SIan Campbell } 2076e0ce4af9SIan Campbell 2077e0ce4af9SIan Campbell static void xennet_get_strings(struct net_device *dev, u32 stringset, u8 * data) 2078e0ce4af9SIan Campbell { 2079e0ce4af9SIan Campbell int i; 2080e0ce4af9SIan Campbell 2081e0ce4af9SIan Campbell switch (stringset) { 2082e0ce4af9SIan Campbell case ETH_SS_STATS: 2083e0ce4af9SIan Campbell for (i = 0; i < ARRAY_SIZE(xennet_stats); i++) 2084e0ce4af9SIan Campbell memcpy(data + i * ETH_GSTRING_LEN, 2085e0ce4af9SIan Campbell xennet_stats[i].name, ETH_GSTRING_LEN); 2086e0ce4af9SIan Campbell break; 2087e0ce4af9SIan Campbell } 2088e0ce4af9SIan Campbell } 2089e0ce4af9SIan Campbell 20900fc0b732SStephen Hemminger static const struct ethtool_ops xennet_ethtool_ops = 20910d160211SJeremy Fitzhardinge { 20920d160211SJeremy Fitzhardinge .get_link = ethtool_op_get_link, 2093e0ce4af9SIan Campbell 2094e0ce4af9SIan Campbell .get_sset_count = xennet_get_sset_count, 2095e0ce4af9SIan Campbell .get_ethtool_stats = xennet_get_ethtool_stats, 2096e0ce4af9SIan Campbell .get_strings = xennet_get_strings, 20970d160211SJeremy Fitzhardinge }; 20980d160211SJeremy Fitzhardinge 20990d160211SJeremy Fitzhardinge #ifdef CONFIG_SYSFS 21001f3c2ebaSDavid Vrabel static ssize_t show_rxbuf(struct device *dev, 21010d160211SJeremy Fitzhardinge struct device_attribute *attr, char *buf) 21020d160211SJeremy Fitzhardinge { 21031f3c2ebaSDavid Vrabel return sprintf(buf, "%lu\n", NET_RX_RING_SIZE); 21040d160211SJeremy Fitzhardinge } 21050d160211SJeremy Fitzhardinge 21061f3c2ebaSDavid Vrabel static ssize_t store_rxbuf(struct device *dev, 21070d160211SJeremy Fitzhardinge struct device_attribute *attr, 21080d160211SJeremy Fitzhardinge const char *buf, size_t len) 21090d160211SJeremy Fitzhardinge { 21100d160211SJeremy Fitzhardinge char *endp; 21110d160211SJeremy Fitzhardinge unsigned long target; 21120d160211SJeremy Fitzhardinge 21130d160211SJeremy Fitzhardinge if (!capable(CAP_NET_ADMIN)) 21140d160211SJeremy Fitzhardinge return -EPERM; 21150d160211SJeremy Fitzhardinge 21160d160211SJeremy Fitzhardinge target = simple_strtoul(buf, &endp, 0); 21170d160211SJeremy Fitzhardinge if (endp == buf) 21180d160211SJeremy Fitzhardinge return -EBADMSG; 21190d160211SJeremy Fitzhardinge 21201f3c2ebaSDavid Vrabel /* rxbuf_min and rxbuf_max are no longer configurable. */ 21210d160211SJeremy Fitzhardinge 21220d160211SJeremy Fitzhardinge return len; 21230d160211SJeremy Fitzhardinge } 21240d160211SJeremy Fitzhardinge 2125d61e4038SJoe Perches static DEVICE_ATTR(rxbuf_min, 0644, show_rxbuf, store_rxbuf); 2126d61e4038SJoe Perches static DEVICE_ATTR(rxbuf_max, 0644, show_rxbuf, store_rxbuf); 2127d61e4038SJoe Perches static DEVICE_ATTR(rxbuf_cur, 0444, show_rxbuf, NULL); 212827b917e5STakashi Iwai 212927b917e5STakashi Iwai static struct attribute *xennet_dev_attrs[] = { 213027b917e5STakashi Iwai &dev_attr_rxbuf_min.attr, 213127b917e5STakashi Iwai &dev_attr_rxbuf_max.attr, 213227b917e5STakashi Iwai &dev_attr_rxbuf_cur.attr, 213327b917e5STakashi Iwai NULL 21340d160211SJeremy Fitzhardinge }; 21350d160211SJeremy Fitzhardinge 213627b917e5STakashi Iwai static const struct attribute_group xennet_dev_group = { 213727b917e5STakashi Iwai .attrs = xennet_dev_attrs 213827b917e5STakashi Iwai }; 21390d160211SJeremy Fitzhardinge #endif /* CONFIG_SYSFS */ 21400d160211SJeremy Fitzhardinge 21418e0e46bbSBill Pemberton static int xennet_remove(struct xenbus_device *dev) 21420d160211SJeremy Fitzhardinge { 21431b713e00SGreg Kroah-Hartman struct netfront_info *info = dev_get_drvdata(&dev->dev); 21440d160211SJeremy Fitzhardinge 21450d160211SJeremy Fitzhardinge dev_dbg(&dev->dev, "%s\n", dev->nodename); 21460d160211SJeremy Fitzhardinge 21475b5971dfSEduardo Otubo if (xenbus_read_driver_state(dev->otherend) != XenbusStateClosed) { 21485b5971dfSEduardo Otubo xenbus_switch_state(dev, XenbusStateClosing); 21498edfe2e9SJuergen Gross wait_event(module_wq, 21505b5971dfSEduardo Otubo xenbus_read_driver_state(dev->otherend) == 2151c2d2e673SJason Andryuk XenbusStateClosing || 2152c2d2e673SJason Andryuk xenbus_read_driver_state(dev->otherend) == 2153c2d2e673SJason Andryuk XenbusStateUnknown); 21545b5971dfSEduardo Otubo 21555b5971dfSEduardo Otubo xenbus_switch_state(dev, XenbusStateClosed); 21568edfe2e9SJuergen Gross wait_event(module_wq, 21575b5971dfSEduardo Otubo xenbus_read_driver_state(dev->otherend) == 21585b5971dfSEduardo Otubo XenbusStateClosed || 21595b5971dfSEduardo Otubo xenbus_read_driver_state(dev->otherend) == 21605b5971dfSEduardo Otubo XenbusStateUnknown); 21615b5971dfSEduardo Otubo } 21625b5971dfSEduardo Otubo 21630d160211SJeremy Fitzhardinge xennet_disconnect_backend(info); 21640d160211SJeremy Fitzhardinge 2165f599c64fSRoss Lagerwall if (info->netdev->reg_state == NETREG_REGISTERED) 21666bc96d04SIan Campbell unregister_netdev(info->netdev); 21676bc96d04SIan Campbell 2168f599c64fSRoss Lagerwall if (info->queues) { 2169f599c64fSRoss Lagerwall rtnl_lock(); 2170ad068118SDavid Vrabel xennet_destroy_queues(info); 2171f599c64fSRoss Lagerwall rtnl_unlock(); 2172f599c64fSRoss Lagerwall } 2173900e1833SDavid Vrabel xennet_free_netdev(info->netdev); 21740d160211SJeremy Fitzhardinge 21750d160211SJeremy Fitzhardinge return 0; 21760d160211SJeremy Fitzhardinge } 21770d160211SJeremy Fitzhardinge 217895afae48SDavid Vrabel static const struct xenbus_device_id netfront_ids[] = { 217995afae48SDavid Vrabel { "vif" }, 218095afae48SDavid Vrabel { "" } 218195afae48SDavid Vrabel }; 218295afae48SDavid Vrabel 218395afae48SDavid Vrabel static struct xenbus_driver netfront_driver = { 218495afae48SDavid Vrabel .ids = netfront_ids, 21850d160211SJeremy Fitzhardinge .probe = netfront_probe, 21868e0e46bbSBill Pemberton .remove = xennet_remove, 21870d160211SJeremy Fitzhardinge .resume = netfront_resume, 2188f502bf2bSIan Campbell .otherend_changed = netback_changed, 218995afae48SDavid Vrabel }; 21900d160211SJeremy Fitzhardinge 21910d160211SJeremy Fitzhardinge static int __init netif_init(void) 21920d160211SJeremy Fitzhardinge { 21936e833587SJeremy Fitzhardinge if (!xen_domain()) 21940d160211SJeremy Fitzhardinge return -ENODEV; 21950d160211SJeremy Fitzhardinge 219651c71a3bSKonrad Rzeszutek Wilk if (!xen_has_pv_nic_devices()) 2197b9136d20SIgor Mammedov return -ENODEV; 2198b9136d20SIgor Mammedov 2199383eda32SJoe Perches pr_info("Initialising Xen virtual ethernet driver\n"); 22000d160211SJeremy Fitzhardinge 2201034702a6SJuergen Gross /* Allow as many queues as there are CPUs inut max. 8 if user has not 220232a84405SWei Liu * specified a value. 220332a84405SWei Liu */ 220432a84405SWei Liu if (xennet_max_queues == 0) 2205034702a6SJuergen Gross xennet_max_queues = min_t(unsigned int, MAX_QUEUES_DEFAULT, 2206034702a6SJuergen Gross num_online_cpus()); 220750ee6061SAndrew J. Bennieston 2208ffb78a26SAl Viro return xenbus_register_frontend(&netfront_driver); 22090d160211SJeremy Fitzhardinge } 22100d160211SJeremy Fitzhardinge module_init(netif_init); 22110d160211SJeremy Fitzhardinge 22120d160211SJeremy Fitzhardinge 22130d160211SJeremy Fitzhardinge static void __exit netif_exit(void) 22140d160211SJeremy Fitzhardinge { 2215ffb78a26SAl Viro xenbus_unregister_driver(&netfront_driver); 22160d160211SJeremy Fitzhardinge } 22170d160211SJeremy Fitzhardinge module_exit(netif_exit); 22180d160211SJeremy Fitzhardinge 22190d160211SJeremy Fitzhardinge MODULE_DESCRIPTION("Xen virtual network device frontend"); 22200d160211SJeremy Fitzhardinge MODULE_LICENSE("GPL"); 2221d2f0c52bSMark McLoughlin MODULE_ALIAS("xen:vif"); 22224f93f09bSMark McLoughlin MODULE_ALIAS("xennet"); 2223