/*
 * Virtual network driver for conversing with remote driver backends.
 *
 * Copyright (c) 2002-2005, K A Fraser
 * Copyright (c) 2005, XenSource Ltd
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/if_ether.h>
#include <net/tcp.h>
#include <linux/udp.h>
#include <linux/moduleparam.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <net/ip.h>

#include <xen/xen.h>
#include <xen/xenbus.h>
#include <xen/events.h>
#include <xen/page.h>
#include <xen/platform_pci.h>
#include <xen/grant_table.h>

#include <xen/interface/io/netif.h>
#include <xen/interface/memory.h>
#include <xen/interface/grant_table.h>

/* Module parameters */
#define MAX_QUEUES_DEFAULT 8
static unsigned int xennet_max_queues;
module_param_named(max_queues, xennet_max_queues, uint, 0644);
MODULE_PARM_DESC(max_queues,
                 "Maximum number of queues per virtual interface");

#define XENNET_TIMEOUT  (5 * HZ)

static const struct ethtool_ops xennet_ethtool_ops;

struct netfront_cb {
        int pull_to;
};

#define NETFRONT_SKB_CB(skb)    ((struct netfront_cb *)((skb)->cb))

#define RX_COPY_THRESHOLD 256

#define GRANT_INVALID_REF       0

#define NET_TX_RING_SIZE __CONST_RING_SIZE(xen_netif_tx, XEN_PAGE_SIZE)
#define NET_RX_RING_SIZE __CONST_RING_SIZE(xen_netif_rx, XEN_PAGE_SIZE)

/* Minimum number of Rx slots (includes slot for GSO metadata). */
#define NET_RX_SLOTS_MIN (XEN_NETIF_NR_SLOTS_MIN + 1)

/* Queue name is interface name with "-qNNN" appended */
#define QUEUE_NAME_SIZE (IFNAMSIZ + 6)

/* IRQ name is queue name with "-tx" or "-rx" appended */
#define IRQ_NAME_SIZE (QUEUE_NAME_SIZE + 3)
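
/*
 * Note: with the usual 4 KiB XEN_PAGE_SIZE, __CONST_RING_SIZE() above
 * rounds the per-page entry count down to a power of two, which works
 * out to 256 slots for each of the tx and rx rings; the per-queue
 * freelist and grant-reference arrays below are sized to match.
 */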
static DECLARE_WAIT_QUEUE_HEAD(module_wq);

struct netfront_stats {
        u64                     packets;
        u64                     bytes;
        struct u64_stats_sync   syncp;
};

struct netfront_info;

struct netfront_queue {
        unsigned int id; /* Queue ID, 0-based */
        char name[QUEUE_NAME_SIZE]; /* DEVNAME-qN */
        struct netfront_info *info;

        struct napi_struct napi;

        /* Split event channels support, tx_* == rx_* when using
         * single event channel.
         */
        unsigned int tx_evtchn, rx_evtchn;
        unsigned int tx_irq, rx_irq;
        /* Only used when split event channels support is enabled */
        char tx_irq_name[IRQ_NAME_SIZE]; /* DEVNAME-qN-tx */
        char rx_irq_name[IRQ_NAME_SIZE]; /* DEVNAME-qN-rx */

        spinlock_t tx_lock;
        struct xen_netif_tx_front_ring tx;
        int tx_ring_ref;

        /*
         * {tx,rx}_skbs store outstanding skbuffs. Free tx_skb entries
         * are linked from tx_skb_freelist through skb_entry.link.
         *
         * NB. Freelist index entries are always going to be less than
         * PAGE_OFFSET, whereas pointers to skbs will always be equal or
         * greater than PAGE_OFFSET: we use this property to distinguish
         * them.
         */
        union skb_entry {
                struct sk_buff *skb;
                unsigned long link;
        } tx_skbs[NET_TX_RING_SIZE];
        grant_ref_t gref_tx_head;
        grant_ref_t grant_tx_ref[NET_TX_RING_SIZE];
        struct page *grant_tx_page[NET_TX_RING_SIZE];
        unsigned int tx_skb_freelist;

        spinlock_t rx_lock ____cacheline_aligned_in_smp;
        struct xen_netif_rx_front_ring rx;
        int rx_ring_ref;

        struct timer_list rx_refill_timer;

        struct sk_buff *rx_skbs[NET_RX_RING_SIZE];
        grant_ref_t gref_rx_head;
        grant_ref_t grant_rx_ref[NET_RX_RING_SIZE];
};

struct netfront_info {
        struct list_head list;
        struct net_device *netdev;
        struct xenbus_device *xbdev;

        /* Multi-queue support */
        struct netfront_queue *queues;

        /* Statistics */
        struct netfront_stats __percpu *rx_stats;
        struct netfront_stats __percpu *tx_stats;

        atomic_t rx_gso_checksum_fixup;
};

struct netfront_rx_info {
        struct xen_netif_rx_response rx;
        struct xen_netif_extra_info extras[XEN_NETIF_EXTRA_TYPE_MAX - 1];
};

static void skb_entry_set_link(union skb_entry *list, unsigned short id)
{
        list->link = id;
}

static int skb_entry_is_link(const union skb_entry *list)
{
        BUILD_BUG_ON(sizeof(list->skb) != sizeof(list->link));
        return (unsigned long)list->skb < PAGE_OFFSET;
}

/*
 * Access helpers for acquiring and freeing slots in tx_skbs[].
 */

static void add_id_to_freelist(unsigned *head, union skb_entry *list,
                               unsigned short id)
{
        skb_entry_set_link(&list[id], *head);
        *head = id;
}

static unsigned short get_id_from_freelist(unsigned *head,
                                           union skb_entry *list)
{
        unsigned int id = *head;
        *head = list[id].link;
        return id;
}
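
/*
 * How the freelist encoding works: a free slot's skb_entry holds the
 * index of the next free slot, while an in-use slot holds the skb
 * pointer itself. Slot indices are small integers (always below
 * PAGE_OFFSET) and skb pointers are kernel virtual addresses (at or
 * above PAGE_OFFSET), so skb_entry_is_link() can tell the two apart
 * without a separate tag field.
 */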
static int xennet_rxidx(RING_IDX idx)
{
        return idx & (NET_RX_RING_SIZE - 1);
}

static struct sk_buff *xennet_get_rx_skb(struct netfront_queue *queue,
                                         RING_IDX ri)
{
        int i = xennet_rxidx(ri);
        struct sk_buff *skb = queue->rx_skbs[i];
        queue->rx_skbs[i] = NULL;
        return skb;
}

static grant_ref_t xennet_get_rx_ref(struct netfront_queue *queue,
                                     RING_IDX ri)
{
        int i = xennet_rxidx(ri);
        grant_ref_t ref = queue->grant_rx_ref[i];
        queue->grant_rx_ref[i] = GRANT_INVALID_REF;
        return ref;
}

#ifdef CONFIG_SYSFS
static const struct attribute_group xennet_dev_group;
#endif

static bool xennet_can_sg(struct net_device *dev)
{
        return dev->features & NETIF_F_SG;
}

static void rx_refill_timeout(struct timer_list *t)
{
        struct netfront_queue *queue = from_timer(queue, t, rx_refill_timer);
        napi_schedule(&queue->napi);
}

static int netfront_tx_slot_available(struct netfront_queue *queue)
{
        return (queue->tx.req_prod_pvt - queue->tx.rsp_cons) <
                (NET_TX_RING_SIZE - XEN_NETIF_NR_SLOTS_MIN - 1);
}

static void xennet_maybe_wake_tx(struct netfront_queue *queue)
{
        struct net_device *dev = queue->info->netdev;
        struct netdev_queue *dev_queue = netdev_get_tx_queue(dev, queue->id);

        if (unlikely(netif_tx_queue_stopped(dev_queue)) &&
            netfront_tx_slot_available(queue) &&
            likely(netif_running(dev)))
                netif_tx_wake_queue(netdev_get_tx_queue(dev, queue->id));
}

static struct sk_buff *xennet_alloc_one_rx_buffer(struct netfront_queue *queue)
{
        struct sk_buff *skb;
        struct page *page;

        skb = __netdev_alloc_skb(queue->info->netdev,
                                 RX_COPY_THRESHOLD + NET_IP_ALIGN,
                                 GFP_ATOMIC | __GFP_NOWARN);
        if (unlikely(!skb))
                return NULL;

        page = alloc_page(GFP_ATOMIC | __GFP_NOWARN);
        if (!page) {
                kfree_skb(skb);
                return NULL;
        }
        skb_add_rx_frag(skb, 0, page, 0, 0, PAGE_SIZE);

        /* Align ip header to a 16 bytes boundary */
        skb_reserve(skb, NET_IP_ALIGN);
        skb->dev = queue->info->netdev;

        return skb;
}
static void xennet_alloc_rx_buffers(struct netfront_queue *queue)
{
        RING_IDX req_prod = queue->rx.req_prod_pvt;
        int notify;
        int err = 0;

        if (unlikely(!netif_carrier_ok(queue->info->netdev)))
                return;

        for (req_prod = queue->rx.req_prod_pvt;
             req_prod - queue->rx.rsp_cons < NET_RX_RING_SIZE;
             req_prod++) {
                struct sk_buff *skb;
                unsigned short id;
                grant_ref_t ref;
                struct page *page;
                struct xen_netif_rx_request *req;

                skb = xennet_alloc_one_rx_buffer(queue);
                if (!skb) {
                        err = -ENOMEM;
                        break;
                }

                id = xennet_rxidx(req_prod);

                BUG_ON(queue->rx_skbs[id]);
                queue->rx_skbs[id] = skb;

                ref = gnttab_claim_grant_reference(&queue->gref_rx_head);
                WARN_ON_ONCE(IS_ERR_VALUE((unsigned long)(int)ref));
                queue->grant_rx_ref[id] = ref;

                page = skb_frag_page(&skb_shinfo(skb)->frags[0]);

                req = RING_GET_REQUEST(&queue->rx, req_prod);
                gnttab_page_grant_foreign_access_ref_one(ref,
                                                         queue->info->xbdev->otherend_id,
                                                         page,
                                                         0);
                req->id = id;
                req->gref = ref;
        }

        queue->rx.req_prod_pvt = req_prod;

        /* Try again later if there are not enough requests or skb allocation
         * failed.
         * Enough requests is quantified as the sum of newly created slots and
         * the unconsumed slots at the backend.
         */
        if (req_prod - queue->rx.rsp_cons < NET_RX_SLOTS_MIN ||
            unlikely(err)) {
                mod_timer(&queue->rx_refill_timer, jiffies + (HZ/10));
                return;
        }

        RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&queue->rx, notify);
        if (notify)
                notify_remote_via_irq(queue->rx_irq);
}
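
/*
 * Rx buffer lifecycle: each page posted above is granted to the
 * backend, which fills it with received data; the grant is reclaimed
 * in xennet_get_responses() when the corresponding response is
 * consumed. If fewer than NET_RX_SLOTS_MIN slots are outstanding (or
 * an skb allocation failed), the refill timer retries after 100 ms
 * instead of notifying the backend about a batch too small to hold a
 * worst-case packet.
 */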
static int xennet_open(struct net_device *dev)
{
        struct netfront_info *np = netdev_priv(dev);
        unsigned int num_queues = dev->real_num_tx_queues;
        unsigned int i = 0;
        struct netfront_queue *queue = NULL;

        if (!np->queues)
                return -ENODEV;

        for (i = 0; i < num_queues; ++i) {
                queue = &np->queues[i];
                napi_enable(&queue->napi);

                spin_lock_bh(&queue->rx_lock);
                if (netif_carrier_ok(dev)) {
                        xennet_alloc_rx_buffers(queue);
                        queue->rx.sring->rsp_event = queue->rx.rsp_cons + 1;
                        if (RING_HAS_UNCONSUMED_RESPONSES(&queue->rx))
                                napi_schedule(&queue->napi);
                }
                spin_unlock_bh(&queue->rx_lock);
        }

        netif_tx_start_all_queues(dev);

        return 0;
}

static void xennet_tx_buf_gc(struct netfront_queue *queue)
{
        RING_IDX cons, prod;
        unsigned short id;
        struct sk_buff *skb;
        bool more_to_do;

        BUG_ON(!netif_carrier_ok(queue->info->netdev));

        do {
                prod = queue->tx.sring->rsp_prod;
                rmb(); /* Ensure we see responses up to 'prod'. */

                for (cons = queue->tx.rsp_cons; cons != prod; cons++) {
                        struct xen_netif_tx_response *txrsp;

                        txrsp = RING_GET_RESPONSE(&queue->tx, cons);
                        if (txrsp->status == XEN_NETIF_RSP_NULL)
                                continue;

                        id = txrsp->id;
                        skb = queue->tx_skbs[id].skb;
                        if (unlikely(gnttab_query_foreign_access(
                                queue->grant_tx_ref[id]) != 0)) {
                                pr_alert("%s: warning -- grant still in use by backend domain\n",
                                         __func__);
                                BUG();
                        }
                        gnttab_end_foreign_access_ref(
                                queue->grant_tx_ref[id], GNTMAP_readonly);
                        gnttab_release_grant_reference(
                                &queue->gref_tx_head, queue->grant_tx_ref[id]);
                        queue->grant_tx_ref[id] = GRANT_INVALID_REF;
                        queue->grant_tx_page[id] = NULL;
                        add_id_to_freelist(&queue->tx_skb_freelist, queue->tx_skbs, id);
                        dev_kfree_skb_irq(skb);
                }

                queue->tx.rsp_cons = prod;

                RING_FINAL_CHECK_FOR_RESPONSES(&queue->tx, more_to_do);
        } while (more_to_do);

        xennet_maybe_wake_tx(queue);
}
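
/*
 * Tx completion: for every response from the backend,
 * xennet_tx_buf_gc() ends foreign access to the granted page, returns
 * the grant reference and the slot id to their freelists and drops the
 * skb reference taken for that slot. A grant still in use at this
 * point means the backend kept a mapping it should have released,
 * which is unrecoverable -- hence the BUG().
 */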
struct xennet_gnttab_make_txreq {
        struct netfront_queue *queue;
        struct sk_buff *skb;
        struct page *page;
        struct xen_netif_tx_request *tx; /* Last request */
        unsigned int size;
};

static void xennet_tx_setup_grant(unsigned long gfn, unsigned int offset,
                                  unsigned int len, void *data)
{
        struct xennet_gnttab_make_txreq *info = data;
        unsigned int id;
        struct xen_netif_tx_request *tx;
        grant_ref_t ref;
        /* convenient aliases */
        struct page *page = info->page;
        struct netfront_queue *queue = info->queue;
        struct sk_buff *skb = info->skb;

        id = get_id_from_freelist(&queue->tx_skb_freelist, queue->tx_skbs);
        tx = RING_GET_REQUEST(&queue->tx, queue->tx.req_prod_pvt++);
        ref = gnttab_claim_grant_reference(&queue->gref_tx_head);
        WARN_ON_ONCE(IS_ERR_VALUE((unsigned long)(int)ref));

        gnttab_grant_foreign_access_ref(ref, queue->info->xbdev->otherend_id,
                                        gfn, GNTMAP_readonly);

        queue->tx_skbs[id].skb = skb;
        queue->grant_tx_page[id] = page;
        queue->grant_tx_ref[id] = ref;

        tx->id = id;
        tx->gref = ref;
        tx->offset = offset;
        tx->size = len;
        tx->flags = 0;

        info->tx = tx;
        info->size += tx->size;
}

static struct xen_netif_tx_request *xennet_make_first_txreq(
        struct netfront_queue *queue, struct sk_buff *skb,
        struct page *page, unsigned int offset, unsigned int len)
{
        struct xennet_gnttab_make_txreq info = {
                .queue = queue,
                .skb = skb,
                .page = page,
                .size = 0,
        };

        gnttab_for_one_grant(page, offset, len, xennet_tx_setup_grant, &info);

        return info.tx;
}

static void xennet_make_one_txreq(unsigned long gfn, unsigned int offset,
                                  unsigned int len, void *data)
{
        struct xennet_gnttab_make_txreq *info = data;

        info->tx->flags |= XEN_NETTXF_more_data;
        skb_get(info->skb);
        xennet_tx_setup_grant(gfn, offset, len, data);
}

static struct xen_netif_tx_request *xennet_make_txreqs(
        struct netfront_queue *queue, struct xen_netif_tx_request *tx,
        struct sk_buff *skb, struct page *page,
        unsigned int offset, unsigned int len)
{
        struct xennet_gnttab_make_txreq info = {
                .queue = queue,
                .skb = skb,
                .tx = tx,
        };

        /* Skip unused frames from start of page */
        page += offset >> PAGE_SHIFT;
        offset &= ~PAGE_MASK;

        while (len) {
                info.page = page;
                info.size = 0;

                gnttab_foreach_grant_in_range(page, offset, len,
                                              xennet_make_one_txreq,
                                              &info);

                page++;
                offset = 0;
                len -= info.size;
        }

        return info.tx;
}
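
/*
 * Request construction, in summary: xennet_make_first_txreq() emits
 * the request for the first grant-sized chunk of the linear area, and
 * xennet_make_txreqs() covers the remaining bytes (and, from the
 * caller, each frag). Every follow-up request is flagged with
 * XEN_NETTXF_more_data and takes an extra skb reference via skb_get()
 * so the skb stays alive until all of its slots complete. The caller
 * afterwards rewrites the first request's size to the total packet
 * length, as the netif protocol requires.
 */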
/*
 * Count how many ring slots are required to send this skb. Each frag
 * might be a compound page.
 */
static int xennet_count_skb_slots(struct sk_buff *skb)
{
        int i, frags = skb_shinfo(skb)->nr_frags;
        int slots;

        slots = gnttab_count_grant(offset_in_page(skb->data),
                                   skb_headlen(skb));

        for (i = 0; i < frags; i++) {
                skb_frag_t *frag = skb_shinfo(skb)->frags + i;
                unsigned long size = skb_frag_size(frag);
                unsigned long offset = skb_frag_off(frag);

                /* Skip unused frames from start of page */
                offset &= ~PAGE_MASK;

                slots += gnttab_count_grant(offset, size);
        }

        return slots;
}
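
/*
 * Worked example (assuming the usual 4 KiB XEN_PAGE_SIZE): a 5000-byte
 * frag starting at in-page offset 100 covers bytes 100..5099 and so
 * crosses one page boundary; gnttab_count_grant(100, 5000) returns 2.
 * The same per-grant split is performed by xennet_make_txreqs() when
 * the requests are actually built.
 */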
static u16 xennet_select_queue(struct net_device *dev, struct sk_buff *skb,
                               struct net_device *sb_dev)
{
        unsigned int num_queues = dev->real_num_tx_queues;
        u32 hash;
        u16 queue_idx;

        /* First, check if there is only one queue */
        if (num_queues == 1) {
                queue_idx = 0;
        } else {
                hash = skb_get_hash(skb);
                queue_idx = hash % num_queues;
        }

        return queue_idx;
}

#define MAX_XEN_SKB_FRAGS (65536 / XEN_PAGE_SIZE + 1)

static netdev_tx_t xennet_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct netfront_info *np = netdev_priv(dev);
        struct netfront_stats *tx_stats = this_cpu_ptr(np->tx_stats);
        struct xen_netif_tx_request *tx, *first_tx;
        unsigned int i;
        int notify;
        int slots;
        struct page *page;
        unsigned int offset;
        unsigned int len;
        unsigned long flags;
        struct netfront_queue *queue = NULL;
        unsigned int num_queues = dev->real_num_tx_queues;
        u16 queue_index;
        struct sk_buff *nskb;

        /* Drop the packet if no queues are set up */
        if (num_queues < 1)
                goto drop;
        /* Determine which queue to transmit this SKB on */
        queue_index = skb_get_queue_mapping(skb);
        queue = &np->queues[queue_index];

        /* If skb->len is too big for wire format, drop skb and alert
         * user about misconfiguration.
         */
        if (unlikely(skb->len > XEN_NETIF_MAX_TX_SIZE)) {
                net_alert_ratelimited(
                        "xennet: skb->len = %u, too big for wire format\n",
                        skb->len);
                goto drop;
        }

        slots = xennet_count_skb_slots(skb);
        if (unlikely(slots > MAX_XEN_SKB_FRAGS + 1)) {
                net_dbg_ratelimited("xennet: skb rides the rocket: %d slots, %d bytes\n",
                                    slots, skb->len);
                if (skb_linearize(skb))
                        goto drop;
        }

        page = virt_to_page(skb->data);
        offset = offset_in_page(skb->data);

        /* The first req should be at least ETH_HLEN size or the packet will be
         * dropped by netback.
         */
        if (unlikely(PAGE_SIZE - offset < ETH_HLEN)) {
                nskb = skb_copy(skb, GFP_ATOMIC);
                if (!nskb)
                        goto drop;
                dev_consume_skb_any(skb);
                skb = nskb;
                page = virt_to_page(skb->data);
                offset = offset_in_page(skb->data);
        }

        len = skb_headlen(skb);

        spin_lock_irqsave(&queue->tx_lock, flags);

        if (unlikely(!netif_carrier_ok(dev) ||
                     (slots > 1 && !xennet_can_sg(dev)) ||
                     netif_needs_gso(skb, netif_skb_features(skb)))) {
                spin_unlock_irqrestore(&queue->tx_lock, flags);
                goto drop;
        }

        /* First request for the linear area. */
        first_tx = tx = xennet_make_first_txreq(queue, skb,
                                                page, offset, len);
        offset += tx->size;
        if (offset == PAGE_SIZE) {
                page++;
                offset = 0;
        }
        len -= tx->size;

        if (skb->ip_summed == CHECKSUM_PARTIAL)
                /* local packet? */
                tx->flags |= XEN_NETTXF_csum_blank | XEN_NETTXF_data_validated;
        else if (skb->ip_summed == CHECKSUM_UNNECESSARY)
                /* remote but checksummed. */
                tx->flags |= XEN_NETTXF_data_validated;

        /* Optional extra info after the first request. */
        if (skb_shinfo(skb)->gso_size) {
                struct xen_netif_extra_info *gso;

                gso = (struct xen_netif_extra_info *)
                        RING_GET_REQUEST(&queue->tx, queue->tx.req_prod_pvt++);

                tx->flags |= XEN_NETTXF_extra_info;

                gso->u.gso.size = skb_shinfo(skb)->gso_size;
                gso->u.gso.type = (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) ?
                        XEN_NETIF_GSO_TYPE_TCPV6 :
                        XEN_NETIF_GSO_TYPE_TCPV4;
                gso->u.gso.pad = 0;
                gso->u.gso.features = 0;

                gso->type = XEN_NETIF_EXTRA_TYPE_GSO;
                gso->flags = 0;
        }

        /* Requests for the rest of the linear area. */
        tx = xennet_make_txreqs(queue, tx, skb, page, offset, len);

        /* Requests for all the frags. */
        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
                skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

                tx = xennet_make_txreqs(queue, tx, skb, skb_frag_page(frag),
                                        skb_frag_off(frag),
                                        skb_frag_size(frag));
        }

        /* First request has the packet length. */
        first_tx->size = skb->len;

        RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&queue->tx, notify);
        if (notify)
                notify_remote_via_irq(queue->tx_irq);

        u64_stats_update_begin(&tx_stats->syncp);
        tx_stats->bytes += skb->len;
        tx_stats->packets++;
        u64_stats_update_end(&tx_stats->syncp);

        /* Note: It is not safe to access skb after xennet_tx_buf_gc()! */
        xennet_tx_buf_gc(queue);

        if (!netfront_tx_slot_available(queue))
                netif_tx_stop_queue(netdev_get_tx_queue(dev, queue->id));

        spin_unlock_irqrestore(&queue->tx_lock, flags);

        return NETDEV_TX_OK;

 drop:
        dev->stats.tx_dropped++;
        dev_kfree_skb_any(skb);
        return NETDEV_TX_OK;
}
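
/*
 * Note that the drop path also returns NETDEV_TX_OK: the skb has been
 * freed, i.e. consumed, so the core must not requeue it; the failure
 * is only visible through the tx_dropped counter.
 */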
static int xennet_close(struct net_device *dev)
{
        struct netfront_info *np = netdev_priv(dev);
        unsigned int num_queues = dev->real_num_tx_queues;
        unsigned int i;
        struct netfront_queue *queue;

        netif_tx_stop_all_queues(np->netdev);
        for (i = 0; i < num_queues; ++i) {
                queue = &np->queues[i];
                napi_disable(&queue->napi);
        }
        return 0;
}

static void xennet_move_rx_slot(struct netfront_queue *queue, struct sk_buff *skb,
                                grant_ref_t ref)
{
        int new = xennet_rxidx(queue->rx.req_prod_pvt);

        BUG_ON(queue->rx_skbs[new]);
        queue->rx_skbs[new] = skb;
        queue->grant_rx_ref[new] = ref;
        RING_GET_REQUEST(&queue->rx, queue->rx.req_prod_pvt)->id = new;
        RING_GET_REQUEST(&queue->rx, queue->rx.req_prod_pvt)->gref = ref;
        queue->rx.req_prod_pvt++;
}

static int xennet_get_extras(struct netfront_queue *queue,
                             struct xen_netif_extra_info *extras,
                             RING_IDX rp)
{
        struct xen_netif_extra_info *extra;
        struct device *dev = &queue->info->netdev->dev;
        RING_IDX cons = queue->rx.rsp_cons;
        int err = 0;

        do {
                struct sk_buff *skb;
                grant_ref_t ref;

                if (unlikely(cons + 1 == rp)) {
                        if (net_ratelimit())
                                dev_warn(dev, "Missing extra info\n");
                        err = -EBADR;
                        break;
                }

                extra = (struct xen_netif_extra_info *)
                        RING_GET_RESPONSE(&queue->rx, ++cons);

                if (unlikely(!extra->type ||
                             extra->type >= XEN_NETIF_EXTRA_TYPE_MAX)) {
                        if (net_ratelimit())
                                dev_warn(dev, "Invalid extra type: %d\n",
                                         extra->type);
                        err = -EINVAL;
                } else {
                        memcpy(&extras[extra->type - 1], extra,
                               sizeof(*extra));
                }

                skb = xennet_get_rx_skb(queue, cons);
                ref = xennet_get_rx_ref(queue, cons);
                xennet_move_rx_slot(queue, skb, ref);
        } while (extra->flags & XEN_NETIF_EXTRA_FLAG_MORE);

        queue->rx.rsp_cons = cons;
        return err;
}
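
/*
 * Extra-info slots carry metadata (such as GSO parameters) rather
 * than packet data, but they occupy rx ring slots that had buffers
 * posted in them, so xennet_get_extras() recycles each such skb/grant
 * pair back onto the ring via xennet_move_rx_slot().
 */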
static int xennet_get_responses(struct netfront_queue *queue,
                                struct netfront_rx_info *rinfo, RING_IDX rp,
                                struct sk_buff_head *list)
{
        struct xen_netif_rx_response *rx = &rinfo->rx;
        struct xen_netif_extra_info *extras = rinfo->extras;
        struct device *dev = &queue->info->netdev->dev;
        RING_IDX cons = queue->rx.rsp_cons;
        struct sk_buff *skb = xennet_get_rx_skb(queue, cons);
        grant_ref_t ref = xennet_get_rx_ref(queue, cons);
        int max = XEN_NETIF_NR_SLOTS_MIN + (rx->status <= RX_COPY_THRESHOLD);
        int slots = 1;
        int err = 0;
        unsigned long ret;

        if (rx->flags & XEN_NETRXF_extra_info) {
                err = xennet_get_extras(queue, extras, rp);
                cons = queue->rx.rsp_cons;
        }

        for (;;) {
                if (unlikely(rx->status < 0 ||
                             rx->offset + rx->status > XEN_PAGE_SIZE)) {
                        if (net_ratelimit())
                                dev_warn(dev, "rx->offset: %u, size: %d\n",
                                         rx->offset, rx->status);
                        xennet_move_rx_slot(queue, skb, ref);
                        err = -EINVAL;
                        goto next;
                }

                /*
                 * This definitely indicates a bug, either in this driver or in
                 * the backend driver. In future this should flag the bad
                 * situation to the system controller to reboot the backend.
                 */
                if (ref == GRANT_INVALID_REF) {
                        if (net_ratelimit())
                                dev_warn(dev, "Bad rx response id %d.\n",
                                         rx->id);
                        err = -EINVAL;
                        goto next;
                }

                ret = gnttab_end_foreign_access_ref(ref, 0);
                BUG_ON(!ret);
                gnttab_release_grant_reference(&queue->gref_rx_head, ref);

                __skb_queue_tail(list, skb);

next:
                if (!(rx->flags & XEN_NETRXF_more_data))
                        break;

                if (cons + slots == rp) {
                        if (net_ratelimit())
                                dev_warn(dev, "Need more slots\n");
                        err = -ENOENT;
                        break;
                }

                rx = RING_GET_RESPONSE(&queue->rx, cons + slots);
                skb = xennet_get_rx_skb(queue, cons + slots);
                ref = xennet_get_rx_ref(queue, cons + slots);
                slots++;
        }

        if (unlikely(slots > max)) {
                if (net_ratelimit())
                        dev_warn(dev, "Too many slots\n");
                err = -E2BIG;
        }

        if (unlikely(err))
                queue->rx.rsp_cons = cons + slots;

        return err;
}
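
/*
 * xennet_get_responses() gathers the one or more slots making up a
 * single packet onto 'list'. 'max' is XEN_NETIF_NR_SLOTS_MIN plus one
 * extra slot when the first chunk is no bigger than RX_COPY_THRESHOLD;
 * anything beyond that is treated as a backend bug (-E2BIG). On error
 * the consumer index is advanced past every slot of the offending
 * packet so the ring stays consistent.
 */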
static int xennet_set_skb_gso(struct sk_buff *skb,
                              struct xen_netif_extra_info *gso)
{
        if (!gso->u.gso.size) {
                if (net_ratelimit())
                        pr_warn("GSO size must not be zero\n");
                return -EINVAL;
        }

        if (gso->u.gso.type != XEN_NETIF_GSO_TYPE_TCPV4 &&
            gso->u.gso.type != XEN_NETIF_GSO_TYPE_TCPV6) {
                if (net_ratelimit())
                        pr_warn("Bad GSO type %d\n", gso->u.gso.type);
                return -EINVAL;
        }

        skb_shinfo(skb)->gso_size = gso->u.gso.size;
        skb_shinfo(skb)->gso_type =
                (gso->u.gso.type == XEN_NETIF_GSO_TYPE_TCPV4) ?
                SKB_GSO_TCPV4 :
                SKB_GSO_TCPV6;

        /* Header must be checked, and gso_segs computed. */
        skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY;
        skb_shinfo(skb)->gso_segs = 0;

        return 0;
}

static int xennet_fill_frags(struct netfront_queue *queue,
                             struct sk_buff *skb,
                             struct sk_buff_head *list)
{
        RING_IDX cons = queue->rx.rsp_cons;
        struct sk_buff *nskb;

        while ((nskb = __skb_dequeue(list))) {
                struct xen_netif_rx_response *rx =
                        RING_GET_RESPONSE(&queue->rx, ++cons);
                skb_frag_t *nfrag = &skb_shinfo(nskb)->frags[0];

                if (skb_shinfo(skb)->nr_frags == MAX_SKB_FRAGS) {
                        unsigned int pull_to = NETFRONT_SKB_CB(skb)->pull_to;

                        BUG_ON(pull_to < skb_headlen(skb));
                        __pskb_pull_tail(skb, pull_to - skb_headlen(skb));
                }
                if (unlikely(skb_shinfo(skb)->nr_frags >= MAX_SKB_FRAGS)) {
                        queue->rx.rsp_cons = ++cons + skb_queue_len(list);
                        kfree_skb(nskb);
                        return -ENOENT;
                }

                skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
                                skb_frag_page(nfrag),
                                rx->offset, rx->status, PAGE_SIZE);

                skb_shinfo(nskb)->nr_frags = 0;
                kfree_skb(nskb);
        }

        queue->rx.rsp_cons = cons;

        return 0;
}

static int checksum_setup(struct net_device *dev, struct sk_buff *skb)
{
        bool recalculate_partial_csum = false;

        /*
         * A GSO SKB must be CHECKSUM_PARTIAL. However some buggy
         * peers can fail to set NETRXF_csum_blank when sending a GSO
         * frame. In this case force the SKB to CHECKSUM_PARTIAL and
         * recalculate the partial checksum.
         */
        if (skb->ip_summed != CHECKSUM_PARTIAL && skb_is_gso(skb)) {
                struct netfront_info *np = netdev_priv(dev);

                atomic_inc(&np->rx_gso_checksum_fixup);
                skb->ip_summed = CHECKSUM_PARTIAL;
                recalculate_partial_csum = true;
        }

        /* A non-CHECKSUM_PARTIAL SKB does not require setup. */
        if (skb->ip_summed != CHECKSUM_PARTIAL)
                return 0;

        return skb_checksum_setup(skb, recalculate_partial_csum);
}
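
/*
 * pull_to, set in xennet_poll() and capped at RX_COPY_THRESHOLD, is
 * how much data handle_incoming_queue() pulls into the linear area so
 * that protocol headers can be parsed without touching the frags.
 */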
static int handle_incoming_queue(struct netfront_queue *queue,
                                 struct sk_buff_head *rxq)
{
        struct netfront_stats *rx_stats = this_cpu_ptr(queue->info->rx_stats);
        int packets_dropped = 0;
        struct sk_buff *skb;

        while ((skb = __skb_dequeue(rxq)) != NULL) {
                int pull_to = NETFRONT_SKB_CB(skb)->pull_to;

                if (pull_to > skb_headlen(skb))
                        __pskb_pull_tail(skb, pull_to - skb_headlen(skb));

                /* Ethernet work: Delayed to here as it peeks the header. */
                skb->protocol = eth_type_trans(skb, queue->info->netdev);
                skb_reset_network_header(skb);

                if (checksum_setup(queue->info->netdev, skb)) {
                        kfree_skb(skb);
                        packets_dropped++;
                        queue->info->netdev->stats.rx_errors++;
                        continue;
                }

                u64_stats_update_begin(&rx_stats->syncp);
                rx_stats->packets++;
                rx_stats->bytes += skb->len;
                u64_stats_update_end(&rx_stats->syncp);

                /* Pass it up. */
                napi_gro_receive(&queue->napi, skb);
        }

        return packets_dropped;
}

static int xennet_poll(struct napi_struct *napi, int budget)
{
        struct netfront_queue *queue = container_of(napi, struct netfront_queue, napi);
        struct net_device *dev = queue->info->netdev;
        struct sk_buff *skb;
        struct netfront_rx_info rinfo;
        struct xen_netif_rx_response *rx = &rinfo.rx;
        struct xen_netif_extra_info *extras = rinfo.extras;
        RING_IDX i, rp;
        int work_done;
        struct sk_buff_head rxq;
        struct sk_buff_head errq;
        struct sk_buff_head tmpq;
        int err;

        spin_lock(&queue->rx_lock);

        skb_queue_head_init(&rxq);
        skb_queue_head_init(&errq);
        skb_queue_head_init(&tmpq);

        rp = queue->rx.sring->rsp_prod;
        rmb(); /* Ensure we see queued responses up to 'rp'. */
        i = queue->rx.rsp_cons;
        work_done = 0;
        while ((i != rp) && (work_done < budget)) {
                memcpy(rx, RING_GET_RESPONSE(&queue->rx, i), sizeof(*rx));
                memset(extras, 0, sizeof(rinfo.extras));

                err = xennet_get_responses(queue, &rinfo, rp, &tmpq);

                if (unlikely(err)) {
err:
                        while ((skb = __skb_dequeue(&tmpq)))
                                __skb_queue_tail(&errq, skb);
                        dev->stats.rx_errors++;
                        i = queue->rx.rsp_cons;
                        continue;
                }

                skb = __skb_dequeue(&tmpq);

                if (extras[XEN_NETIF_EXTRA_TYPE_GSO - 1].type) {
                        struct xen_netif_extra_info *gso;

                        gso = &extras[XEN_NETIF_EXTRA_TYPE_GSO - 1];
                        if (unlikely(xennet_set_skb_gso(skb, gso))) {
                                __skb_queue_head(&tmpq, skb);
                                queue->rx.rsp_cons += skb_queue_len(&tmpq);
                                goto err;
                        }
                }

                NETFRONT_SKB_CB(skb)->pull_to = rx->status;
                if (NETFRONT_SKB_CB(skb)->pull_to > RX_COPY_THRESHOLD)
                        NETFRONT_SKB_CB(skb)->pull_to = RX_COPY_THRESHOLD;

                skb_frag_off_set(&skb_shinfo(skb)->frags[0], rx->offset);
                skb_frag_size_set(&skb_shinfo(skb)->frags[0], rx->status);
                skb->data_len = rx->status;
                skb->len += rx->status;

                if (unlikely(xennet_fill_frags(queue, skb, &tmpq)))
                        goto err;

                if (rx->flags & XEN_NETRXF_csum_blank)
                        skb->ip_summed = CHECKSUM_PARTIAL;
                else if (rx->flags & XEN_NETRXF_data_validated)
                        skb->ip_summed = CHECKSUM_UNNECESSARY;

                __skb_queue_tail(&rxq, skb);

                i = ++queue->rx.rsp_cons;
                work_done++;
        }

        __skb_queue_purge(&errq);

        work_done -= handle_incoming_queue(queue, &rxq);

        xennet_alloc_rx_buffers(queue);

        if (work_done < budget) {
                int more_to_do = 0;

                napi_complete_done(napi, work_done);

                RING_FINAL_CHECK_FOR_RESPONSES(&queue->rx, more_to_do);
                if (more_to_do)
                        napi_schedule(napi);
        }

        spin_unlock(&queue->rx_lock);

        return work_done;
}
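
/*
 * The work_done < budget exit above is the usual NAPI completion
 * handshake: napi_complete_done() leaves polled mode, and
 * RING_FINAL_CHECK_FOR_RESPONSES() re-arms the ring's rsp_event so the
 * backend will raise an event for the next response, then re-checks
 * for responses that raced in meanwhile, rescheduling the poll if any
 * did.
 */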
Bennieston RING_FINAL_CHECK_FOR_RESPONSES(&queue->rx, more_to_do); 10786a6dc08fSDavid Vrabel if (more_to_do) 10796a6dc08fSDavid Vrabel napi_schedule(napi); 10800d160211SJeremy Fitzhardinge } 10810d160211SJeremy Fitzhardinge 10822688fcb7SAndrew J. Bennieston spin_unlock(&queue->rx_lock); 10830d160211SJeremy Fitzhardinge 1084bea3348eSStephen Hemminger return work_done; 10850d160211SJeremy Fitzhardinge } 10860d160211SJeremy Fitzhardinge 10870d160211SJeremy Fitzhardinge static int xennet_change_mtu(struct net_device *dev, int mtu) 10880d160211SJeremy Fitzhardinge { 10890c36820eSJonathan Davies int max = xennet_can_sg(dev) ? XEN_NETIF_MAX_TX_SIZE : ETH_DATA_LEN; 10900d160211SJeremy Fitzhardinge 10910d160211SJeremy Fitzhardinge if (mtu > max) 10920d160211SJeremy Fitzhardinge return -EINVAL; 10930d160211SJeremy Fitzhardinge dev->mtu = mtu; 10940d160211SJeremy Fitzhardinge return 0; 10950d160211SJeremy Fitzhardinge } 10960d160211SJeremy Fitzhardinge 1097bc1f4470Sstephen hemminger static void xennet_get_stats64(struct net_device *dev, 1098e00f85beSstephen hemminger struct rtnl_link_stats64 *tot) 1099e00f85beSstephen hemminger { 1100e00f85beSstephen hemminger struct netfront_info *np = netdev_priv(dev); 1101e00f85beSstephen hemminger int cpu; 1102e00f85beSstephen hemminger 1103e00f85beSstephen hemminger for_each_possible_cpu(cpu) { 1104900e1833SDavid Vrabel struct netfront_stats *rx_stats = per_cpu_ptr(np->rx_stats, cpu); 1105900e1833SDavid Vrabel struct netfront_stats *tx_stats = per_cpu_ptr(np->tx_stats, cpu); 1106e00f85beSstephen hemminger u64 rx_packets, rx_bytes, tx_packets, tx_bytes; 1107e00f85beSstephen hemminger unsigned int start; 1108e00f85beSstephen hemminger 1109e00f85beSstephen hemminger do { 1110900e1833SDavid Vrabel start = u64_stats_fetch_begin_irq(&tx_stats->syncp); 1111900e1833SDavid Vrabel tx_packets = tx_stats->packets; 1112900e1833SDavid Vrabel tx_bytes = tx_stats->bytes; 1113900e1833SDavid Vrabel } while (u64_stats_fetch_retry_irq(&tx_stats->syncp, start)); 1114e00f85beSstephen hemminger 1115900e1833SDavid Vrabel do { 1116900e1833SDavid Vrabel start = u64_stats_fetch_begin_irq(&rx_stats->syncp); 1117900e1833SDavid Vrabel rx_packets = rx_stats->packets; 1118900e1833SDavid Vrabel rx_bytes = rx_stats->bytes; 1119900e1833SDavid Vrabel } while (u64_stats_fetch_retry_irq(&rx_stats->syncp, start)); 1120e00f85beSstephen hemminger 1121e00f85beSstephen hemminger tot->rx_packets += rx_packets; 1122e00f85beSstephen hemminger tot->tx_packets += tx_packets; 1123e00f85beSstephen hemminger tot->rx_bytes += rx_bytes; 1124e00f85beSstephen hemminger tot->tx_bytes += tx_bytes; 1125e00f85beSstephen hemminger } 1126e00f85beSstephen hemminger 1127e00f85beSstephen hemminger tot->rx_errors = dev->stats.rx_errors; 1128e00f85beSstephen hemminger tot->tx_dropped = dev->stats.tx_dropped; 1129e00f85beSstephen hemminger } 1130e00f85beSstephen hemminger 11312688fcb7SAndrew J. Bennieston static void xennet_release_tx_bufs(struct netfront_queue *queue) 11320d160211SJeremy Fitzhardinge { 11330d160211SJeremy Fitzhardinge struct sk_buff *skb; 11340d160211SJeremy Fitzhardinge int i; 11350d160211SJeremy Fitzhardinge 11360d160211SJeremy Fitzhardinge for (i = 0; i < NET_TX_RING_SIZE; i++) { 11370d160211SJeremy Fitzhardinge /* Skip over entries which are actually freelist references */ 11382688fcb7SAndrew J. Bennieston if (skb_entry_is_link(&queue->tx_skbs[i])) 11390d160211SJeremy Fitzhardinge continue; 11400d160211SJeremy Fitzhardinge 11412688fcb7SAndrew J. 
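/*
 * As in xennet_release_rx_bufs() below, take a page reference before
 * ending foreign access so the page cannot go away while grant
 * revocation is still pending.
 */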
Bennieston skb = queue->tx_skbs[i].skb; 11422688fcb7SAndrew J. Bennieston get_page(queue->grant_tx_page[i]); 11432688fcb7SAndrew J. Bennieston gnttab_end_foreign_access(queue->grant_tx_ref[i], 1144cefe0078SAnnie Li GNTMAP_readonly, 11452688fcb7SAndrew J. Bennieston (unsigned long)page_address(queue->grant_tx_page[i])); 11462688fcb7SAndrew J. Bennieston queue->grant_tx_page[i] = NULL; 11472688fcb7SAndrew J. Bennieston queue->grant_tx_ref[i] = GRANT_INVALID_REF; 11482688fcb7SAndrew J. Bennieston add_id_to_freelist(&queue->tx_skb_freelist, queue->tx_skbs, i); 11490d160211SJeremy Fitzhardinge dev_kfree_skb_irq(skb); 11500d160211SJeremy Fitzhardinge } 11510d160211SJeremy Fitzhardinge } 11520d160211SJeremy Fitzhardinge 11532688fcb7SAndrew J. Bennieston static void xennet_release_rx_bufs(struct netfront_queue *queue) 11540d160211SJeremy Fitzhardinge { 11550d160211SJeremy Fitzhardinge int id, ref; 11560d160211SJeremy Fitzhardinge 11572688fcb7SAndrew J. Bennieston spin_lock_bh(&queue->rx_lock); 11580d160211SJeremy Fitzhardinge 11590d160211SJeremy Fitzhardinge for (id = 0; id < NET_RX_RING_SIZE; id++) { 1160cefe0078SAnnie Li struct sk_buff *skb; 1161cefe0078SAnnie Li struct page *page; 11620d160211SJeremy Fitzhardinge 11632688fcb7SAndrew J. Bennieston skb = queue->rx_skbs[id]; 1164cefe0078SAnnie Li if (!skb) 1165cefe0078SAnnie Li continue; 1166cefe0078SAnnie Li 11672688fcb7SAndrew J. Bennieston ref = queue->grant_rx_ref[id]; 1168cefe0078SAnnie Li if (ref == GRANT_INVALID_REF) 1169cefe0078SAnnie Li continue; 1170cefe0078SAnnie Li 1171cefe0078SAnnie Li page = skb_frag_page(&skb_shinfo(skb)->frags[0]); 1172cefe0078SAnnie Li 1173cefe0078SAnnie Li /* gnttab_end_foreign_access() needs a page ref until 1174cefe0078SAnnie Li * foreign access is ended (which may be deferred). 1175cefe0078SAnnie Li */ 1176cefe0078SAnnie Li get_page(page); 1177cefe0078SAnnie Li gnttab_end_foreign_access(ref, 0, 1178cefe0078SAnnie Li (unsigned long)page_address(page)); 11792688fcb7SAndrew J. Bennieston queue->grant_rx_ref[id] = GRANT_INVALID_REF; 11800d160211SJeremy Fitzhardinge 1181cefe0078SAnnie Li kfree_skb(skb); 11820d160211SJeremy Fitzhardinge } 11830d160211SJeremy Fitzhardinge 11842688fcb7SAndrew J. 
Bennieston spin_unlock_bh(&queue->rx_lock); 11850d160211SJeremy Fitzhardinge } 11860d160211SJeremy Fitzhardinge 1187c8f44affSMichał Mirosław static netdev_features_t xennet_fix_features(struct net_device *dev, 1188c8f44affSMichał Mirosław netdev_features_t features) 11898f7b01a1SEric Dumazet { 11908f7b01a1SEric Dumazet struct netfront_info *np = netdev_priv(dev); 11918f7b01a1SEric Dumazet 11922890ea5cSJuergen Gross if (features & NETIF_F_SG && 11932890ea5cSJuergen Gross !xenbus_read_unsigned(np->xbdev->otherend, "feature-sg", 0)) 11948f7b01a1SEric Dumazet features &= ~NETIF_F_SG; 11958f7b01a1SEric Dumazet 11962890ea5cSJuergen Gross if (features & NETIF_F_IPV6_CSUM && 11972890ea5cSJuergen Gross !xenbus_read_unsigned(np->xbdev->otherend, 11982890ea5cSJuergen Gross "feature-ipv6-csum-offload", 0)) 11992c0057deSPaul Durrant features &= ~NETIF_F_IPV6_CSUM; 12002c0057deSPaul Durrant 12012890ea5cSJuergen Gross if (features & NETIF_F_TSO && 12022890ea5cSJuergen Gross !xenbus_read_unsigned(np->xbdev->otherend, "feature-gso-tcpv4", 0)) 12038f7b01a1SEric Dumazet features &= ~NETIF_F_TSO; 12048f7b01a1SEric Dumazet 12052890ea5cSJuergen Gross if (features & NETIF_F_TSO6 && 12062890ea5cSJuergen Gross !xenbus_read_unsigned(np->xbdev->otherend, "feature-gso-tcpv6", 0)) 12072c0057deSPaul Durrant features &= ~NETIF_F_TSO6; 12082c0057deSPaul Durrant 12098f7b01a1SEric Dumazet return features; 12108f7b01a1SEric Dumazet } 12118f7b01a1SEric Dumazet 1212c8f44affSMichał Mirosław static int xennet_set_features(struct net_device *dev, 1213c8f44affSMichał Mirosław netdev_features_t features) 12148f7b01a1SEric Dumazet { 12158f7b01a1SEric Dumazet if (!(features & NETIF_F_SG) && dev->mtu > ETH_DATA_LEN) { 12168f7b01a1SEric Dumazet netdev_info(dev, "Reducing MTU because no SG offload"); 12178f7b01a1SEric Dumazet dev->mtu = ETH_DATA_LEN; 12188f7b01a1SEric Dumazet } 12198f7b01a1SEric Dumazet 12208f7b01a1SEric Dumazet return 0; 12218f7b01a1SEric Dumazet } 12228f7b01a1SEric Dumazet 1223d634bf2cSWei Liu static irqreturn_t xennet_tx_interrupt(int irq, void *dev_id) 1224cf66f9d4SKonrad Rzeszutek Wilk { 12252688fcb7SAndrew J. Bennieston struct netfront_queue *queue = dev_id; 1226cf66f9d4SKonrad Rzeszutek Wilk unsigned long flags; 1227cf66f9d4SKonrad Rzeszutek Wilk 12282688fcb7SAndrew J. Bennieston spin_lock_irqsave(&queue->tx_lock, flags); 12292688fcb7SAndrew J. Bennieston xennet_tx_buf_gc(queue); 12302688fcb7SAndrew J. Bennieston spin_unlock_irqrestore(&queue->tx_lock, flags); 1231cf66f9d4SKonrad Rzeszutek Wilk 1232cf66f9d4SKonrad Rzeszutek Wilk return IRQ_HANDLED; 1233cf66f9d4SKonrad Rzeszutek Wilk } 1234cf66f9d4SKonrad Rzeszutek Wilk 1235d634bf2cSWei Liu static irqreturn_t xennet_rx_interrupt(int irq, void *dev_id) 1236d634bf2cSWei Liu { 12372688fcb7SAndrew J. Bennieston struct netfront_queue *queue = dev_id; 12382688fcb7SAndrew J. Bennieston struct net_device *dev = queue->info->netdev; 1239d634bf2cSWei Liu 1240d634bf2cSWei Liu if (likely(netif_carrier_ok(dev) && 12412688fcb7SAndrew J. Bennieston RING_HAS_UNCONSUMED_RESPONSES(&queue->rx))) 12422688fcb7SAndrew J. 
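/*
 * The hard interrupt does no RX processing itself: with the carrier up
 * and responses pending it merely schedules NAPI, and the real work
 * happens in xennet_poll() in softirq context.
 */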
Bennieston napi_schedule(&queue->napi); 1243d634bf2cSWei Liu 1244d634bf2cSWei Liu return IRQ_HANDLED; 1245d634bf2cSWei Liu } 1246d634bf2cSWei Liu 1247d634bf2cSWei Liu static irqreturn_t xennet_interrupt(int irq, void *dev_id) 1248d634bf2cSWei Liu { 1249d634bf2cSWei Liu xennet_tx_interrupt(irq, dev_id); 1250d634bf2cSWei Liu xennet_rx_interrupt(irq, dev_id); 1251d634bf2cSWei Liu return IRQ_HANDLED; 1252d634bf2cSWei Liu } 1253d634bf2cSWei Liu 1254cf66f9d4SKonrad Rzeszutek Wilk #ifdef CONFIG_NET_POLL_CONTROLLER 1255cf66f9d4SKonrad Rzeszutek Wilk static void xennet_poll_controller(struct net_device *dev) 1256cf66f9d4SKonrad Rzeszutek Wilk { 12572688fcb7SAndrew J. Bennieston /* Poll each queue */ 12582688fcb7SAndrew J. Bennieston struct netfront_info *info = netdev_priv(dev); 12592688fcb7SAndrew J. Bennieston unsigned int num_queues = dev->real_num_tx_queues; 12602688fcb7SAndrew J. Bennieston unsigned int i; 12612688fcb7SAndrew J. Bennieston for (i = 0; i < num_queues; ++i) 12622688fcb7SAndrew J. Bennieston xennet_interrupt(0, &info->queues[i]); 1263cf66f9d4SKonrad Rzeszutek Wilk } 1264cf66f9d4SKonrad Rzeszutek Wilk #endif 1265cf66f9d4SKonrad Rzeszutek Wilk 12660a0b9d2eSStephen Hemminger static const struct net_device_ops xennet_netdev_ops = { 12670a0b9d2eSStephen Hemminger .ndo_open = xennet_open, 12680a0b9d2eSStephen Hemminger .ndo_stop = xennet_close, 12690a0b9d2eSStephen Hemminger .ndo_start_xmit = xennet_start_xmit, 12700a0b9d2eSStephen Hemminger .ndo_change_mtu = xennet_change_mtu, 1271e00f85beSstephen hemminger .ndo_get_stats64 = xennet_get_stats64, 12720a0b9d2eSStephen Hemminger .ndo_set_mac_address = eth_mac_addr, 12730a0b9d2eSStephen Hemminger .ndo_validate_addr = eth_validate_addr, 1274fb507934SMichał Mirosław .ndo_fix_features = xennet_fix_features, 1275fb507934SMichał Mirosław .ndo_set_features = xennet_set_features, 12762688fcb7SAndrew J. Bennieston .ndo_select_queue = xennet_select_queue, 1277cf66f9d4SKonrad Rzeszutek Wilk #ifdef CONFIG_NET_POLL_CONTROLLER 1278cf66f9d4SKonrad Rzeszutek Wilk .ndo_poll_controller = xennet_poll_controller, 1279cf66f9d4SKonrad Rzeszutek Wilk #endif 12800a0b9d2eSStephen Hemminger }; 12810a0b9d2eSStephen Hemminger 1282900e1833SDavid Vrabel static void xennet_free_netdev(struct net_device *netdev) 1283900e1833SDavid Vrabel { 1284900e1833SDavid Vrabel struct netfront_info *np = netdev_priv(netdev); 1285900e1833SDavid Vrabel 1286900e1833SDavid Vrabel free_percpu(np->rx_stats); 1287900e1833SDavid Vrabel free_percpu(np->tx_stats); 1288900e1833SDavid Vrabel free_netdev(netdev); 1289900e1833SDavid Vrabel } 1290900e1833SDavid Vrabel 12918e0e46bbSBill Pemberton static struct net_device *xennet_create_dev(struct xenbus_device *dev) 12920d160211SJeremy Fitzhardinge { 12932688fcb7SAndrew J. Bennieston int err; 12940d160211SJeremy Fitzhardinge struct net_device *netdev; 12950d160211SJeremy Fitzhardinge struct netfront_info *np; 12960d160211SJeremy Fitzhardinge 129750ee6061SAndrew J. Bennieston netdev = alloc_etherdev_mq(sizeof(struct netfront_info), xennet_max_queues); 129841de8d4cSJoe Perches if (!netdev) 12990d160211SJeremy Fitzhardinge return ERR_PTR(-ENOMEM); 13000d160211SJeremy Fitzhardinge 13010d160211SJeremy Fitzhardinge np = netdev_priv(netdev); 13020d160211SJeremy Fitzhardinge np->xbdev = dev; 13030d160211SJeremy Fitzhardinge 13042688fcb7SAndrew J. 
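/*
 * Queues are not allocated here; they are created later, in
 * talk_to_netback() via xennet_create_queues(), once the backend's
 * "multi-queue-max-queues" limit has been read.
 */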
Bennieston np->queues = NULL; 13050d160211SJeremy Fitzhardinge 1306e00f85beSstephen hemminger err = -ENOMEM; 1307900e1833SDavid Vrabel np->rx_stats = netdev_alloc_pcpu_stats(struct netfront_stats); 1308900e1833SDavid Vrabel if (np->rx_stats == NULL) 1309900e1833SDavid Vrabel goto exit; 1310900e1833SDavid Vrabel np->tx_stats = netdev_alloc_pcpu_stats(struct netfront_stats); 1311900e1833SDavid Vrabel if (np->tx_stats == NULL) 1312e00f85beSstephen hemminger goto exit; 1313e00f85beSstephen hemminger 13140a0b9d2eSStephen Hemminger netdev->netdev_ops = &xennet_netdev_ops; 13150a0b9d2eSStephen Hemminger 1316fb507934SMichał Mirosław netdev->features = NETIF_F_IP_CSUM | NETIF_F_RXCSUM | 1317fb507934SMichał Mirosław NETIF_F_GSO_ROBUST; 13182c0057deSPaul Durrant netdev->hw_features = NETIF_F_SG | 13192c0057deSPaul Durrant NETIF_F_IPV6_CSUM | 13202c0057deSPaul Durrant NETIF_F_TSO | NETIF_F_TSO6; 13210d160211SJeremy Fitzhardinge 1322fc3e5941SIan Campbell /* 1323fc3e5941SIan Campbell * Assume that all hw features are available for now. This set 1324fc3e5941SIan Campbell * will be adjusted by the call to netdev_update_features() in 1325fc3e5941SIan Campbell * xennet_connect() which is the earliest point where we can 1326fc3e5941SIan Campbell * negotiate with the backend regarding supported features. 1327fc3e5941SIan Campbell */ 1328fc3e5941SIan Campbell netdev->features |= netdev->hw_features; 1329fc3e5941SIan Campbell 13307ad24ea4SWilfried Klaebe netdev->ethtool_ops = &xennet_ethtool_ops; 1331e1043a4bSMohammed Gamal netdev->min_mtu = ETH_MIN_MTU; 1332d0c2c997SJarod Wilson netdev->max_mtu = XEN_NETIF_MAX_TX_SIZE; 13330d160211SJeremy Fitzhardinge SET_NETDEV_DEV(netdev, &dev->dev); 13340d160211SJeremy Fitzhardinge 13350d160211SJeremy Fitzhardinge np->netdev = netdev; 13360d160211SJeremy Fitzhardinge 13370d160211SJeremy Fitzhardinge netif_carrier_off(netdev); 13380d160211SJeremy Fitzhardinge 1339c2c63310SAndrea Righi do { 1340b707fda2SEduardo Otubo xenbus_switch_state(dev, XenbusStateInitialising); 1341c2c63310SAndrea Righi err = wait_event_timeout(module_wq, 1342822fb18aSXiao Liang xenbus_read_driver_state(dev->otherend) != 1343822fb18aSXiao Liang XenbusStateClosed && 1344822fb18aSXiao Liang xenbus_read_driver_state(dev->otherend) != 1345c2c63310SAndrea Righi XenbusStateUnknown, XENNET_TIMEOUT); 1346c2c63310SAndrea Righi } while (!err); 1347c2c63310SAndrea Righi 13480d160211SJeremy Fitzhardinge return netdev; 13490d160211SJeremy Fitzhardinge 13500d160211SJeremy Fitzhardinge exit: 1351900e1833SDavid Vrabel xennet_free_netdev(netdev); 13520d160211SJeremy Fitzhardinge return ERR_PTR(err); 13530d160211SJeremy Fitzhardinge } 13540d160211SJeremy Fitzhardinge 13550d160211SJeremy Fitzhardinge /** 13560d160211SJeremy Fitzhardinge * Entry point to this code when a new device is created. Allocate the basic 13570d160211SJeremy Fitzhardinge * structures and the ring buffers for communication with the backend, and 13580d160211SJeremy Fitzhardinge * inform the backend of the appropriate details for those. 
13590d160211SJeremy Fitzhardinge */ 13608e0e46bbSBill Pemberton static int netfront_probe(struct xenbus_device *dev, 13610d160211SJeremy Fitzhardinge const struct xenbus_device_id *id) 13620d160211SJeremy Fitzhardinge { 13630d160211SJeremy Fitzhardinge int err; 13640d160211SJeremy Fitzhardinge struct net_device *netdev; 13650d160211SJeremy Fitzhardinge struct netfront_info *info; 13660d160211SJeremy Fitzhardinge 13670d160211SJeremy Fitzhardinge netdev = xennet_create_dev(dev); 13680d160211SJeremy Fitzhardinge if (IS_ERR(netdev)) { 13690d160211SJeremy Fitzhardinge err = PTR_ERR(netdev); 13700d160211SJeremy Fitzhardinge xenbus_dev_fatal(dev, err, "creating netdev"); 13710d160211SJeremy Fitzhardinge return err; 13720d160211SJeremy Fitzhardinge } 13730d160211SJeremy Fitzhardinge 13740d160211SJeremy Fitzhardinge info = netdev_priv(netdev); 13751b713e00SGreg Kroah-Hartman dev_set_drvdata(&dev->dev, info); 137627b917e5STakashi Iwai #ifdef CONFIG_SYSFS 137727b917e5STakashi Iwai info->netdev->sysfs_groups[0] = &xennet_dev_group; 137827b917e5STakashi Iwai #endif 13790d160211SJeremy Fitzhardinge 13800d160211SJeremy Fitzhardinge return 0; 13810d160211SJeremy Fitzhardinge } 13820d160211SJeremy Fitzhardinge 13830d160211SJeremy Fitzhardinge static void xennet_end_access(int ref, void *page) 13840d160211SJeremy Fitzhardinge { 13850d160211SJeremy Fitzhardinge /* This frees the page as a side-effect */ 13860d160211SJeremy Fitzhardinge if (ref != GRANT_INVALID_REF) 13870d160211SJeremy Fitzhardinge gnttab_end_foreign_access(ref, 0, (unsigned long)page); 13880d160211SJeremy Fitzhardinge } 13890d160211SJeremy Fitzhardinge 13900d160211SJeremy Fitzhardinge static void xennet_disconnect_backend(struct netfront_info *info) 13910d160211SJeremy Fitzhardinge { 13922688fcb7SAndrew J. Bennieston unsigned int i = 0; 13932688fcb7SAndrew J. Bennieston unsigned int num_queues = info->netdev->real_num_tx_queues; 13940d160211SJeremy Fitzhardinge 1395f9feb1e6SDavid Vrabel netif_carrier_off(info->netdev); 1396f9feb1e6SDavid Vrabel 13979a873c71SChas Williams for (i = 0; i < num_queues && info->queues; ++i) { 139876541869SDavid Vrabel struct netfront_queue *queue = &info->queues[i]; 139976541869SDavid Vrabel 140074470954SBoris Ostrovsky del_timer_sync(&queue->rx_refill_timer); 140174470954SBoris Ostrovsky 14022688fcb7SAndrew J. Bennieston if (queue->tx_irq && (queue->tx_irq == queue->rx_irq)) 14032688fcb7SAndrew J. Bennieston unbind_from_irqhandler(queue->tx_irq, queue); 14042688fcb7SAndrew J. Bennieston if (queue->tx_irq && (queue->tx_irq != queue->rx_irq)) { 14052688fcb7SAndrew J. Bennieston unbind_from_irqhandler(queue->tx_irq, queue); 14062688fcb7SAndrew J. Bennieston unbind_from_irqhandler(queue->rx_irq, queue); 1407d634bf2cSWei Liu } 14082688fcb7SAndrew J. Bennieston queue->tx_evtchn = queue->rx_evtchn = 0; 14092688fcb7SAndrew J. Bennieston queue->tx_irq = queue->rx_irq = 0; 14100d160211SJeremy Fitzhardinge 1411274b0455SChas Williams if (netif_running(info->netdev)) 1412f9feb1e6SDavid Vrabel napi_synchronize(&queue->napi); 1413f9feb1e6SDavid Vrabel 1414a5b5dc3cSDavid Vrabel xennet_release_tx_bufs(queue); 1415a5b5dc3cSDavid Vrabel xennet_release_rx_bufs(queue); 1416a5b5dc3cSDavid Vrabel gnttab_free_grant_references(queue->gref_tx_head); 1417a5b5dc3cSDavid Vrabel gnttab_free_grant_references(queue->gref_rx_head); 1418a5b5dc3cSDavid Vrabel 14190d160211SJeremy Fitzhardinge /* End access and free the pages */ 14202688fcb7SAndrew J. Bennieston xennet_end_access(queue->tx_ring_ref, queue->tx.sring); 14212688fcb7SAndrew J. 
Bennieston xennet_end_access(queue->rx_ring_ref, queue->rx.sring); 14220d160211SJeremy Fitzhardinge 14232688fcb7SAndrew J. Bennieston queue->tx_ring_ref = GRANT_INVALID_REF; 14242688fcb7SAndrew J. Bennieston queue->rx_ring_ref = GRANT_INVALID_REF; 14252688fcb7SAndrew J. Bennieston queue->tx.sring = NULL; 14262688fcb7SAndrew J. Bennieston queue->rx.sring = NULL; 14272688fcb7SAndrew J. Bennieston } 14280d160211SJeremy Fitzhardinge } 14290d160211SJeremy Fitzhardinge 14300d160211SJeremy Fitzhardinge /** 14310d160211SJeremy Fitzhardinge * We are reconnecting to the backend, due to a suspend/resume, or a backend 14320d160211SJeremy Fitzhardinge * driver restart. We tear down our netif structure and recreate it, but 14330d160211SJeremy Fitzhardinge * leave the device-layer structures intact so that this is transparent to the 14340d160211SJeremy Fitzhardinge * rest of the kernel. 14350d160211SJeremy Fitzhardinge */ 14360d160211SJeremy Fitzhardinge static int netfront_resume(struct xenbus_device *dev) 14370d160211SJeremy Fitzhardinge { 14381b713e00SGreg Kroah-Hartman struct netfront_info *info = dev_get_drvdata(&dev->dev); 14390d160211SJeremy Fitzhardinge 14400d160211SJeremy Fitzhardinge dev_dbg(&dev->dev, "%s\n", dev->nodename); 14410d160211SJeremy Fitzhardinge 14420d160211SJeremy Fitzhardinge xennet_disconnect_backend(info); 14430d160211SJeremy Fitzhardinge return 0; 14440d160211SJeremy Fitzhardinge } 14450d160211SJeremy Fitzhardinge 14460d160211SJeremy Fitzhardinge static int xen_net_read_mac(struct xenbus_device *dev, u8 mac[]) 14470d160211SJeremy Fitzhardinge { 14480d160211SJeremy Fitzhardinge char *s, *e, *macstr; 14490d160211SJeremy Fitzhardinge int i; 14500d160211SJeremy Fitzhardinge 14510d160211SJeremy Fitzhardinge macstr = s = xenbus_read(XBT_NIL, dev->nodename, "mac", NULL); 14520d160211SJeremy Fitzhardinge if (IS_ERR(macstr)) 14530d160211SJeremy Fitzhardinge return PTR_ERR(macstr); 14540d160211SJeremy Fitzhardinge 14550d160211SJeremy Fitzhardinge for (i = 0; i < ETH_ALEN; i++) { 14560d160211SJeremy Fitzhardinge mac[i] = simple_strtoul(s, &e, 16); 14570d160211SJeremy Fitzhardinge if ((s == e) || (*e != ((i == ETH_ALEN-1) ? '\0' : ':'))) { 14580d160211SJeremy Fitzhardinge kfree(macstr); 14590d160211SJeremy Fitzhardinge return -ENOENT; 14600d160211SJeremy Fitzhardinge } 14610d160211SJeremy Fitzhardinge s = e+1; 14620d160211SJeremy Fitzhardinge } 14630d160211SJeremy Fitzhardinge 14640d160211SJeremy Fitzhardinge kfree(macstr); 14650d160211SJeremy Fitzhardinge return 0; 14660d160211SJeremy Fitzhardinge } 14670d160211SJeremy Fitzhardinge 14682688fcb7SAndrew J. Bennieston static int setup_netfront_single(struct netfront_queue *queue) 1469d634bf2cSWei Liu { 1470d634bf2cSWei Liu int err; 1471d634bf2cSWei Liu 14722688fcb7SAndrew J. Bennieston err = xenbus_alloc_evtchn(queue->info->xbdev, &queue->tx_evtchn); 1473d634bf2cSWei Liu if (err < 0) 1474d634bf2cSWei Liu goto fail; 1475d634bf2cSWei Liu 14762688fcb7SAndrew J. Bennieston err = bind_evtchn_to_irqhandler(queue->tx_evtchn, 1477d634bf2cSWei Liu xennet_interrupt, 14782688fcb7SAndrew J. Bennieston 0, queue->info->netdev->name, queue); 1479d634bf2cSWei Liu if (err < 0) 1480d634bf2cSWei Liu goto bind_fail; 14812688fcb7SAndrew J. Bennieston queue->rx_evtchn = queue->tx_evtchn; 14822688fcb7SAndrew J. Bennieston queue->rx_irq = queue->tx_irq = err; 1483d634bf2cSWei Liu 1484d634bf2cSWei Liu return 0; 1485d634bf2cSWei Liu 1486d634bf2cSWei Liu bind_fail: 14872688fcb7SAndrew J. 
Bennieston xenbus_free_evtchn(queue->info->xbdev, queue->tx_evtchn); 14882688fcb7SAndrew J. Bennieston queue->tx_evtchn = 0; 1489d634bf2cSWei Liu fail: 1490d634bf2cSWei Liu return err; 1491d634bf2cSWei Liu } 1492d634bf2cSWei Liu 14932688fcb7SAndrew J. Bennieston static int setup_netfront_split(struct netfront_queue *queue) 1494d634bf2cSWei Liu { 1495d634bf2cSWei Liu int err; 1496d634bf2cSWei Liu 14972688fcb7SAndrew J. Bennieston err = xenbus_alloc_evtchn(queue->info->xbdev, &queue->tx_evtchn); 1498d634bf2cSWei Liu if (err < 0) 1499d634bf2cSWei Liu goto fail; 15002688fcb7SAndrew J. Bennieston err = xenbus_alloc_evtchn(queue->info->xbdev, &queue->rx_evtchn); 1501d634bf2cSWei Liu if (err < 0) 1502d634bf2cSWei Liu goto alloc_rx_evtchn_fail; 1503d634bf2cSWei Liu 15042688fcb7SAndrew J. Bennieston snprintf(queue->tx_irq_name, sizeof(queue->tx_irq_name), 15052688fcb7SAndrew J. Bennieston "%s-tx", queue->name); 15062688fcb7SAndrew J. Bennieston err = bind_evtchn_to_irqhandler(queue->tx_evtchn, 1507d634bf2cSWei Liu xennet_tx_interrupt, 15082688fcb7SAndrew J. Bennieston 0, queue->tx_irq_name, queue); 1509d634bf2cSWei Liu if (err < 0) 1510d634bf2cSWei Liu goto bind_tx_fail; 15112688fcb7SAndrew J. Bennieston queue->tx_irq = err; 1512d634bf2cSWei Liu 15132688fcb7SAndrew J. Bennieston snprintf(queue->rx_irq_name, sizeof(queue->rx_irq_name), 15142688fcb7SAndrew J. Bennieston "%s-rx", queue->name); 15152688fcb7SAndrew J. Bennieston err = bind_evtchn_to_irqhandler(queue->rx_evtchn, 1516d634bf2cSWei Liu xennet_rx_interrupt, 15172688fcb7SAndrew J. Bennieston 0, queue->rx_irq_name, queue); 1518d634bf2cSWei Liu if (err < 0) 1519d634bf2cSWei Liu goto bind_rx_fail; 15202688fcb7SAndrew J. Bennieston queue->rx_irq = err; 1521d634bf2cSWei Liu 1522d634bf2cSWei Liu return 0; 1523d634bf2cSWei Liu 1524d634bf2cSWei Liu bind_rx_fail: 15252688fcb7SAndrew J. Bennieston unbind_from_irqhandler(queue->tx_irq, queue); 15262688fcb7SAndrew J. Bennieston queue->tx_irq = 0; 1527d634bf2cSWei Liu bind_tx_fail: 15282688fcb7SAndrew J. Bennieston xenbus_free_evtchn(queue->info->xbdev, queue->rx_evtchn); 15292688fcb7SAndrew J. Bennieston queue->rx_evtchn = 0; 1530d634bf2cSWei Liu alloc_rx_evtchn_fail: 15312688fcb7SAndrew J. Bennieston xenbus_free_evtchn(queue->info->xbdev, queue->tx_evtchn); 15322688fcb7SAndrew J. Bennieston queue->tx_evtchn = 0; 1533d634bf2cSWei Liu fail: 1534d634bf2cSWei Liu return err; 1535d634bf2cSWei Liu } 1536d634bf2cSWei Liu 15372688fcb7SAndrew J. Bennieston static int setup_netfront(struct xenbus_device *dev, 15382688fcb7SAndrew J. Bennieston struct netfront_queue *queue, unsigned int feature_split_evtchn) 15390d160211SJeremy Fitzhardinge { 15400d160211SJeremy Fitzhardinge struct xen_netif_tx_sring *txs; 15410d160211SJeremy Fitzhardinge struct xen_netif_rx_sring *rxs; 1542ccc9d90aSWei Liu grant_ref_t gref; 15430d160211SJeremy Fitzhardinge int err; 15440d160211SJeremy Fitzhardinge 15452688fcb7SAndrew J. Bennieston queue->tx_ring_ref = GRANT_INVALID_REF; 15462688fcb7SAndrew J. Bennieston queue->rx_ring_ref = GRANT_INVALID_REF; 15472688fcb7SAndrew J. Bennieston queue->rx.sring = NULL; 15482688fcb7SAndrew J. 
Bennieston queue->tx.sring = NULL; 15490d160211SJeremy Fitzhardinge 1550a144ff09SIan Campbell txs = (struct xen_netif_tx_sring *)get_zeroed_page(GFP_NOIO | __GFP_HIGH); 15510d160211SJeremy Fitzhardinge if (!txs) { 15520d160211SJeremy Fitzhardinge err = -ENOMEM; 15530d160211SJeremy Fitzhardinge xenbus_dev_fatal(dev, err, "allocating tx ring page"); 15540d160211SJeremy Fitzhardinge goto fail; 15550d160211SJeremy Fitzhardinge } 15560d160211SJeremy Fitzhardinge SHARED_RING_INIT(txs); 155730c5d7f0SJulien Grall FRONT_RING_INIT(&queue->tx, txs, XEN_PAGE_SIZE); 15580d160211SJeremy Fitzhardinge 1559ccc9d90aSWei Liu err = xenbus_grant_ring(dev, txs, 1, &gref); 15601ca2983aSWei Liu if (err < 0) 15611ca2983aSWei Liu goto grant_tx_ring_fail; 1562ccc9d90aSWei Liu queue->tx_ring_ref = gref; 15630d160211SJeremy Fitzhardinge 1564a144ff09SIan Campbell rxs = (struct xen_netif_rx_sring *)get_zeroed_page(GFP_NOIO | __GFP_HIGH); 15650d160211SJeremy Fitzhardinge if (!rxs) { 15660d160211SJeremy Fitzhardinge err = -ENOMEM; 15670d160211SJeremy Fitzhardinge xenbus_dev_fatal(dev, err, "allocating rx ring page"); 15681ca2983aSWei Liu goto alloc_rx_ring_fail; 15690d160211SJeremy Fitzhardinge } 15700d160211SJeremy Fitzhardinge SHARED_RING_INIT(rxs); 157130c5d7f0SJulien Grall FRONT_RING_INIT(&queue->rx, rxs, XEN_PAGE_SIZE); 15720d160211SJeremy Fitzhardinge 1573ccc9d90aSWei Liu err = xenbus_grant_ring(dev, rxs, 1, &gref); 15741ca2983aSWei Liu if (err < 0) 15751ca2983aSWei Liu goto grant_rx_ring_fail; 1576ccc9d90aSWei Liu queue->rx_ring_ref = gref; 15770d160211SJeremy Fitzhardinge 1578d634bf2cSWei Liu if (feature_split_evtchn) 15792688fcb7SAndrew J. Bennieston err = setup_netfront_split(queue); 1580d634bf2cSWei Liu /* setup single event channel if 1581d634bf2cSWei Liu * a) feature-split-event-channels == 0 1582d634bf2cSWei Liu * b) feature-split-event-channels == 1 but failed to setup 1583d634bf2cSWei Liu */ 1584d634bf2cSWei Liu if (!feature_split_evtchn || (feature_split_evtchn && err)) 15852688fcb7SAndrew J. Bennieston err = setup_netfront_single(queue); 1586d634bf2cSWei Liu 15870d160211SJeremy Fitzhardinge if (err) 15881ca2983aSWei Liu goto alloc_evtchn_fail; 15890d160211SJeremy Fitzhardinge 15900d160211SJeremy Fitzhardinge return 0; 15910d160211SJeremy Fitzhardinge 15921ca2983aSWei Liu /* If we fail to setup netfront, it is safe to just revoke access to 15931ca2983aSWei Liu * granted pages because backend is not accessing it at this point. 15941ca2983aSWei Liu */ 15951ca2983aSWei Liu alloc_evtchn_fail: 15962688fcb7SAndrew J. Bennieston gnttab_end_foreign_access_ref(queue->rx_ring_ref, 0); 15971ca2983aSWei Liu grant_rx_ring_fail: 15981ca2983aSWei Liu free_page((unsigned long)rxs); 15991ca2983aSWei Liu alloc_rx_ring_fail: 16002688fcb7SAndrew J. Bennieston gnttab_end_foreign_access_ref(queue->tx_ring_ref, 0); 16011ca2983aSWei Liu grant_tx_ring_fail: 16021ca2983aSWei Liu free_page((unsigned long)txs); 16030d160211SJeremy Fitzhardinge fail: 16040d160211SJeremy Fitzhardinge return err; 16050d160211SJeremy Fitzhardinge } 16060d160211SJeremy Fitzhardinge 16072688fcb7SAndrew J. Bennieston /* Queue-specific initialisation 16082688fcb7SAndrew J. Bennieston * This used to be done in xennet_create_dev() but must now 16092688fcb7SAndrew J. Bennieston * be run per-queue. 16102688fcb7SAndrew J. Bennieston */ 16112688fcb7SAndrew J. Bennieston static int xennet_init_queue(struct netfront_queue *queue) 16122688fcb7SAndrew J. Bennieston { 16132688fcb7SAndrew J. Bennieston unsigned short i; 16142688fcb7SAndrew J. 
Bennieston int err = 0; 161521f2706bSXiao Liang char *devid; 16162688fcb7SAndrew J. Bennieston 16172688fcb7SAndrew J. Bennieston spin_lock_init(&queue->tx_lock); 16182688fcb7SAndrew J. Bennieston spin_lock_init(&queue->rx_lock); 16192688fcb7SAndrew J. Bennieston 1620e99e88a9SKees Cook timer_setup(&queue->rx_refill_timer, rx_refill_timeout, 0); 16212688fcb7SAndrew J. Bennieston 162221f2706bSXiao Liang devid = strrchr(queue->info->xbdev->nodename, '/') + 1; 162321f2706bSXiao Liang snprintf(queue->name, sizeof(queue->name), "vif%s-q%u", 162421f2706bSXiao Liang devid, queue->id); 16258b715010SWei Liu 16262688fcb7SAndrew J. Bennieston /* Initialise tx_skbs as a free chain containing every entry. */ 16272688fcb7SAndrew J. Bennieston queue->tx_skb_freelist = 0; 16282688fcb7SAndrew J. Bennieston for (i = 0; i < NET_TX_RING_SIZE; i++) { 16292688fcb7SAndrew J. Bennieston skb_entry_set_link(&queue->tx_skbs[i], i+1); 16302688fcb7SAndrew J. Bennieston queue->grant_tx_ref[i] = GRANT_INVALID_REF; 16312688fcb7SAndrew J. Bennieston queue->grant_tx_page[i] = NULL; 16322688fcb7SAndrew J. Bennieston } 16332688fcb7SAndrew J. Bennieston 16342688fcb7SAndrew J. Bennieston /* Clear out rx_skbs */ 16352688fcb7SAndrew J. Bennieston for (i = 0; i < NET_RX_RING_SIZE; i++) { 16362688fcb7SAndrew J. Bennieston queue->rx_skbs[i] = NULL; 16372688fcb7SAndrew J. Bennieston queue->grant_rx_ref[i] = GRANT_INVALID_REF; 16382688fcb7SAndrew J. Bennieston } 16392688fcb7SAndrew J. Bennieston 16402688fcb7SAndrew J. Bennieston /* A grant for every tx ring slot */ 16411f3c2ebaSDavid Vrabel if (gnttab_alloc_grant_references(NET_TX_RING_SIZE, 16422688fcb7SAndrew J. Bennieston &queue->gref_tx_head) < 0) { 16432688fcb7SAndrew J. Bennieston pr_alert("can't alloc tx grant refs\n"); 16442688fcb7SAndrew J. Bennieston err = -ENOMEM; 16452688fcb7SAndrew J. Bennieston goto exit; 16462688fcb7SAndrew J. Bennieston } 16472688fcb7SAndrew J. Bennieston 16482688fcb7SAndrew J. Bennieston /* A grant for every rx ring slot */ 16491f3c2ebaSDavid Vrabel if (gnttab_alloc_grant_references(NET_RX_RING_SIZE, 16502688fcb7SAndrew J. Bennieston &queue->gref_rx_head) < 0) { 16512688fcb7SAndrew J. Bennieston pr_alert("can't alloc rx grant refs\n"); 16522688fcb7SAndrew J. Bennieston err = -ENOMEM; 16532688fcb7SAndrew J. Bennieston goto exit_free_tx; 16542688fcb7SAndrew J. Bennieston } 16552688fcb7SAndrew J. Bennieston 16562688fcb7SAndrew J. Bennieston return 0; 16572688fcb7SAndrew J. Bennieston 16582688fcb7SAndrew J. Bennieston exit_free_tx: 16592688fcb7SAndrew J. Bennieston gnttab_free_grant_references(queue->gref_tx_head); 16602688fcb7SAndrew J. Bennieston exit: 16612688fcb7SAndrew J. Bennieston return err; 16622688fcb7SAndrew J. Bennieston } 16632688fcb7SAndrew J. Bennieston 166450ee6061SAndrew J. Bennieston static int write_queue_xenstore_keys(struct netfront_queue *queue, 166550ee6061SAndrew J. Bennieston struct xenbus_transaction *xbt, int write_hierarchical) 166650ee6061SAndrew J. Bennieston { 166750ee6061SAndrew J. Bennieston /* Write the queue-specific keys into XenStore in the traditional 166850ee6061SAndrew J. Bennieston * way for a single queue, or in a queue subkeys for multiple 166950ee6061SAndrew J. Bennieston * queues. 167050ee6061SAndrew J. Bennieston */ 167150ee6061SAndrew J. Bennieston struct xenbus_device *dev = queue->info->xbdev; 167250ee6061SAndrew J. Bennieston int err; 167350ee6061SAndrew J. Bennieston const char *message; 167450ee6061SAndrew J. Bennieston char *path; 167550ee6061SAndrew J. Bennieston size_t pathsize; 167650ee6061SAndrew J. 
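/*
 * For example (paths illustrative only): with write_hierarchical the
 * keys for queue 1 of frontend node "device/vif/0" land under
 * "device/vif/0/queue-1/" (tx-ring-ref, rx-ring-ref, event channels),
 * while a single-queue frontend writes the same keys directly under
 * "device/vif/0".
 */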
Bennieston 167750ee6061SAndrew J. Bennieston /* Choose the correct place to write the keys */ 167850ee6061SAndrew J. Bennieston if (write_hierarchical) { 167950ee6061SAndrew J. Bennieston pathsize = strlen(dev->nodename) + 10; 168050ee6061SAndrew J. Bennieston path = kzalloc(pathsize, GFP_KERNEL); 168150ee6061SAndrew J. Bennieston if (!path) { 168250ee6061SAndrew J. Bennieston err = -ENOMEM; 168350ee6061SAndrew J. Bennieston message = "out of memory while writing ring references"; 168450ee6061SAndrew J. Bennieston goto error; 168550ee6061SAndrew J. Bennieston } 168650ee6061SAndrew J. Bennieston snprintf(path, pathsize, "%s/queue-%u", 168750ee6061SAndrew J. Bennieston dev->nodename, queue->id); 168850ee6061SAndrew J. Bennieston } else { 168950ee6061SAndrew J. Bennieston path = (char *)dev->nodename; 169050ee6061SAndrew J. Bennieston } 169150ee6061SAndrew J. Bennieston 169250ee6061SAndrew J. Bennieston /* Write ring references */ 169350ee6061SAndrew J. Bennieston err = xenbus_printf(*xbt, path, "tx-ring-ref", "%u", 169450ee6061SAndrew J. Bennieston queue->tx_ring_ref); 169550ee6061SAndrew J. Bennieston if (err) { 169650ee6061SAndrew J. Bennieston message = "writing tx-ring-ref"; 169750ee6061SAndrew J. Bennieston goto error; 169850ee6061SAndrew J. Bennieston } 169950ee6061SAndrew J. Bennieston 170050ee6061SAndrew J. Bennieston err = xenbus_printf(*xbt, path, "rx-ring-ref", "%u", 170150ee6061SAndrew J. Bennieston queue->rx_ring_ref); 170250ee6061SAndrew J. Bennieston if (err) { 170350ee6061SAndrew J. Bennieston message = "writing rx-ring-ref"; 170450ee6061SAndrew J. Bennieston goto error; 170550ee6061SAndrew J. Bennieston } 170650ee6061SAndrew J. Bennieston 170750ee6061SAndrew J. Bennieston /* Write event channels; taking into account both shared 170850ee6061SAndrew J. Bennieston * and split event channel scenarios. 170950ee6061SAndrew J. Bennieston */ 171050ee6061SAndrew J. Bennieston if (queue->tx_evtchn == queue->rx_evtchn) { 171150ee6061SAndrew J. Bennieston /* Shared event channel */ 171250ee6061SAndrew J. Bennieston err = xenbus_printf(*xbt, path, 171350ee6061SAndrew J. Bennieston "event-channel", "%u", queue->tx_evtchn); 171450ee6061SAndrew J. Bennieston if (err) { 171550ee6061SAndrew J. Bennieston message = "writing event-channel"; 171650ee6061SAndrew J. Bennieston goto error; 171750ee6061SAndrew J. Bennieston } 171850ee6061SAndrew J. Bennieston } else { 171950ee6061SAndrew J. Bennieston /* Split event channels */ 172050ee6061SAndrew J. Bennieston err = xenbus_printf(*xbt, path, 172150ee6061SAndrew J. Bennieston "event-channel-tx", "%u", queue->tx_evtchn); 172250ee6061SAndrew J. Bennieston if (err) { 172350ee6061SAndrew J. Bennieston message = "writing event-channel-tx"; 172450ee6061SAndrew J. Bennieston goto error; 172550ee6061SAndrew J. Bennieston } 172650ee6061SAndrew J. Bennieston 172750ee6061SAndrew J. Bennieston err = xenbus_printf(*xbt, path, 172850ee6061SAndrew J. Bennieston "event-channel-rx", "%u", queue->rx_evtchn); 172950ee6061SAndrew J. Bennieston if (err) { 173050ee6061SAndrew J. Bennieston message = "writing event-channel-rx"; 173150ee6061SAndrew J. Bennieston goto error; 173250ee6061SAndrew J. Bennieston } 173350ee6061SAndrew J. Bennieston } 173450ee6061SAndrew J. Bennieston 173550ee6061SAndrew J. Bennieston if (write_hierarchical) 173650ee6061SAndrew J. Bennieston kfree(path); 173750ee6061SAndrew J. Bennieston return 0; 173850ee6061SAndrew J. Bennieston 173950ee6061SAndrew J. Bennieston error: 174050ee6061SAndrew J. 
Bennieston if (write_hierarchical) 174150ee6061SAndrew J. Bennieston kfree(path); 174250ee6061SAndrew J. Bennieston xenbus_dev_fatal(dev, err, "%s", message); 174350ee6061SAndrew J. Bennieston return err; 174450ee6061SAndrew J. Bennieston } 174550ee6061SAndrew J. Bennieston 1746ce58725fSDavid Vrabel static void xennet_destroy_queues(struct netfront_info *info) 1747ce58725fSDavid Vrabel { 1748ce58725fSDavid Vrabel unsigned int i; 1749ce58725fSDavid Vrabel 1750ce58725fSDavid Vrabel for (i = 0; i < info->netdev->real_num_tx_queues; i++) { 1751ce58725fSDavid Vrabel struct netfront_queue *queue = &info->queues[i]; 1752ce58725fSDavid Vrabel 1753ce58725fSDavid Vrabel if (netif_running(info->netdev)) 1754ce58725fSDavid Vrabel napi_disable(&queue->napi); 1755ce58725fSDavid Vrabel netif_napi_del(&queue->napi); 1756ce58725fSDavid Vrabel } 1757ce58725fSDavid Vrabel 1758ce58725fSDavid Vrabel kfree(info->queues); 1759ce58725fSDavid Vrabel info->queues = NULL; 1760ce58725fSDavid Vrabel } 1761ce58725fSDavid Vrabel 1762ce58725fSDavid Vrabel static int xennet_create_queues(struct netfront_info *info, 1763ca88ea12SJoe Jin unsigned int *num_queues) 1764ce58725fSDavid Vrabel { 1765ce58725fSDavid Vrabel unsigned int i; 1766ce58725fSDavid Vrabel int ret; 1767ce58725fSDavid Vrabel 1768ca88ea12SJoe Jin info->queues = kcalloc(*num_queues, sizeof(struct netfront_queue), 1769ce58725fSDavid Vrabel GFP_KERNEL); 1770ce58725fSDavid Vrabel if (!info->queues) 1771ce58725fSDavid Vrabel return -ENOMEM; 1772ce58725fSDavid Vrabel 1773ca88ea12SJoe Jin for (i = 0; i < *num_queues; i++) { 1774ce58725fSDavid Vrabel struct netfront_queue *queue = &info->queues[i]; 1775ce58725fSDavid Vrabel 1776ce58725fSDavid Vrabel queue->id = i; 1777ce58725fSDavid Vrabel queue->info = info; 1778ce58725fSDavid Vrabel 1779ce58725fSDavid Vrabel ret = xennet_init_queue(queue); 1780ce58725fSDavid Vrabel if (ret < 0) { 1781f599c64fSRoss Lagerwall dev_warn(&info->xbdev->dev, 178269cb8524SDavid Vrabel "only created %d queues\n", i); 1783ca88ea12SJoe Jin *num_queues = i; 1784ce58725fSDavid Vrabel break; 1785ce58725fSDavid Vrabel } 1786ce58725fSDavid Vrabel 1787ce58725fSDavid Vrabel netif_napi_add(queue->info->netdev, &queue->napi, 1788ce58725fSDavid Vrabel xennet_poll, 64); 1789ce58725fSDavid Vrabel if (netif_running(info->netdev)) 1790ce58725fSDavid Vrabel napi_enable(&queue->napi); 1791ce58725fSDavid Vrabel } 1792ce58725fSDavid Vrabel 1793ca88ea12SJoe Jin netif_set_real_num_tx_queues(info->netdev, *num_queues); 1794ce58725fSDavid Vrabel 1795ca88ea12SJoe Jin if (*num_queues == 0) { 1796f599c64fSRoss Lagerwall dev_err(&info->xbdev->dev, "no queues\n"); 1797ce58725fSDavid Vrabel return -EINVAL; 1798ce58725fSDavid Vrabel } 1799ce58725fSDavid Vrabel return 0; 1800ce58725fSDavid Vrabel } 1801ce58725fSDavid Vrabel 18020d160211SJeremy Fitzhardinge /* Common code used when first setting up, and when resuming. */ 1803f502bf2bSIan Campbell static int talk_to_netback(struct xenbus_device *dev, 18040d160211SJeremy Fitzhardinge struct netfront_info *info) 18050d160211SJeremy Fitzhardinge { 18060d160211SJeremy Fitzhardinge const char *message; 18070d160211SJeremy Fitzhardinge struct xenbus_transaction xbt; 18080d160211SJeremy Fitzhardinge int err; 18092688fcb7SAndrew J. Bennieston unsigned int feature_split_evtchn; 18102688fcb7SAndrew J. Bennieston unsigned int i = 0; 181150ee6061SAndrew J. Bennieston unsigned int max_queues = 0; 18122688fcb7SAndrew J. Bennieston struct netfront_queue *queue = NULL; 18132688fcb7SAndrew J. 
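/*
 * Handshake outline: read the backend's queue and event-channel
 * capabilities, create the queues, set up a shared ring plus event
 * channel(s) for each, then publish all keys in a single xenbus
 * transaction, retrying the transaction on -EAGAIN.
 */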
Bennieston unsigned int num_queues = 1; 18140d160211SJeremy Fitzhardinge 18152688fcb7SAndrew J. Bennieston info->netdev->irq = 0; 18162688fcb7SAndrew J. Bennieston 181750ee6061SAndrew J. Bennieston /* Check if backend supports multiple queues */ 18182890ea5cSJuergen Gross max_queues = xenbus_read_unsigned(info->xbdev->otherend, 18192890ea5cSJuergen Gross "multi-queue-max-queues", 1); 182050ee6061SAndrew J. Bennieston num_queues = min(max_queues, xennet_max_queues); 182150ee6061SAndrew J. Bennieston 18222688fcb7SAndrew J. Bennieston /* Check feature-split-event-channels */ 18232890ea5cSJuergen Gross feature_split_evtchn = xenbus_read_unsigned(info->xbdev->otherend, 18242890ea5cSJuergen Gross "feature-split-event-channels", 0); 18252688fcb7SAndrew J. Bennieston 18262688fcb7SAndrew J. Bennieston /* Read mac addr. */ 18272688fcb7SAndrew J. Bennieston err = xen_net_read_mac(dev, info->netdev->dev_addr); 18282688fcb7SAndrew J. Bennieston if (err) { 18292688fcb7SAndrew J. Bennieston xenbus_dev_fatal(dev, err, "parsing %s/mac", dev->nodename); 1830cb257783SRoss Lagerwall goto out_unlocked; 18312688fcb7SAndrew J. Bennieston } 18322688fcb7SAndrew J. Bennieston 1833f599c64fSRoss Lagerwall rtnl_lock(); 1834ce58725fSDavid Vrabel if (info->queues) 1835ce58725fSDavid Vrabel xennet_destroy_queues(info); 1836ce58725fSDavid Vrabel 1837ca88ea12SJoe Jin err = xennet_create_queues(info, &num_queues); 1838e2e004acSRoss Lagerwall if (err < 0) { 1839e2e004acSRoss Lagerwall xenbus_dev_fatal(dev, err, "creating queues"); 1840e2e004acSRoss Lagerwall kfree(info->queues); 1841e2e004acSRoss Lagerwall info->queues = NULL; 1842e2e004acSRoss Lagerwall goto out; 1843e2e004acSRoss Lagerwall } 1844f599c64fSRoss Lagerwall rtnl_unlock(); 18452688fcb7SAndrew J. Bennieston 18462688fcb7SAndrew J. Bennieston /* Create shared ring, alloc event channel -- for each queue */ 18472688fcb7SAndrew J. Bennieston for (i = 0; i < num_queues; ++i) { 18482688fcb7SAndrew J. Bennieston queue = &info->queues[i]; 18492688fcb7SAndrew J. Bennieston err = setup_netfront(dev, queue, feature_split_evtchn); 1850e2e004acSRoss Lagerwall if (err) 18512688fcb7SAndrew J. Bennieston goto destroy_ring; 18522688fcb7SAndrew J. Bennieston } 18530d160211SJeremy Fitzhardinge 18540d160211SJeremy Fitzhardinge again: 18550d160211SJeremy Fitzhardinge err = xenbus_transaction_start(&xbt); 18560d160211SJeremy Fitzhardinge if (err) { 18570d160211SJeremy Fitzhardinge xenbus_dev_fatal(dev, err, "starting transaction"); 18580d160211SJeremy Fitzhardinge goto destroy_ring; 18590d160211SJeremy Fitzhardinge } 18600d160211SJeremy Fitzhardinge 1861812494d9Schas williams if (xenbus_exists(XBT_NIL, 1862812494d9Schas williams info->xbdev->otherend, "multi-queue-max-queues")) { 1863812494d9Schas williams /* Write the number of queues */ 1864812494d9Schas williams err = xenbus_printf(xbt, dev->nodename, 1865812494d9Schas williams "multi-queue-num-queues", "%u", num_queues); 1866812494d9Schas williams if (err) { 1867812494d9Schas williams message = "writing multi-queue-num-queues"; 1868812494d9Schas williams goto abort_transaction_no_dev_fatal; 1869812494d9Schas williams } 1870812494d9Schas williams } 1871812494d9Schas williams 187250ee6061SAndrew J. Bennieston if (num_queues == 1) { 187350ee6061SAndrew J. Bennieston err = write_queue_xenstore_keys(&info->queues[0], &xbt, 0); /* flat */ 187450ee6061SAndrew J. Bennieston if (err) 187550ee6061SAndrew J. Bennieston goto abort_transaction_no_dev_fatal; 1876d634bf2cSWei Liu } else { 187750ee6061SAndrew J. 
Bennieston /* Write the keys for each queue */ 187850ee6061SAndrew J. Bennieston for (i = 0; i < num_queues; ++i) { 187950ee6061SAndrew J. Bennieston queue = &info->queues[i]; 188050ee6061SAndrew J. Bennieston err = write_queue_xenstore_keys(queue, &xbt, 1); /* hierarchical */ 188150ee6061SAndrew J. Bennieston if (err) 188250ee6061SAndrew J. Bennieston goto abort_transaction_no_dev_fatal; 1883d634bf2cSWei Liu } 1884d634bf2cSWei Liu } 18850d160211SJeremy Fitzhardinge 188650ee6061SAndrew J. Bennieston /* The remaining keys are not queue-specific */ 18870d160211SJeremy Fitzhardinge err = xenbus_printf(xbt, dev->nodename, "request-rx-copy", "%u", 18880d160211SJeremy Fitzhardinge 1); 18890d160211SJeremy Fitzhardinge if (err) { 18900d160211SJeremy Fitzhardinge message = "writing request-rx-copy"; 18910d160211SJeremy Fitzhardinge goto abort_transaction; 18920d160211SJeremy Fitzhardinge } 18930d160211SJeremy Fitzhardinge 18940d160211SJeremy Fitzhardinge err = xenbus_printf(xbt, dev->nodename, "feature-rx-notify", "%d", 1); 18950d160211SJeremy Fitzhardinge if (err) { 18960d160211SJeremy Fitzhardinge message = "writing feature-rx-notify"; 18970d160211SJeremy Fitzhardinge goto abort_transaction; 18980d160211SJeremy Fitzhardinge } 18990d160211SJeremy Fitzhardinge 19000d160211SJeremy Fitzhardinge err = xenbus_printf(xbt, dev->nodename, "feature-sg", "%d", 1); 19010d160211SJeremy Fitzhardinge if (err) { 19020d160211SJeremy Fitzhardinge message = "writing feature-sg"; 19030d160211SJeremy Fitzhardinge goto abort_transaction; 19040d160211SJeremy Fitzhardinge } 19050d160211SJeremy Fitzhardinge 19060d160211SJeremy Fitzhardinge err = xenbus_printf(xbt, dev->nodename, "feature-gso-tcpv4", "%d", 1); 19070d160211SJeremy Fitzhardinge if (err) { 19080d160211SJeremy Fitzhardinge message = "writing feature-gso-tcpv4"; 19090d160211SJeremy Fitzhardinge goto abort_transaction; 19100d160211SJeremy Fitzhardinge } 19110d160211SJeremy Fitzhardinge 19122c0057deSPaul Durrant err = xenbus_write(xbt, dev->nodename, "feature-gso-tcpv6", "1"); 19132c0057deSPaul Durrant if (err) { 19142c0057deSPaul Durrant message = "writing feature-gso-tcpv6"; 19152c0057deSPaul Durrant goto abort_transaction; 19162c0057deSPaul Durrant } 19172c0057deSPaul Durrant 19182c0057deSPaul Durrant err = xenbus_write(xbt, dev->nodename, "feature-ipv6-csum-offload", 19192c0057deSPaul Durrant "1"); 19202c0057deSPaul Durrant if (err) { 19212c0057deSPaul Durrant message = "writing feature-ipv6-csum-offload"; 19222c0057deSPaul Durrant goto abort_transaction; 19232c0057deSPaul Durrant } 19242c0057deSPaul Durrant 19250d160211SJeremy Fitzhardinge err = xenbus_transaction_end(xbt, 0); 19260d160211SJeremy Fitzhardinge if (err) { 19270d160211SJeremy Fitzhardinge if (err == -EAGAIN) 19280d160211SJeremy Fitzhardinge goto again; 19290d160211SJeremy Fitzhardinge xenbus_dev_fatal(dev, err, "completing transaction"); 19300d160211SJeremy Fitzhardinge goto destroy_ring; 19310d160211SJeremy Fitzhardinge } 19320d160211SJeremy Fitzhardinge 19330d160211SJeremy Fitzhardinge return 0; 19340d160211SJeremy Fitzhardinge 19350d160211SJeremy Fitzhardinge abort_transaction: 19360d160211SJeremy Fitzhardinge xenbus_dev_fatal(dev, err, "%s", message); 193750ee6061SAndrew J. Bennieston abort_transaction_no_dev_fatal: 193850ee6061SAndrew J. 
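/*
 * Ending the transaction with a nonzero abort flag discards every key
 * written above, so a failed handshake leaves no partial configuration
 * behind in xenstore.
 */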
Bennieston xenbus_transaction_end(xbt, 1); 19390d160211SJeremy Fitzhardinge destroy_ring: 19400d160211SJeremy Fitzhardinge xennet_disconnect_backend(info); 1941f599c64fSRoss Lagerwall rtnl_lock(); 1942e2e004acSRoss Lagerwall xennet_destroy_queues(info); 19430d160211SJeremy Fitzhardinge out: 1944f599c64fSRoss Lagerwall rtnl_unlock(); 1945cb257783SRoss Lagerwall out_unlocked: 1946d86b5672SVitaly Kuznetsov device_unregister(&dev->dev); 19470d160211SJeremy Fitzhardinge return err; 19480d160211SJeremy Fitzhardinge } 19490d160211SJeremy Fitzhardinge 19500d160211SJeremy Fitzhardinge static int xennet_connect(struct net_device *dev) 19510d160211SJeremy Fitzhardinge { 19520d160211SJeremy Fitzhardinge struct netfront_info *np = netdev_priv(dev); 19532688fcb7SAndrew J. Bennieston unsigned int num_queues = 0; 1954a5b5dc3cSDavid Vrabel int err; 19552688fcb7SAndrew J. Bennieston unsigned int j = 0; 19562688fcb7SAndrew J. Bennieston struct netfront_queue *queue = NULL; 19570d160211SJeremy Fitzhardinge 19582890ea5cSJuergen Gross if (!xenbus_read_unsigned(np->xbdev->otherend, "feature-rx-copy", 0)) { 19590d160211SJeremy Fitzhardinge dev_info(&dev->dev, 1960898eb71cSJoe Perches "backend does not support copying receive path\n"); 19610d160211SJeremy Fitzhardinge return -ENODEV; 19620d160211SJeremy Fitzhardinge } 19630d160211SJeremy Fitzhardinge 1964f502bf2bSIan Campbell err = talk_to_netback(np->xbdev, np); 19650d160211SJeremy Fitzhardinge if (err) 19660d160211SJeremy Fitzhardinge return err; 19670d160211SJeremy Fitzhardinge 19682688fcb7SAndrew J. Bennieston /* talk_to_netback() sets the correct number of queues */ 19692688fcb7SAndrew J. Bennieston num_queues = dev->real_num_tx_queues; 19702688fcb7SAndrew J. Bennieston 1971f599c64fSRoss Lagerwall if (dev->reg_state == NETREG_UNINITIALIZED) { 1972f599c64fSRoss Lagerwall err = register_netdev(dev); 1973f599c64fSRoss Lagerwall if (err) { 1974f599c64fSRoss Lagerwall pr_warn("%s: register_netdev err=%d\n", __func__, err); 1975f599c64fSRoss Lagerwall device_unregister(&np->xbdev->dev); 1976f599c64fSRoss Lagerwall return err; 1977f599c64fSRoss Lagerwall } 1978f599c64fSRoss Lagerwall } 1979f599c64fSRoss Lagerwall 198045c8184cSRoss Lagerwall rtnl_lock(); 198145c8184cSRoss Lagerwall netdev_update_features(dev); 198245c8184cSRoss Lagerwall rtnl_unlock(); 198345c8184cSRoss Lagerwall 19840d160211SJeremy Fitzhardinge /* 1985a5b5dc3cSDavid Vrabel * All public and private state should now be sane. Get 19860d160211SJeremy Fitzhardinge * ready to start sending and receiving packets and give the driver 19870d160211SJeremy Fitzhardinge * domain a kick because we've probably just requeued some 19880d160211SJeremy Fitzhardinge * packets. 19890d160211SJeremy Fitzhardinge */ 19900d160211SJeremy Fitzhardinge netif_carrier_on(np->netdev); 19912688fcb7SAndrew J. Bennieston for (j = 0; j < num_queues; ++j) { 19922688fcb7SAndrew J. Bennieston queue = &np->queues[j]; 1993f50b4076SDavid Vrabel 19942688fcb7SAndrew J. Bennieston notify_remote_via_irq(queue->tx_irq); 19952688fcb7SAndrew J. Bennieston if (queue->tx_irq != queue->rx_irq) 19962688fcb7SAndrew J. Bennieston notify_remote_via_irq(queue->rx_irq); 19970d160211SJeremy Fitzhardinge 1998f50b4076SDavid Vrabel spin_lock_irq(&queue->tx_lock); 1999f50b4076SDavid Vrabel xennet_tx_buf_gc(queue); 20002688fcb7SAndrew J. Bennieston spin_unlock_irq(&queue->tx_lock); 2001f50b4076SDavid Vrabel 2002f50b4076SDavid Vrabel spin_lock_bh(&queue->rx_lock); 2003f50b4076SDavid Vrabel xennet_alloc_rx_buffers(queue); 20042688fcb7SAndrew J. 
Bennieston spin_unlock_bh(&queue->rx_lock); 20052688fcb7SAndrew J. Bennieston } 20060d160211SJeremy Fitzhardinge 20070d160211SJeremy Fitzhardinge return 0; 20080d160211SJeremy Fitzhardinge } 20090d160211SJeremy Fitzhardinge 20100d160211SJeremy Fitzhardinge /** 20110d160211SJeremy Fitzhardinge * Callback received when the backend's state changes. 20120d160211SJeremy Fitzhardinge */ 2013f502bf2bSIan Campbell static void netback_changed(struct xenbus_device *dev, 20140d160211SJeremy Fitzhardinge enum xenbus_state backend_state) 20150d160211SJeremy Fitzhardinge { 20161b713e00SGreg Kroah-Hartman struct netfront_info *np = dev_get_drvdata(&dev->dev); 20170d160211SJeremy Fitzhardinge struct net_device *netdev = np->netdev; 20180d160211SJeremy Fitzhardinge 20190d160211SJeremy Fitzhardinge dev_dbg(&dev->dev, "%s\n", xenbus_strstate(backend_state)); 20200d160211SJeremy Fitzhardinge 20218edfe2e9SJuergen Gross wake_up_all(&module_wq); 20228edfe2e9SJuergen Gross 20230d160211SJeremy Fitzhardinge switch (backend_state) { 20240d160211SJeremy Fitzhardinge case XenbusStateInitialising: 20250d160211SJeremy Fitzhardinge case XenbusStateInitialised: 2026b78c9512SNoboru Iwamatsu case XenbusStateReconfiguring: 2027b78c9512SNoboru Iwamatsu case XenbusStateReconfigured: 20280d160211SJeremy Fitzhardinge case XenbusStateUnknown: 20290d160211SJeremy Fitzhardinge break; 20300d160211SJeremy Fitzhardinge 20310d160211SJeremy Fitzhardinge case XenbusStateInitWait: 20320d160211SJeremy Fitzhardinge if (dev->state != XenbusStateInitialising) 20330d160211SJeremy Fitzhardinge break; 20340d160211SJeremy Fitzhardinge if (xennet_connect(netdev) != 0) 20350d160211SJeremy Fitzhardinge break; 20360d160211SJeremy Fitzhardinge xenbus_switch_state(dev, XenbusStateConnected); 203708e34eb1SLaszlo Ersek break; 203808e34eb1SLaszlo Ersek 203908e34eb1SLaszlo Ersek case XenbusStateConnected: 2040ee89bab1SAmerigo Wang netdev_notify_peers(netdev); 20410d160211SJeremy Fitzhardinge break; 20420d160211SJeremy Fitzhardinge 2043bce3ea81SDavid Vrabel case XenbusStateClosed: 2044bce3ea81SDavid Vrabel if (dev->state == XenbusStateClosed) 2045bce3ea81SDavid Vrabel break; 2046a32b9d91SGustavo A. R. Silva /* Fall through - Missed the backend's CLOSING state. 
*/ 20470d160211SJeremy Fitzhardinge case XenbusStateClosing: 20480d160211SJeremy Fitzhardinge xenbus_frontend_closed(dev); 20490d160211SJeremy Fitzhardinge break; 20500d160211SJeremy Fitzhardinge } 20510d160211SJeremy Fitzhardinge } 20520d160211SJeremy Fitzhardinge 2053e0ce4af9SIan Campbell static const struct xennet_stat { 2054e0ce4af9SIan Campbell char name[ETH_GSTRING_LEN]; 2055e0ce4af9SIan Campbell u16 offset; 2056e0ce4af9SIan Campbell } xennet_stats[] = { 2057e0ce4af9SIan Campbell { 2058e0ce4af9SIan Campbell "rx_gso_checksum_fixup", 2059e0ce4af9SIan Campbell offsetof(struct netfront_info, rx_gso_checksum_fixup) 2060e0ce4af9SIan Campbell }, 2061e0ce4af9SIan Campbell }; 2062e0ce4af9SIan Campbell 2063e0ce4af9SIan Campbell static int xennet_get_sset_count(struct net_device *dev, int string_set) 2064e0ce4af9SIan Campbell { 2065e0ce4af9SIan Campbell switch (string_set) { 2066e0ce4af9SIan Campbell case ETH_SS_STATS: 2067e0ce4af9SIan Campbell return ARRAY_SIZE(xennet_stats); 2068e0ce4af9SIan Campbell default: 2069e0ce4af9SIan Campbell return -EINVAL; 2070e0ce4af9SIan Campbell } 2071e0ce4af9SIan Campbell } 2072e0ce4af9SIan Campbell 2073e0ce4af9SIan Campbell static void xennet_get_ethtool_stats(struct net_device *dev, 2074e0ce4af9SIan Campbell struct ethtool_stats *stats, u64 * data) 2075e0ce4af9SIan Campbell { 2076e0ce4af9SIan Campbell void *np = netdev_priv(dev); 2077e0ce4af9SIan Campbell int i; 2078e0ce4af9SIan Campbell 2079e0ce4af9SIan Campbell for (i = 0; i < ARRAY_SIZE(xennet_stats); i++) 20802688fcb7SAndrew J. Bennieston data[i] = atomic_read((atomic_t *)(np + xennet_stats[i].offset)); 2081e0ce4af9SIan Campbell } 2082e0ce4af9SIan Campbell 2083e0ce4af9SIan Campbell static void xennet_get_strings(struct net_device *dev, u32 stringset, u8 * data) 2084e0ce4af9SIan Campbell { 2085e0ce4af9SIan Campbell int i; 2086e0ce4af9SIan Campbell 2087e0ce4af9SIan Campbell switch (stringset) { 2088e0ce4af9SIan Campbell case ETH_SS_STATS: 2089e0ce4af9SIan Campbell for (i = 0; i < ARRAY_SIZE(xennet_stats); i++) 2090e0ce4af9SIan Campbell memcpy(data + i * ETH_GSTRING_LEN, 2091e0ce4af9SIan Campbell xennet_stats[i].name, ETH_GSTRING_LEN); 2092e0ce4af9SIan Campbell break; 2093e0ce4af9SIan Campbell } 2094e0ce4af9SIan Campbell } 2095e0ce4af9SIan Campbell 20960fc0b732SStephen Hemminger static const struct ethtool_ops xennet_ethtool_ops = 20970d160211SJeremy Fitzhardinge { 20980d160211SJeremy Fitzhardinge .get_link = ethtool_op_get_link, 2099e0ce4af9SIan Campbell 2100e0ce4af9SIan Campbell .get_sset_count = xennet_get_sset_count, 2101e0ce4af9SIan Campbell .get_ethtool_stats = xennet_get_ethtool_stats, 2102e0ce4af9SIan Campbell .get_strings = xennet_get_strings, 21030d160211SJeremy Fitzhardinge }; 21040d160211SJeremy Fitzhardinge 21050d160211SJeremy Fitzhardinge #ifdef CONFIG_SYSFS 21061f3c2ebaSDavid Vrabel static ssize_t show_rxbuf(struct device *dev, 21070d160211SJeremy Fitzhardinge struct device_attribute *attr, char *buf) 21080d160211SJeremy Fitzhardinge { 21091f3c2ebaSDavid Vrabel return sprintf(buf, "%lu\n", NET_RX_RING_SIZE); 21100d160211SJeremy Fitzhardinge } 21110d160211SJeremy Fitzhardinge 21121f3c2ebaSDavid Vrabel static ssize_t store_rxbuf(struct device *dev, 21130d160211SJeremy Fitzhardinge struct device_attribute *attr, 21140d160211SJeremy Fitzhardinge const char *buf, size_t len) 21150d160211SJeremy Fitzhardinge { 21160d160211SJeremy Fitzhardinge char *endp; 21170d160211SJeremy Fitzhardinge unsigned long target; 21180d160211SJeremy Fitzhardinge 21190d160211SJeremy Fitzhardinge if 
(!capable(CAP_NET_ADMIN)) 21200d160211SJeremy Fitzhardinge return -EPERM; 21210d160211SJeremy Fitzhardinge 21220d160211SJeremy Fitzhardinge target = simple_strtoul(buf, &endp, 0); 21230d160211SJeremy Fitzhardinge if (endp == buf) 21240d160211SJeremy Fitzhardinge return -EBADMSG; 21250d160211SJeremy Fitzhardinge 21261f3c2ebaSDavid Vrabel /* rxbuf_min and rxbuf_max are no longer configurable. */ 21270d160211SJeremy Fitzhardinge 21280d160211SJeremy Fitzhardinge return len; 21290d160211SJeremy Fitzhardinge } 21300d160211SJeremy Fitzhardinge 2131d61e4038SJoe Perches static DEVICE_ATTR(rxbuf_min, 0644, show_rxbuf, store_rxbuf); 2132d61e4038SJoe Perches static DEVICE_ATTR(rxbuf_max, 0644, show_rxbuf, store_rxbuf); 2133d61e4038SJoe Perches static DEVICE_ATTR(rxbuf_cur, 0444, show_rxbuf, NULL); 213427b917e5STakashi Iwai 213527b917e5STakashi Iwai static struct attribute *xennet_dev_attrs[] = { 213627b917e5STakashi Iwai &dev_attr_rxbuf_min.attr, 213727b917e5STakashi Iwai &dev_attr_rxbuf_max.attr, 213827b917e5STakashi Iwai &dev_attr_rxbuf_cur.attr, 213927b917e5STakashi Iwai NULL 21400d160211SJeremy Fitzhardinge }; 21410d160211SJeremy Fitzhardinge 214227b917e5STakashi Iwai static const struct attribute_group xennet_dev_group = { 214327b917e5STakashi Iwai .attrs = xennet_dev_attrs 214427b917e5STakashi Iwai }; 21450d160211SJeremy Fitzhardinge #endif /* CONFIG_SYSFS */ 21460d160211SJeremy Fitzhardinge 2147c2c63310SAndrea Righi static void xennet_bus_close(struct xenbus_device *dev) 2148c2c63310SAndrea Righi { 2149c2c63310SAndrea Righi int ret; 2150c2c63310SAndrea Righi 2151c2c63310SAndrea Righi if (xenbus_read_driver_state(dev->otherend) == XenbusStateClosed) 2152c2c63310SAndrea Righi return; 2153c2c63310SAndrea Righi do { 2154c2c63310SAndrea Righi xenbus_switch_state(dev, XenbusStateClosing); 2155c2c63310SAndrea Righi ret = wait_event_timeout(module_wq, 2156c2c63310SAndrea Righi xenbus_read_driver_state(dev->otherend) == 2157c2c63310SAndrea Righi XenbusStateClosing || 2158c2c63310SAndrea Righi xenbus_read_driver_state(dev->otherend) == 2159c2c63310SAndrea Righi XenbusStateClosed || 2160c2c63310SAndrea Righi xenbus_read_driver_state(dev->otherend) == 2161c2c63310SAndrea Righi XenbusStateUnknown, 2162c2c63310SAndrea Righi XENNET_TIMEOUT); 2163c2c63310SAndrea Righi } while (!ret); 2164c2c63310SAndrea Righi 2165c2c63310SAndrea Righi if (xenbus_read_driver_state(dev->otherend) == XenbusStateClosed) 2166c2c63310SAndrea Righi return; 2167c2c63310SAndrea Righi 2168c2c63310SAndrea Righi do { 2169c2c63310SAndrea Righi xenbus_switch_state(dev, XenbusStateClosed); 2170c2c63310SAndrea Righi ret = wait_event_timeout(module_wq, 2171c2c63310SAndrea Righi xenbus_read_driver_state(dev->otherend) == 2172c2c63310SAndrea Righi XenbusStateClosed || 2173c2c63310SAndrea Righi xenbus_read_driver_state(dev->otherend) == 2174c2c63310SAndrea Righi XenbusStateUnknown, 2175c2c63310SAndrea Righi XENNET_TIMEOUT); 2176c2c63310SAndrea Righi } while (!ret); 2177c2c63310SAndrea Righi } 2178c2c63310SAndrea Righi 21798e0e46bbSBill Pemberton static int xennet_remove(struct xenbus_device *dev) 21800d160211SJeremy Fitzhardinge { 21811b713e00SGreg Kroah-Hartman struct netfront_info *info = dev_get_drvdata(&dev->dev); 21820d160211SJeremy Fitzhardinge 2183c2c63310SAndrea Righi xennet_bus_close(dev); 21840d160211SJeremy Fitzhardinge xennet_disconnect_backend(info); 21850d160211SJeremy Fitzhardinge 2186f599c64fSRoss Lagerwall if (info->netdev->reg_state == NETREG_REGISTERED) 21876bc96d04SIan Campbell unregister_netdev(info->netdev); 21886bc96d04SIan 
Campbell 2189f599c64fSRoss Lagerwall if (info->queues) { 2190f599c64fSRoss Lagerwall rtnl_lock(); 2191ad068118SDavid Vrabel xennet_destroy_queues(info); 2192f599c64fSRoss Lagerwall rtnl_unlock(); 2193f599c64fSRoss Lagerwall } 2194900e1833SDavid Vrabel xennet_free_netdev(info->netdev); 21950d160211SJeremy Fitzhardinge 21960d160211SJeremy Fitzhardinge return 0; 21970d160211SJeremy Fitzhardinge } 21980d160211SJeremy Fitzhardinge 219995afae48SDavid Vrabel static const struct xenbus_device_id netfront_ids[] = { 220095afae48SDavid Vrabel { "vif" }, 220195afae48SDavid Vrabel { "" } 220295afae48SDavid Vrabel }; 220395afae48SDavid Vrabel 220495afae48SDavid Vrabel static struct xenbus_driver netfront_driver = { 220595afae48SDavid Vrabel .ids = netfront_ids, 22060d160211SJeremy Fitzhardinge .probe = netfront_probe, 22078e0e46bbSBill Pemberton .remove = xennet_remove, 22080d160211SJeremy Fitzhardinge .resume = netfront_resume, 2209f502bf2bSIan Campbell .otherend_changed = netback_changed, 221095afae48SDavid Vrabel }; 22110d160211SJeremy Fitzhardinge 22120d160211SJeremy Fitzhardinge static int __init netif_init(void) 22130d160211SJeremy Fitzhardinge { 22146e833587SJeremy Fitzhardinge if (!xen_domain()) 22150d160211SJeremy Fitzhardinge return -ENODEV; 22160d160211SJeremy Fitzhardinge 221751c71a3bSKonrad Rzeszutek Wilk if (!xen_has_pv_nic_devices()) 2218b9136d20SIgor Mammedov return -ENODEV; 2219b9136d20SIgor Mammedov 2220383eda32SJoe Perches pr_info("Initialising Xen virtual ethernet driver\n"); 22210d160211SJeremy Fitzhardinge 2222034702a6SJuergen Gross /* Allow as many queues as there are CPUs but at most 8 if user has not 222332a84405SWei Liu * specified a value. 222432a84405SWei Liu */ 222532a84405SWei Liu if (xennet_max_queues == 0) 2226034702a6SJuergen Gross xennet_max_queues = min_t(unsigned int, MAX_QUEUES_DEFAULT, 2227034702a6SJuergen Gross num_online_cpus()); 222850ee6061SAndrew J. Bennieston 2229ffb78a26SAl Viro return xenbus_register_frontend(&netfront_driver); 22300d160211SJeremy Fitzhardinge } 22310d160211SJeremy Fitzhardinge module_init(netif_init); 22320d160211SJeremy Fitzhardinge 22330d160211SJeremy Fitzhardinge static void __exit netif_exit(void) 22340d160211SJeremy Fitzhardinge { 2236ffb78a26SAl Viro xenbus_unregister_driver(&netfront_driver); 22370d160211SJeremy Fitzhardinge } 22380d160211SJeremy Fitzhardinge module_exit(netif_exit); 22390d160211SJeremy Fitzhardinge 22400d160211SJeremy Fitzhardinge MODULE_DESCRIPTION("Xen virtual network device frontend"); 22410d160211SJeremy Fitzhardinge MODULE_LICENSE("GPL"); 2242d2f0c52bSMark McLoughlin MODULE_ALIAS("xen:vif"); 22434f93f09bSMark McLoughlin MODULE_ALIAS("xennet"); 2244
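/*
 * Illustrative sketch, not part of the driver: the module parameter and
 * sysfs attributes defined above surface to userspace as ordinary files.
 * A minimal reader is shown below, guarded out of the build; the module
 * parameter path is standard for module_param_named(), while the
 * per-device path ("xen/vif-0") is an assumption that varies by system.
 */
#if 0
#include <stdio.h>

/* Print the single-line value stored in a sysfs attribute file. */
static void print_attr(const char *path)
{
	char buf[64];
	FILE *f = fopen(path, "r");

	if (!f) {
		perror(path);
		return;
	}
	if (fgets(buf, sizeof(buf), f))
		printf("%s = %s", path, buf);	/* value carries its own '\n' */
	fclose(f);
}

int main(void)
{
	/* Upper bound on queues per interface (module_param "max_queues"). */
	print_attr("/sys/module/xen_netfront/parameters/max_queues");
	/* Fixed RX ring size reported by show_rxbuf(); path illustrative. */
	print_attr("/sys/devices/xen/vif-0/rxbuf_cur");
	return 0;
}
#endif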