/*
 * Virtual network driver for conversing with remote driver backends.
 *
 * Copyright (c) 2002-2005, K A Fraser
 * Copyright (c) 2005, XenSource Ltd
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/if_ether.h>
#include <net/tcp.h>
#include <linux/udp.h>
#include <linux/moduleparam.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <net/ip.h>
#include <linux/bpf.h>
#include <net/page_pool/types.h>
#include <linux/bpf_trace.h>

#include <xen/xen.h>
#include <xen/xenbus.h>
#include <xen/events.h>
#include <xen/page.h>
#include <xen/platform_pci.h>
#include <xen/grant_table.h>

#include <xen/interface/io/netif.h>
#include <xen/interface/memory.h>
#include <xen/interface/grant_table.h>

/* Module parameters */
#define MAX_QUEUES_DEFAULT 8
static unsigned int xennet_max_queues;
module_param_named(max_queues, xennet_max_queues, uint, 0644);
MODULE_PARM_DESC(max_queues,
		 "Maximum number of queues per virtual interface");

static bool __read_mostly xennet_trusted = true;
module_param_named(trusted, xennet_trusted, bool, 0644);
MODULE_PARM_DESC(trusted, "Is the backend trusted");

#define XENNET_TIMEOUT	(5 * HZ)

static const struct ethtool_ops xennet_ethtool_ops;

struct netfront_cb {
	int pull_to;
};

#define NETFRONT_SKB_CB(skb)	((struct netfront_cb *)((skb)->cb))

#define RX_COPY_THRESHOLD 256

#define NET_TX_RING_SIZE __CONST_RING_SIZE(xen_netif_tx, XEN_PAGE_SIZE)
#define NET_RX_RING_SIZE __CONST_RING_SIZE(xen_netif_rx, XEN_PAGE_SIZE)

/* Minimum number of Rx slots (includes slot for GSO metadata). */
#define NET_RX_SLOTS_MIN (XEN_NETIF_NR_SLOTS_MIN + 1)

/* Queue name is interface name with "-qNNN" appended */
#define QUEUE_NAME_SIZE (IFNAMSIZ + 6)

/* IRQ name is queue name with "-tx" or "-rx" appended */
#define IRQ_NAME_SIZE (QUEUE_NAME_SIZE + 3)

static DECLARE_WAIT_QUEUE_HEAD(module_wq);

struct netfront_stats {
	u64			packets;
	u64			bytes;
	struct u64_stats_sync	syncp;
};

struct netfront_info;

struct netfront_queue {
	unsigned int id; /* Queue ID, 0-based */
	char name[QUEUE_NAME_SIZE]; /* DEVNAME-qN */
	struct netfront_info *info;

	struct bpf_prog __rcu *xdp_prog;

	struct napi_struct napi;

	/* Split event channels support, tx_* == rx_* when using
	 * single event channel.
	 */
	unsigned int tx_evtchn, rx_evtchn;
	unsigned int tx_irq, rx_irq;
	/* Only used when split event channels support is enabled */
	char tx_irq_name[IRQ_NAME_SIZE]; /* DEVNAME-qN-tx */
	char rx_irq_name[IRQ_NAME_SIZE]; /* DEVNAME-qN-rx */

	spinlock_t   tx_lock;
	struct xen_netif_tx_front_ring tx;
	int tx_ring_ref;

	/*
	 * {tx,rx}_skbs store outstanding skbuffs. Free tx_skb entries
	 * are linked from tx_skb_freelist through tx_link.
	 */
	struct sk_buff *tx_skbs[NET_TX_RING_SIZE];
	unsigned short tx_link[NET_TX_RING_SIZE];
#define TX_LINK_NONE 0xffff
#define TX_PENDING   0xfffe
	grant_ref_t gref_tx_head;
	grant_ref_t grant_tx_ref[NET_TX_RING_SIZE];
	struct page *grant_tx_page[NET_TX_RING_SIZE];
	unsigned tx_skb_freelist;
	unsigned int tx_pend_queue;

	spinlock_t   rx_lock ____cacheline_aligned_in_smp;
	struct xen_netif_rx_front_ring rx;
	int rx_ring_ref;

	struct timer_list rx_refill_timer;

	struct sk_buff *rx_skbs[NET_RX_RING_SIZE];
	grant_ref_t gref_rx_head;
	grant_ref_t grant_rx_ref[NET_RX_RING_SIZE];

	unsigned int rx_rsp_unconsumed;
	spinlock_t rx_cons_lock;

	struct page_pool *page_pool;
	struct xdp_rxq_info xdp_rxq;
};

struct netfront_info {
	struct list_head list;
	struct net_device *netdev;

	struct xenbus_device *xbdev;

	/* Multi-queue support */
	struct netfront_queue *queues;

	/* Statistics */
	struct netfront_stats __percpu *rx_stats;
	struct netfront_stats __percpu *tx_stats;

	/* XDP state */
	bool netback_has_xdp_headroom;
	bool netfront_xdp_enabled;

	/* Is the device behaving sanely? */
	bool broken;

	/* Should skbs be bounced into a zeroed buffer? */
	bool bounce;

	atomic_t rx_gso_checksum_fixup;
};

struct netfront_rx_info {
	struct xen_netif_rx_response rx;
	struct xen_netif_extra_info extras[XEN_NETIF_EXTRA_TYPE_MAX - 1];
};

/*
 * Access macros for acquiring and freeing slots in tx_skbs[].
 */

static void add_id_to_list(unsigned *head, unsigned short *list,
			   unsigned short id)
{
	list[id] = *head;
	*head = id;
}

static unsigned short get_id_from_list(unsigned *head, unsigned short *list)
{
	unsigned int id = *head;

	if (id != TX_LINK_NONE) {
		*head = list[id];
		list[id] = TX_LINK_NONE;
	}
	return id;
}

static int xennet_rxidx(RING_IDX idx)
{
	return idx & (NET_RX_RING_SIZE - 1);
}

static struct sk_buff *xennet_get_rx_skb(struct netfront_queue *queue,
					 RING_IDX ri)
{
	int i = xennet_rxidx(ri);
	struct sk_buff *skb = queue->rx_skbs[i];
	queue->rx_skbs[i] = NULL;
	return skb;
}

static grant_ref_t xennet_get_rx_ref(struct netfront_queue *queue,
				     RING_IDX ri)
{
	int i = xennet_rxidx(ri);
	grant_ref_t ref = queue->grant_rx_ref[i];
	queue->grant_rx_ref[i] = INVALID_GRANT_REF;
	return ref;
}

#ifdef CONFIG_SYSFS
static const struct attribute_group xennet_dev_group;
#endif

static bool xennet_can_sg(struct net_device *dev)
{
	return dev->features & NETIF_F_SG;
}


static void rx_refill_timeout(struct timer_list *t)
{
	struct netfront_queue *queue = from_timer(queue, t, rx_refill_timer);
	napi_schedule(&queue->napi);
}

static int netfront_tx_slot_available(struct netfront_queue *queue)
{
	return (queue->tx.req_prod_pvt - queue->tx.rsp_cons) <
		(NET_TX_RING_SIZE - XEN_NETIF_NR_SLOTS_MIN - 1);
}

static void xennet_maybe_wake_tx(struct netfront_queue *queue)
{
	struct net_device *dev = queue->info->netdev;
	struct netdev_queue *dev_queue = netdev_get_tx_queue(dev, queue->id);

	if (unlikely(netif_tx_queue_stopped(dev_queue)) &&
	    netfront_tx_slot_available(queue) &&
	    likely(netif_running(dev)))
		netif_tx_wake_queue(netdev_get_tx_queue(dev, queue->id));
}


static struct sk_buff *xennet_alloc_one_rx_buffer(struct netfront_queue *queue)
{
	struct sk_buff *skb;
	struct page *page;

	skb = __netdev_alloc_skb(queue->info->netdev,
				 RX_COPY_THRESHOLD + NET_IP_ALIGN,
				 GFP_ATOMIC | __GFP_NOWARN);
	if (unlikely(!skb))
		return NULL;

	page = page_pool_alloc_pages(queue->page_pool,
				     GFP_ATOMIC | __GFP_NOWARN | __GFP_ZERO);
	if (unlikely(!page)) {
		kfree_skb(skb);
		return NULL;
	}
	skb_add_rx_frag(skb, 0, page, 0, 0, PAGE_SIZE);
	skb_mark_for_recycle(skb);

	/* Align ip header to a 16 bytes boundary */
	skb_reserve(skb, NET_IP_ALIGN);
	skb->dev = queue->info->netdev;

	return skb;
}


static void xennet_alloc_rx_buffers(struct netfront_queue *queue)
{
	RING_IDX req_prod = queue->rx.req_prod_pvt;
	int notify;
	int err = 0;

	if (unlikely(!netif_carrier_ok(queue->info->netdev)))
		return;

	for (req_prod = queue->rx.req_prod_pvt;
	     req_prod - queue->rx.rsp_cons < NET_RX_RING_SIZE;
	     req_prod++) {
		struct sk_buff *skb;
		unsigned short id;
		grant_ref_t ref;
		struct page *page;
		struct xen_netif_rx_request *req;

		skb = xennet_alloc_one_rx_buffer(queue);
		if (!skb) {
			err = -ENOMEM;
			break;
		}

		id = xennet_rxidx(req_prod);

		BUG_ON(queue->rx_skbs[id]);
		queue->rx_skbs[id] = skb;

		ref = gnttab_claim_grant_reference(&queue->gref_rx_head);
		WARN_ON_ONCE(IS_ERR_VALUE((unsigned long)(int)ref));
		queue->grant_rx_ref[id] = ref;

		page = skb_frag_page(&skb_shinfo(skb)->frags[0]);

		req = RING_GET_REQUEST(&queue->rx, req_prod);
		gnttab_page_grant_foreign_access_ref_one(ref,
							 queue->info->xbdev->otherend_id,
							 page,
							 0);
		req->id = id;
		req->gref = ref;
	}

	queue->rx.req_prod_pvt = req_prod;

	/* Try again later if there are not enough requests or skb allocation
	 * failed.
	 * Enough requests is quantified as the sum of newly created slots and
	 * the unconsumed slots at the backend.
	 */
	if (req_prod - queue->rx.rsp_cons < NET_RX_SLOTS_MIN ||
	    unlikely(err)) {
		mod_timer(&queue->rx_refill_timer, jiffies + (HZ/10));
		return;
	}

	RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&queue->rx, notify);
	if (notify)
		notify_remote_via_irq(queue->rx_irq);
}

static int xennet_open(struct net_device *dev)
{
	struct netfront_info *np = netdev_priv(dev);
	unsigned int num_queues = dev->real_num_tx_queues;
	unsigned int i = 0;
	struct netfront_queue *queue = NULL;

	if (!np->queues || np->broken)
		return -ENODEV;

	for (i = 0; i < num_queues; ++i) {
		queue = &np->queues[i];
		napi_enable(&queue->napi);

		spin_lock_bh(&queue->rx_lock);
		if (netif_carrier_ok(dev)) {
			xennet_alloc_rx_buffers(queue);
			queue->rx.sring->rsp_event = queue->rx.rsp_cons + 1;
			if (RING_HAS_UNCONSUMED_RESPONSES(&queue->rx))
				napi_schedule(&queue->napi);
		}
		spin_unlock_bh(&queue->rx_lock);
	}

	netif_tx_start_all_queues(dev);

	return 0;
}

static bool xennet_tx_buf_gc(struct netfront_queue *queue)
{
	RING_IDX cons, prod;
	unsigned short id;
	struct sk_buff *skb;
	bool more_to_do;
	bool work_done = false;
	const struct device *dev = &queue->info->netdev->dev;

	BUG_ON(!netif_carrier_ok(queue->info->netdev));

	do {
		prod = queue->tx.sring->rsp_prod;
		if (RING_RESPONSE_PROD_OVERFLOW(&queue->tx, prod)) {
			dev_alert(dev, "Illegal number of responses %u\n",
				  prod - queue->tx.rsp_cons);
			goto err;
		}
		rmb(); /* Ensure we see responses up to 'rp'. */

		for (cons = queue->tx.rsp_cons; cons != prod; cons++) {
			struct xen_netif_tx_response txrsp;

			work_done = true;

			RING_COPY_RESPONSE(&queue->tx, cons, &txrsp);
			if (txrsp.status == XEN_NETIF_RSP_NULL)
				continue;

			id = txrsp.id;
			if (id >= RING_SIZE(&queue->tx)) {
				dev_alert(dev,
					  "Response has incorrect id (%u)\n",
					  id);
				goto err;
			}
			if (queue->tx_link[id] != TX_PENDING) {
				dev_alert(dev,
					  "Response for inactive request\n");
				goto err;
			}

			queue->tx_link[id] = TX_LINK_NONE;
			skb = queue->tx_skbs[id];
			queue->tx_skbs[id] = NULL;
			if (unlikely(!gnttab_end_foreign_access_ref(
				queue->grant_tx_ref[id]))) {
				dev_alert(dev,
					  "Grant still in use by backend domain\n");
				goto err;
			}
			gnttab_release_grant_reference(
				&queue->gref_tx_head, queue->grant_tx_ref[id]);
			queue->grant_tx_ref[id] = INVALID_GRANT_REF;
			queue->grant_tx_page[id] = NULL;
			add_id_to_list(&queue->tx_skb_freelist, queue->tx_link, id);
			dev_kfree_skb_irq(skb);
		}

		queue->tx.rsp_cons = prod;

		RING_FINAL_CHECK_FOR_RESPONSES(&queue->tx, more_to_do);
	} while (more_to_do);

	xennet_maybe_wake_tx(queue);

	return work_done;

 err:
	queue->info->broken = true;
	dev_alert(dev, "Disabled for further use\n");

	return work_done;
}

struct xennet_gnttab_make_txreq {
	struct netfront_queue *queue;
	struct sk_buff *skb;
	struct page *page;
	struct xen_netif_tx_request *tx;      /* Last request on ring page */
	struct xen_netif_tx_request tx_local; /* Last request local copy */
	unsigned int size;
};

static void xennet_tx_setup_grant(unsigned long gfn, unsigned int offset,
				  unsigned int len, void *data)
{
	struct xennet_gnttab_make_txreq *info = data;
	unsigned int id;
	struct xen_netif_tx_request *tx;
	grant_ref_t ref;
	/* convenient aliases */
	struct page *page = info->page;
	struct netfront_queue *queue = info->queue;
	struct sk_buff *skb = info->skb;

	id = get_id_from_list(&queue->tx_skb_freelist, queue->tx_link);
	tx = RING_GET_REQUEST(&queue->tx, queue->tx.req_prod_pvt++);
	ref = gnttab_claim_grant_reference(&queue->gref_tx_head);
	WARN_ON_ONCE(IS_ERR_VALUE((unsigned long)(int)ref));

	gnttab_grant_foreign_access_ref(ref, queue->info->xbdev->otherend_id,
					gfn, GNTMAP_readonly);

	queue->tx_skbs[id] = skb;
	queue->grant_tx_page[id] = page;
	queue->grant_tx_ref[id] = ref;

	info->tx_local.id = id;
	info->tx_local.gref = ref;
	info->tx_local.offset = offset;
	info->tx_local.size = len;
	info->tx_local.flags = 0;

	*tx = info->tx_local;

	/*
	 * Put the request in the pending queue; it will be set to pending
	 * when the producer index is about to be raised.
	 */
	add_id_to_list(&queue->tx_pend_queue, queue->tx_link, id);

	info->tx = tx;
	info->size += info->tx_local.size;
}

static struct xen_netif_tx_request *xennet_make_first_txreq(
	struct xennet_gnttab_make_txreq *info,
	unsigned int offset, unsigned int len)
{
	info->size = 0;

	gnttab_for_one_grant(info->page, offset, len, xennet_tx_setup_grant, info);

	return info->tx;
}

static void xennet_make_one_txreq(unsigned long gfn, unsigned int offset,
				  unsigned int len, void *data)
{
	struct xennet_gnttab_make_txreq *info = data;

	info->tx->flags |= XEN_NETTXF_more_data;
	skb_get(info->skb);
	xennet_tx_setup_grant(gfn, offset, len, data);
}

static void xennet_make_txreqs(
	struct xennet_gnttab_make_txreq *info,
	struct page *page,
	unsigned int offset, unsigned int len)
{
	/* Skip unused frames from start of page */
	page += offset >> PAGE_SHIFT;
	offset &= ~PAGE_MASK;

	while (len) {
		info->page = page;
		info->size = 0;

		gnttab_foreach_grant_in_range(page, offset, len,
					      xennet_make_one_txreq,
					      info);

		page++;
		offset = 0;
		len -= info->size;
	}
}

/*
 * Count how many ring slots are required to send this skb. Each frag
 * might be a compound page.
 */
static int xennet_count_skb_slots(struct sk_buff *skb)
{
	int i, frags = skb_shinfo(skb)->nr_frags;
	int slots;

	slots = gnttab_count_grant(offset_in_page(skb->data),
				   skb_headlen(skb));

	for (i = 0; i < frags; i++) {
		skb_frag_t *frag = skb_shinfo(skb)->frags + i;
		unsigned long size = skb_frag_size(frag);
		unsigned long offset = skb_frag_off(frag);

		/* Skip unused frames from start of page */
		offset &= ~PAGE_MASK;

		slots += gnttab_count_grant(offset, size);
	}

	return slots;
}

static u16 xennet_select_queue(struct net_device *dev, struct sk_buff *skb,
			       struct net_device *sb_dev)
{
	unsigned int num_queues = dev->real_num_tx_queues;
	u32 hash;
	u16 queue_idx;

	/* First, check if there is only one queue */
	if (num_queues == 1) {
		queue_idx = 0;
	} else {
		hash = skb_get_hash(skb);
		queue_idx = hash % num_queues;
	}

	return queue_idx;
}

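/*
 * Flip all requests queued via add_id_to_list(&queue->tx_pend_queue, ...)
 * to TX_PENDING right before the producer index is pushed, so that
 * xennet_tx_buf_gc() can treat a response for a slot that was never
 * marked pending as a backend error.
 */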
static void xennet_mark_tx_pending(struct netfront_queue *queue)
{
	unsigned int i;

	while ((i = get_id_from_list(&queue->tx_pend_queue, queue->tx_link)) !=
	       TX_LINK_NONE)
		queue->tx_link[i] = TX_PENDING;
}

static int xennet_xdp_xmit_one(struct net_device *dev,
			       struct netfront_queue *queue,
			       struct xdp_frame *xdpf)
{
	struct netfront_info *np = netdev_priv(dev);
	struct netfront_stats *tx_stats = this_cpu_ptr(np->tx_stats);
	struct xennet_gnttab_make_txreq info = {
		.queue = queue,
		.skb = NULL,
		.page = virt_to_page(xdpf->data),
	};
	int notify;

	xennet_make_first_txreq(&info,
				offset_in_page(xdpf->data),
				xdpf->len);

	xennet_mark_tx_pending(queue);

	RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&queue->tx, notify);
	if (notify)
		notify_remote_via_irq(queue->tx_irq);

	u64_stats_update_begin(&tx_stats->syncp);
	tx_stats->bytes += xdpf->len;
	tx_stats->packets++;
	u64_stats_update_end(&tx_stats->syncp);

	xennet_tx_buf_gc(queue);

	return 0;
}

static int xennet_xdp_xmit(struct net_device *dev, int n,
			   struct xdp_frame **frames, u32 flags)
{
	unsigned int num_queues = dev->real_num_tx_queues;
	struct netfront_info *np = netdev_priv(dev);
	struct netfront_queue *queue = NULL;
	unsigned long irq_flags;
	int nxmit = 0;
	int i;

	if (unlikely(np->broken))
		return -ENODEV;
	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
		return -EINVAL;

	queue = &np->queues[smp_processor_id() % num_queues];

	spin_lock_irqsave(&queue->tx_lock, irq_flags);
	for (i = 0; i < n; i++) {
		struct xdp_frame *xdpf = frames[i];

		if (!xdpf)
			continue;
		if (xennet_xdp_xmit_one(dev, queue, xdpf))
			break;
		nxmit++;
	}
	spin_unlock_irqrestore(&queue->tx_lock, irq_flags);

	return nxmit;
}

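/*
 * Copy an skb into a freshly allocated, zeroed buffer sized to whole
 * XEN_PAGE_SIZE pages, so that granting the page(s) to an untrusted
 * backend cannot expose unrelated data sitting next to the packet.
 */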
static struct sk_buff *bounce_skb(const struct sk_buff *skb)
{
	unsigned int headerlen = skb_headroom(skb);
	/* Align size to allocate full pages and avoid contiguous data leaks */
	unsigned int size = ALIGN(skb_end_offset(skb) + skb->data_len,
				  XEN_PAGE_SIZE);
	struct sk_buff *n = alloc_skb(size, GFP_ATOMIC | __GFP_ZERO);

	if (!n)
		return NULL;

	if (!IS_ALIGNED((uintptr_t)n->head, XEN_PAGE_SIZE)) {
		WARN_ONCE(1, "misaligned skb allocated\n");
		kfree_skb(n);
		return NULL;
	}

	/* Set the data pointer */
	skb_reserve(n, headerlen);
	/* Set the tail pointer and length */
	skb_put(n, skb->len);

	BUG_ON(skb_copy_bits(skb, -headerlen, n->head, headerlen + skb->len));

	skb_copy_header(n, skb);
	return n;
}

#define MAX_XEN_SKB_FRAGS (65536 / XEN_PAGE_SIZE + 1)

static netdev_tx_t xennet_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct netfront_info *np = netdev_priv(dev);
	struct netfront_stats *tx_stats = this_cpu_ptr(np->tx_stats);
	struct xen_netif_tx_request *first_tx;
	unsigned int i;
	int notify;
	int slots;
	struct page *page;
	unsigned int offset;
	unsigned int len;
	unsigned long flags;
	struct netfront_queue *queue = NULL;
	struct xennet_gnttab_make_txreq info = { };
	unsigned int num_queues = dev->real_num_tx_queues;
	u16 queue_index;
	struct sk_buff *nskb;

	/* Drop the packet if no queues are set up */
	if (num_queues < 1)
		goto drop;
	if (unlikely(np->broken))
		goto drop;
	/* Determine which queue to transmit this SKB on */
	queue_index = skb_get_queue_mapping(skb);
	queue = &np->queues[queue_index];

	/* If skb->len is too big for wire format, drop skb and alert
	 * user about misconfiguration.
	 */
	if (unlikely(skb->len > XEN_NETIF_MAX_TX_SIZE)) {
		net_alert_ratelimited(
			"xennet: skb->len = %u, too big for wire format\n",
			skb->len);
		goto drop;
	}

	slots = xennet_count_skb_slots(skb);
	if (unlikely(slots > MAX_XEN_SKB_FRAGS + 1)) {
		net_dbg_ratelimited("xennet: skb rides the rocket: %d slots, %d bytes\n",
				    slots, skb->len);
		if (skb_linearize(skb))
			goto drop;
	}

	page = virt_to_page(skb->data);
	offset = offset_in_page(skb->data);

	/* The first req should be at least ETH_HLEN size or the packet will be
	 * dropped by netback.
	 *
	 * If the backend is not trusted bounce all data to zeroed pages to
	 * avoid exposing contiguous data on the granted page not belonging to
	 * the skb.
	 */
	if (np->bounce || unlikely(PAGE_SIZE - offset < ETH_HLEN)) {
		nskb = bounce_skb(skb);
		if (!nskb)
			goto drop;
		dev_consume_skb_any(skb);
		skb = nskb;
		page = virt_to_page(skb->data);
		offset = offset_in_page(skb->data);
	}

	len = skb_headlen(skb);

	spin_lock_irqsave(&queue->tx_lock, flags);

	if (unlikely(!netif_carrier_ok(dev) ||
		     (slots > 1 && !xennet_can_sg(dev)) ||
		     netif_needs_gso(skb, netif_skb_features(skb)))) {
		spin_unlock_irqrestore(&queue->tx_lock, flags);
		goto drop;
	}

	/* First request for the linear area. */
	info.queue = queue;
	info.skb = skb;
	info.page = page;
	first_tx = xennet_make_first_txreq(&info, offset, len);
	offset += info.tx_local.size;
	if (offset == PAGE_SIZE) {
		page++;
		offset = 0;
	}
	len -= info.tx_local.size;

	if (skb->ip_summed == CHECKSUM_PARTIAL)
		/* local packet? */
		first_tx->flags |= XEN_NETTXF_csum_blank |
				   XEN_NETTXF_data_validated;
	else if (skb->ip_summed == CHECKSUM_UNNECESSARY)
		/* remote but checksummed. */
		first_tx->flags |= XEN_NETTXF_data_validated;

	/* Optional extra info after the first request. */
	if (skb_shinfo(skb)->gso_size) {
		struct xen_netif_extra_info *gso;

		gso = (struct xen_netif_extra_info *)
			RING_GET_REQUEST(&queue->tx, queue->tx.req_prod_pvt++);

		first_tx->flags |= XEN_NETTXF_extra_info;

		gso->u.gso.size = skb_shinfo(skb)->gso_size;
		gso->u.gso.type = (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) ?
			XEN_NETIF_GSO_TYPE_TCPV6 :
			XEN_NETIF_GSO_TYPE_TCPV4;
		gso->u.gso.pad = 0;
		gso->u.gso.features = 0;

		gso->type = XEN_NETIF_EXTRA_TYPE_GSO;
		gso->flags = 0;
	}

	/* Requests for the rest of the linear area. */
	xennet_make_txreqs(&info, page, offset, len);

	/* Requests for all the frags. */
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		xennet_make_txreqs(&info, skb_frag_page(frag),
				   skb_frag_off(frag),
				   skb_frag_size(frag));
	}

	/* First request has the packet length. */
	first_tx->size = skb->len;

	/* timestamp packet in software */
	skb_tx_timestamp(skb);

	xennet_mark_tx_pending(queue);

	RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&queue->tx, notify);
	if (notify)
		notify_remote_via_irq(queue->tx_irq);

	u64_stats_update_begin(&tx_stats->syncp);
	tx_stats->bytes += skb->len;
	tx_stats->packets++;
	u64_stats_update_end(&tx_stats->syncp);

	/* Note: It is not safe to access skb after xennet_tx_buf_gc()! */
	xennet_tx_buf_gc(queue);

	if (!netfront_tx_slot_available(queue))
		netif_tx_stop_queue(netdev_get_tx_queue(dev, queue->id));

	spin_unlock_irqrestore(&queue->tx_lock, flags);

	return NETDEV_TX_OK;

 drop:
	dev->stats.tx_dropped++;
	dev_kfree_skb_any(skb);
	return NETDEV_TX_OK;
}

static int xennet_close(struct net_device *dev)
{
	struct netfront_info *np = netdev_priv(dev);
	unsigned int num_queues = np->queues ? dev->real_num_tx_queues : 0;
	unsigned int i;
	struct netfront_queue *queue;
	netif_tx_stop_all_queues(np->netdev);
	for (i = 0; i < num_queues; ++i) {
		queue = &np->queues[i];
		napi_disable(&queue->napi);
	}
	return 0;
}

static void xennet_destroy_queues(struct netfront_info *info)
{
	unsigned int i;

	if (!info->queues)
		return;

	for (i = 0; i < info->netdev->real_num_tx_queues; i++) {
		struct netfront_queue *queue = &info->queues[i];

		if (netif_running(info->netdev))
			napi_disable(&queue->napi);
		netif_napi_del(&queue->napi);
	}

	kfree(info->queues);
	info->queues = NULL;
}

static void xennet_uninit(struct net_device *dev)
{
	struct netfront_info *np = netdev_priv(dev);
	xennet_destroy_queues(np);
}

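/*
 * Update the RX response consumer index and the count of unconsumed
 * responses together under rx_cons_lock, so that readers taking the
 * lock always see a consistent pair.
 */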
static void xennet_set_rx_rsp_cons(struct netfront_queue *queue, RING_IDX val)
{
	unsigned long flags;

	spin_lock_irqsave(&queue->rx_cons_lock, flags);
	queue->rx.rsp_cons = val;
	queue->rx_rsp_unconsumed = XEN_RING_NR_UNCONSUMED_RESPONSES(&queue->rx);
	spin_unlock_irqrestore(&queue->rx_cons_lock, flags);
}

static void xennet_move_rx_slot(struct netfront_queue *queue, struct sk_buff *skb,
				grant_ref_t ref)
{
	int new = xennet_rxidx(queue->rx.req_prod_pvt);

	BUG_ON(queue->rx_skbs[new]);
	queue->rx_skbs[new] = skb;
	queue->grant_rx_ref[new] = ref;
	RING_GET_REQUEST(&queue->rx, queue->rx.req_prod_pvt)->id = new;
	RING_GET_REQUEST(&queue->rx, queue->rx.req_prod_pvt)->gref = ref;
	queue->rx.req_prod_pvt++;
}

static int xennet_get_extras(struct netfront_queue *queue,
			     struct xen_netif_extra_info *extras,
			     RING_IDX rp)

{
	struct xen_netif_extra_info extra;
	struct device *dev = &queue->info->netdev->dev;
	RING_IDX cons = queue->rx.rsp_cons;
	int err = 0;

	do {
		struct sk_buff *skb;
		grant_ref_t ref;

		if (unlikely(cons + 1 == rp)) {
			if (net_ratelimit())
				dev_warn(dev, "Missing extra info\n");
			err = -EBADR;
			break;
		}

		RING_COPY_RESPONSE(&queue->rx, ++cons, &extra);

		if (unlikely(!extra.type ||
			     extra.type >= XEN_NETIF_EXTRA_TYPE_MAX)) {
			if (net_ratelimit())
				dev_warn(dev, "Invalid extra type: %d\n",
					 extra.type);
			err = -EINVAL;
		} else {
			extras[extra.type - 1] = extra;
		}

		skb = xennet_get_rx_skb(queue, cons);
		ref = xennet_get_rx_ref(queue, cons);
		xennet_move_rx_slot(queue, skb, ref);
	} while (extra.flags & XEN_NETIF_EXTRA_FLAG_MORE);

	xennet_set_rx_rsp_cons(queue, cons);
	return err;
}

static u32 xennet_run_xdp(struct netfront_queue *queue, struct page *pdata,
			  struct xen_netif_rx_response *rx, struct bpf_prog *prog,
			  struct xdp_buff *xdp, bool *need_xdp_flush)
{
	struct xdp_frame *xdpf;
	u32 len = rx->status;
	u32 act;
	int err;

	xdp_init_buff(xdp, XEN_PAGE_SIZE - XDP_PACKET_HEADROOM,
		      &queue->xdp_rxq);
	xdp_prepare_buff(xdp, page_address(pdata), XDP_PACKET_HEADROOM,
			 len, false);

	act = bpf_prog_run_xdp(prog, xdp);
	switch (act) {
	case XDP_TX:
		get_page(pdata);
		xdpf = xdp_convert_buff_to_frame(xdp);
		err = xennet_xdp_xmit(queue->info->netdev, 1, &xdpf, 0);
		if (unlikely(!err))
			xdp_return_frame_rx_napi(xdpf);
		else if (unlikely(err < 0))
			trace_xdp_exception(queue->info->netdev, prog, act);
		break;
	case XDP_REDIRECT:
		get_page(pdata);
		err = xdp_do_redirect(queue->info->netdev, xdp, prog);
		*need_xdp_flush = true;
		if (unlikely(err))
			trace_xdp_exception(queue->info->netdev, prog, act);
		break;
	case XDP_PASS:
	case XDP_DROP:
		break;

	case XDP_ABORTED:
		trace_xdp_exception(queue->info->netdev, prog, act);
		break;

	default:
		bpf_warn_invalid_xdp_action(queue->info->netdev, prog, act);
	}

	return act;
}

static int xennet_get_responses(struct netfront_queue *queue,
				struct netfront_rx_info *rinfo, RING_IDX rp,
				struct sk_buff_head *list,
				bool *need_xdp_flush)
{
	struct xen_netif_rx_response *rx = &rinfo->rx, rx_local;
	int max = XEN_NETIF_NR_SLOTS_MIN + (rx->status <= RX_COPY_THRESHOLD);
	RING_IDX cons = queue->rx.rsp_cons;
	struct sk_buff *skb = xennet_get_rx_skb(queue, cons);
	struct xen_netif_extra_info *extras = rinfo->extras;
	grant_ref_t ref = xennet_get_rx_ref(queue, cons);
	struct device *dev = &queue->info->netdev->dev;
	struct bpf_prog *xdp_prog;
	struct xdp_buff xdp;
	int slots = 1;
	int err = 0;
	u32 verdict;

	if (rx->flags & XEN_NETRXF_extra_info) {
		err = xennet_get_extras(queue, extras, rp);
		if (!err) {
			if (extras[XEN_NETIF_EXTRA_TYPE_XDP - 1].type) {
				struct xen_netif_extra_info *xdp;

				xdp = &extras[XEN_NETIF_EXTRA_TYPE_XDP - 1];
				rx->offset = xdp->u.xdp.headroom;
			}
		}
		cons = queue->rx.rsp_cons;
	}

	for (;;) {
		/*
		 * This definitely indicates a bug, either in this driver or in
		 * the backend driver. In future this should flag the bad
		 * situation to the system controller to reboot the backend.
		 */
		if (ref == INVALID_GRANT_REF) {
			if (net_ratelimit())
				dev_warn(dev, "Bad rx response id %d.\n",
					 rx->id);
			err = -EINVAL;
			goto next;
		}

		if (unlikely(rx->status < 0 ||
			     rx->offset + rx->status > XEN_PAGE_SIZE)) {
			if (net_ratelimit())
				dev_warn(dev, "rx->offset: %u, size: %d\n",
					 rx->offset, rx->status);
			xennet_move_rx_slot(queue, skb, ref);
			err = -EINVAL;
			goto next;
		}

		if (!gnttab_end_foreign_access_ref(ref)) {
			dev_alert(dev,
				  "Grant still in use by backend domain\n");
			queue->info->broken = true;
			dev_alert(dev, "Disabled for further use\n");
			return -EINVAL;
		}

		gnttab_release_grant_reference(&queue->gref_rx_head, ref);

		rcu_read_lock();
		xdp_prog = rcu_dereference(queue->xdp_prog);
		if (xdp_prog) {
			if (!(rx->flags & XEN_NETRXF_more_data)) {
				/* currently only a single page contains data */
				verdict = xennet_run_xdp(queue,
					skb_frag_page(&skb_shinfo(skb)->frags[0]),
					rx, xdp_prog, &xdp, need_xdp_flush);
				if (verdict != XDP_PASS)
					err = -EINVAL;
10936c5aa6fcSDenis Kirjanov } else {
10946c5aa6fcSDenis Kirjanov /* drop the frame */
10956c5aa6fcSDenis Kirjanov err = -EINVAL;
10966c5aa6fcSDenis Kirjanov }
10976c5aa6fcSDenis Kirjanov }
10986c5aa6fcSDenis Kirjanov rcu_read_unlock();
1099f63c2c20SJan Beulich
11006c5aa6fcSDenis Kirjanov __skb_queue_tail(list, skb);
1101f63c2c20SJan Beulich
1102f63c2c20SJan Beulich next:
1103f942dc25SIan Campbell if (!(rx->flags & XEN_NETRXF_more_data))
11040d160211SJeremy Fitzhardinge break;
11050d160211SJeremy Fitzhardinge
11067158ff6dSWei Liu if (cons + slots == rp) {
11070d160211SJeremy Fitzhardinge if (net_ratelimit())
11087158ff6dSWei Liu dev_warn(dev, "Need more slots\n");
11090d160211SJeremy Fitzhardinge err = -ENOENT;
11100d160211SJeremy Fitzhardinge break;
11110d160211SJeremy Fitzhardinge }
11120d160211SJeremy Fitzhardinge
11138446066bSJuergen Gross RING_COPY_RESPONSE(&queue->rx, cons + slots, &rx_local);
11148446066bSJuergen Gross rx = &rx_local;
11152688fcb7SAndrew J. Bennieston skb = xennet_get_rx_skb(queue, cons + slots);
11162688fcb7SAndrew J. Bennieston ref = xennet_get_rx_ref(queue, cons + slots);
11177158ff6dSWei Liu slots++;
11180d160211SJeremy Fitzhardinge }
11190d160211SJeremy Fitzhardinge
11207158ff6dSWei Liu if (unlikely(slots > max)) {
11210d160211SJeremy Fitzhardinge if (net_ratelimit())
1122697089dcSWei Liu dev_warn(dev, "Too many slots\n");
11230d160211SJeremy Fitzhardinge err = -E2BIG;
11240d160211SJeremy Fitzhardinge }
11250d160211SJeremy Fitzhardinge
11260d160211SJeremy Fitzhardinge if (unlikely(err))
1127b27d4795SJuergen Gross xennet_set_rx_rsp_cons(queue, cons + slots);
11280d160211SJeremy Fitzhardinge
11290d160211SJeremy Fitzhardinge return err;
11300d160211SJeremy Fitzhardinge }
11310d160211SJeremy Fitzhardinge
11320d160211SJeremy Fitzhardinge static int xennet_set_skb_gso(struct sk_buff *skb,
11330d160211SJeremy Fitzhardinge struct xen_netif_extra_info *gso)
11340d160211SJeremy Fitzhardinge {
11350d160211SJeremy Fitzhardinge if (!gso->u.gso.size) {
11360d160211SJeremy Fitzhardinge if (net_ratelimit())
1137383eda32SJoe Perches pr_warn("GSO size must not be zero\n");
11380d160211SJeremy Fitzhardinge return -EINVAL;
11390d160211SJeremy Fitzhardinge }
11400d160211SJeremy Fitzhardinge
11412c0057deSPaul Durrant if (gso->u.gso.type != XEN_NETIF_GSO_TYPE_TCPV4 &&
11422c0057deSPaul Durrant gso->u.gso.type != XEN_NETIF_GSO_TYPE_TCPV6) {
11430d160211SJeremy Fitzhardinge if (net_ratelimit())
1144383eda32SJoe Perches pr_warn("Bad GSO type %d\n", gso->u.gso.type);
11450d160211SJeremy Fitzhardinge return -EINVAL;
11460d160211SJeremy Fitzhardinge }
11470d160211SJeremy Fitzhardinge
11480d160211SJeremy Fitzhardinge skb_shinfo(skb)->gso_size = gso->u.gso.size;
11492c0057deSPaul Durrant skb_shinfo(skb)->gso_type =
11502c0057deSPaul Durrant (gso->u.gso.type == XEN_NETIF_GSO_TYPE_TCPV4) ?
11512c0057deSPaul Durrant SKB_GSO_TCPV4 :
11522c0057deSPaul Durrant SKB_GSO_TCPV6;
11530d160211SJeremy Fitzhardinge
11540d160211SJeremy Fitzhardinge /* Header must be checked, and gso_segs computed. */
11550d160211SJeremy Fitzhardinge skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY;
11560d160211SJeremy Fitzhardinge skb_shinfo(skb)->gso_segs = 0;
11570d160211SJeremy Fitzhardinge
11580d160211SJeremy Fitzhardinge return 0;
11590d160211SJeremy Fitzhardinge }
11600d160211SJeremy Fitzhardinge
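/*
 * Attach the remaining per-slot buffers of a packet as fragments of the
 * head skb, consuming one RX response per queued skb. Fails with -ENOENT
 * if more fragments arrive than MAX_SKB_FRAGS allows.
 */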
1161a761129eSDongli Zhang static int xennet_fill_frags(struct netfront_queue *queue,
11620d160211SJeremy Fitzhardinge struct sk_buff *skb,
11630d160211SJeremy Fitzhardinge struct sk_buff_head *list)
11640d160211SJeremy Fitzhardinge {
11652688fcb7SAndrew J. Bennieston RING_IDX cons = queue->rx.rsp_cons;
11660d160211SJeremy Fitzhardinge struct sk_buff *nskb;
11670d160211SJeremy Fitzhardinge
11680d160211SJeremy Fitzhardinge while ((nskb = __skb_dequeue(list))) {
11698446066bSJuergen Gross struct xen_netif_rx_response rx;
117001c68026SIan Campbell skb_frag_t *nfrag = &skb_shinfo(nskb)->frags[0];
11710d160211SJeremy Fitzhardinge
11728446066bSJuergen Gross RING_COPY_RESPONSE(&queue->rx, ++cons, &rx);
11738446066bSJuergen Gross
1174d472b3a6SJuergen Gross if (skb_shinfo(skb)->nr_frags == MAX_SKB_FRAGS) {
1175093b9c71SJan Beulich unsigned int pull_to = NETFRONT_SKB_CB(skb)->pull_to;
11760d160211SJeremy Fitzhardinge
1177d81c5054SJuergen Gross BUG_ON(pull_to < skb_headlen(skb));
1178093b9c71SJan Beulich __pskb_pull_tail(skb, pull_to - skb_headlen(skb));
1179093b9c71SJan Beulich }
1180ad4f15dcSJuergen Gross if (unlikely(skb_shinfo(skb)->nr_frags >= MAX_SKB_FRAGS)) {
1181b27d4795SJuergen Gross xennet_set_rx_rsp_cons(queue,
1182b27d4795SJuergen Gross ++cons + skb_queue_len(list));
1183ad4f15dcSJuergen Gross kfree_skb(nskb);
1184a761129eSDongli Zhang return -ENOENT;
1185ad4f15dcSJuergen Gross }
1186093b9c71SJan Beulich
1187d472b3a6SJuergen Gross skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
1188d472b3a6SJuergen Gross skb_frag_page(nfrag),
11898446066bSJuergen Gross rx.offset, rx.status, PAGE_SIZE);
11900d160211SJeremy Fitzhardinge
11910d160211SJeremy Fitzhardinge skb_shinfo(nskb)->nr_frags = 0;
11920d160211SJeremy Fitzhardinge kfree_skb(nskb);
11930d160211SJeremy Fitzhardinge }
11940d160211SJeremy Fitzhardinge
1195b27d4795SJuergen Gross xennet_set_rx_rsp_cons(queue, cons);
1196a761129eSDongli Zhang
1197a761129eSDongli Zhang return 0;
11980d160211SJeremy Fitzhardinge }
11990d160211SJeremy Fitzhardinge
1200e0ce4af9SIan Campbell static int checksum_setup(struct net_device *dev, struct sk_buff *skb)
12010d160211SJeremy Fitzhardinge {
1202b5cf66cdSPaul Durrant bool recalculate_partial_csum = false;
1203e0ce4af9SIan Campbell
1204e0ce4af9SIan Campbell /*
1205e0ce4af9SIan Campbell * A GSO SKB must be CHECKSUM_PARTIAL. However some buggy
1206e0ce4af9SIan Campbell * peers can fail to set NETRXF_csum_blank when sending a GSO
1207e0ce4af9SIan Campbell * frame. In this case force the SKB to CHECKSUM_PARTIAL and
1208e0ce4af9SIan Campbell * recalculate the partial checksum.
1209e0ce4af9SIan Campbell */
1210e0ce4af9SIan Campbell if (skb->ip_summed != CHECKSUM_PARTIAL && skb_is_gso(skb)) {
1211e0ce4af9SIan Campbell struct netfront_info *np = netdev_priv(dev);
12122688fcb7SAndrew J. Bennieston atomic_inc(&np->rx_gso_checksum_fixup);
1213e0ce4af9SIan Campbell skb->ip_summed = CHECKSUM_PARTIAL;
1214b5cf66cdSPaul Durrant recalculate_partial_csum = true;
1215e0ce4af9SIan Campbell }
1216e0ce4af9SIan Campbell
1217e0ce4af9SIan Campbell /* A non-CHECKSUM_PARTIAL SKB does not require setup. */
1218e0ce4af9SIan Campbell if (skb->ip_summed != CHECKSUM_PARTIAL)
1219e0ce4af9SIan Campbell return 0;
12200d160211SJeremy Fitzhardinge
1221b5cf66cdSPaul Durrant return skb_checksum_setup(skb, recalculate_partial_csum);
12220d160211SJeremy Fitzhardinge }
12230d160211SJeremy Fitzhardinge
12242688fcb7SAndrew J. Bennieston static int handle_incoming_queue(struct netfront_queue *queue,
12250d160211SJeremy Fitzhardinge struct sk_buff_head *rxq)
12260d160211SJeremy Fitzhardinge {
1227900e1833SDavid Vrabel struct netfront_stats *rx_stats = this_cpu_ptr(queue->info->rx_stats);
12280d160211SJeremy Fitzhardinge int packets_dropped = 0;
12290d160211SJeremy Fitzhardinge struct sk_buff *skb;
12300d160211SJeremy Fitzhardinge
12310d160211SJeremy Fitzhardinge while ((skb = __skb_dequeue(rxq)) != NULL) {
12323683243bSIan Campbell int pull_to = NETFRONT_SKB_CB(skb)->pull_to;
12330d160211SJeremy Fitzhardinge
1234093b9c71SJan Beulich if (pull_to > skb_headlen(skb))
12353683243bSIan Campbell __pskb_pull_tail(skb, pull_to - skb_headlen(skb));
12360d160211SJeremy Fitzhardinge
12370d160211SJeremy Fitzhardinge /* Ethernet work: Delayed to here as it peeks the header. */
12382688fcb7SAndrew J. Bennieston skb->protocol = eth_type_trans(skb, queue->info->netdev);
1239d554f73dSWei Liu skb_reset_network_header(skb);
12400d160211SJeremy Fitzhardinge
12412688fcb7SAndrew J. Bennieston if (checksum_setup(queue->info->netdev, skb)) {
12420d160211SJeremy Fitzhardinge kfree_skb(skb);
12430d160211SJeremy Fitzhardinge packets_dropped++;
12442688fcb7SAndrew J. Bennieston queue->info->netdev->stats.rx_errors++;
12450d160211SJeremy Fitzhardinge continue;
12460d160211SJeremy Fitzhardinge }
12470d160211SJeremy Fitzhardinge
1248900e1833SDavid Vrabel u64_stats_update_begin(&rx_stats->syncp);
1249900e1833SDavid Vrabel rx_stats->packets++;
1250900e1833SDavid Vrabel rx_stats->bytes += skb->len;
1251900e1833SDavid Vrabel u64_stats_update_end(&rx_stats->syncp);
12520d160211SJeremy Fitzhardinge
12530d160211SJeremy Fitzhardinge /* Pass it up. */
12542688fcb7SAndrew J. Bennieston napi_gro_receive(&queue->napi, skb);
12550d160211SJeremy Fitzhardinge }
12560d160211SJeremy Fitzhardinge
12570d160211SJeremy Fitzhardinge return packets_dropped;
12580d160211SJeremy Fitzhardinge }
12590d160211SJeremy Fitzhardinge
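/*
 * NAPI poll handler: consume up to @budget RX responses, assemble complete
 * skbs (including GSO metadata, checksum flags and extra fragments) and
 * hand them to the stack before refilling the RX ring.
 */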
1260bea3348eSStephen Hemminger static int xennet_poll(struct napi_struct *napi, int budget)
12610d160211SJeremy Fitzhardinge {
12622688fcb7SAndrew J. Bennieston struct netfront_queue *queue = container_of(napi, struct netfront_queue, napi);
12632688fcb7SAndrew J. Bennieston struct net_device *dev = queue->info->netdev;
12640d160211SJeremy Fitzhardinge struct sk_buff *skb;
12650d160211SJeremy Fitzhardinge struct netfront_rx_info rinfo;
12660d160211SJeremy Fitzhardinge struct xen_netif_rx_response *rx = &rinfo.rx;
12670d160211SJeremy Fitzhardinge struct xen_netif_extra_info *extras = rinfo.extras;
12680d160211SJeremy Fitzhardinge RING_IDX i, rp;
1269bea3348eSStephen Hemminger int work_done;
12700d160211SJeremy Fitzhardinge struct sk_buff_head rxq;
12710d160211SJeremy Fitzhardinge struct sk_buff_head errq;
12720d160211SJeremy Fitzhardinge struct sk_buff_head tmpq;
12730d160211SJeremy Fitzhardinge int err;
12746c5aa6fcSDenis Kirjanov bool need_xdp_flush = false;
12750d160211SJeremy Fitzhardinge
12762688fcb7SAndrew J. Bennieston spin_lock(&queue->rx_lock);
12770d160211SJeremy Fitzhardinge
12780d160211SJeremy Fitzhardinge skb_queue_head_init(&rxq);
12790d160211SJeremy Fitzhardinge skb_queue_head_init(&errq);
12800d160211SJeremy Fitzhardinge skb_queue_head_init(&tmpq);
12810d160211SJeremy Fitzhardinge
12822688fcb7SAndrew J. Bennieston rp = queue->rx.sring->rsp_prod;
1283a884daa6SJuergen Gross if (RING_RESPONSE_PROD_OVERFLOW(&queue->rx, rp)) {
1284a884daa6SJuergen Gross dev_alert(&dev->dev, "Illegal number of responses %u\n",
1285a884daa6SJuergen Gross rp - queue->rx.rsp_cons);
1286a884daa6SJuergen Gross queue->info->broken = true;
1287a884daa6SJuergen Gross spin_unlock(&queue->rx_lock);
1288a884daa6SJuergen Gross return 0;
1289a884daa6SJuergen Gross }
12900d160211SJeremy Fitzhardinge rmb(); /* Ensure we see queued responses up to 'rp'. */
12910d160211SJeremy Fitzhardinge
12922688fcb7SAndrew J. Bennieston i = queue->rx.rsp_cons;
12930d160211SJeremy Fitzhardinge work_done = 0;
12940d160211SJeremy Fitzhardinge while ((i != rp) && (work_done < budget)) {
12958446066bSJuergen Gross RING_COPY_RESPONSE(&queue->rx, i, rx);
12960d160211SJeremy Fitzhardinge memset(extras, 0, sizeof(rinfo.extras));
12970d160211SJeremy Fitzhardinge
12986c5aa6fcSDenis Kirjanov err = xennet_get_responses(queue, &rinfo, rp, &tmpq,
12996c5aa6fcSDenis Kirjanov &need_xdp_flush);
13000d160211SJeremy Fitzhardinge
13010d160211SJeremy Fitzhardinge if (unlikely(err)) {
130266e3531bSJuergen Gross if (queue->info->broken) {
130366e3531bSJuergen Gross spin_unlock(&queue->rx_lock);
130466e3531bSJuergen Gross return 0;
130566e3531bSJuergen Gross }
13060d160211SJeremy Fitzhardinge err:
13070d160211SJeremy Fitzhardinge while ((skb = __skb_dequeue(&tmpq)))
13080d160211SJeremy Fitzhardinge __skb_queue_tail(&errq, skb);
130909f75cd7SJeff Garzik dev->stats.rx_errors++;
13102688fcb7SAndrew J. Bennieston i = queue->rx.rsp_cons;
13110d160211SJeremy Fitzhardinge continue;
13120d160211SJeremy Fitzhardinge }
13130d160211SJeremy Fitzhardinge
13140d160211SJeremy Fitzhardinge skb = __skb_dequeue(&tmpq);
13150d160211SJeremy Fitzhardinge
13160d160211SJeremy Fitzhardinge if (extras[XEN_NETIF_EXTRA_TYPE_GSO - 1].type) {
13170d160211SJeremy Fitzhardinge struct xen_netif_extra_info *gso;
13180d160211SJeremy Fitzhardinge gso = &extras[XEN_NETIF_EXTRA_TYPE_GSO - 1];
13190d160211SJeremy Fitzhardinge
13200d160211SJeremy Fitzhardinge if (unlikely(xennet_set_skb_gso(skb, gso))) {
13210d160211SJeremy Fitzhardinge __skb_queue_head(&tmpq, skb);
1322b27d4795SJuergen Gross xennet_set_rx_rsp_cons(queue,
1323b27d4795SJuergen Gross queue->rx.rsp_cons +
1324b27d4795SJuergen Gross skb_queue_len(&tmpq));
13250d160211SJeremy Fitzhardinge goto err;
13260d160211SJeremy Fitzhardinge }
13270d160211SJeremy Fitzhardinge }
13280d160211SJeremy Fitzhardinge
13293683243bSIan Campbell NETFRONT_SKB_CB(skb)->pull_to = rx->status;
13303683243bSIan Campbell if (NETFRONT_SKB_CB(skb)->pull_to > RX_COPY_THRESHOLD)
13313683243bSIan Campbell NETFRONT_SKB_CB(skb)->pull_to = RX_COPY_THRESHOLD;
13320d160211SJeremy Fitzhardinge
1333b54c9d5bSJonathan Lemon skb_frag_off_set(&skb_shinfo(skb)->frags[0], rx->offset);
13343683243bSIan Campbell skb_frag_size_set(&skb_shinfo(skb)->frags[0], rx->status);
13353683243bSIan Campbell skb->data_len = rx->status;
1336093b9c71SJan Beulich skb->len += rx->status;
13370d160211SJeremy Fitzhardinge
1338a761129eSDongli Zhang if (unlikely(xennet_fill_frags(queue, skb, &tmpq)))
1339ad4f15dcSJuergen Gross goto err;
13400d160211SJeremy Fitzhardinge
1341f942dc25SIan Campbell if (rx->flags & XEN_NETRXF_csum_blank)
13420d160211SJeremy Fitzhardinge skb->ip_summed = CHECKSUM_PARTIAL;
1343f942dc25SIan Campbell else if (rx->flags & XEN_NETRXF_data_validated)
13440d160211SJeremy Fitzhardinge skb->ip_summed = CHECKSUM_UNNECESSARY;
13450d160211SJeremy Fitzhardinge
13460d160211SJeremy Fitzhardinge __skb_queue_tail(&rxq, skb);
13470d160211SJeremy Fitzhardinge
1348b27d4795SJuergen Gross i = queue->rx.rsp_cons + 1;
1349b27d4795SJuergen Gross xennet_set_rx_rsp_cons(queue, i);
13500d160211SJeremy Fitzhardinge work_done++;
13510d160211SJeremy Fitzhardinge }
13526c5aa6fcSDenis Kirjanov if (need_xdp_flush)
13536c5aa6fcSDenis Kirjanov xdp_do_flush();
13540d160211SJeremy Fitzhardinge
135556cfe5d0SWang Chen __skb_queue_purge(&errq);
13560d160211SJeremy Fitzhardinge
13572688fcb7SAndrew J. Bennieston work_done -= handle_incoming_queue(queue, &rxq);
13580d160211SJeremy Fitzhardinge
13592688fcb7SAndrew J. Bennieston xennet_alloc_rx_buffers(queue);
13600d160211SJeremy Fitzhardinge
13610d160211SJeremy Fitzhardinge if (work_done < budget) {
1362bea3348eSStephen Hemminger int more_to_do = 0;
1363bea3348eSStephen Hemminger
13646ad20165SEric Dumazet napi_complete_done(napi, work_done);
13650d160211SJeremy Fitzhardinge
13662688fcb7SAndrew J. Bennieston RING_FINAL_CHECK_FOR_RESPONSES(&queue->rx, more_to_do);
13676a6dc08fSDavid Vrabel if (more_to_do)
13686a6dc08fSDavid Vrabel napi_schedule(napi);
13690d160211SJeremy Fitzhardinge }
13700d160211SJeremy Fitzhardinge
13712688fcb7SAndrew J. Bennieston spin_unlock(&queue->rx_lock);
13720d160211SJeremy Fitzhardinge
1373bea3348eSStephen Hemminger return work_done;
13740d160211SJeremy Fitzhardinge }
13750d160211SJeremy Fitzhardinge
13760d160211SJeremy Fitzhardinge static int xennet_change_mtu(struct net_device *dev, int mtu)
13770d160211SJeremy Fitzhardinge {
13780c36820eSJonathan Davies int max = xennet_can_sg(dev) ? XEN_NETIF_MAX_TX_SIZE : ETH_DATA_LEN;
13790d160211SJeremy Fitzhardinge
13800d160211SJeremy Fitzhardinge if (mtu > max)
13810d160211SJeremy Fitzhardinge return -EINVAL;
13820d160211SJeremy Fitzhardinge dev->mtu = mtu;
13830d160211SJeremy Fitzhardinge return 0;
13840d160211SJeremy Fitzhardinge }
13850d160211SJeremy Fitzhardinge
1386bc1f4470Sstephen hemminger static void xennet_get_stats64(struct net_device *dev,
1387e00f85beSstephen hemminger struct rtnl_link_stats64 *tot)
1388e00f85beSstephen hemminger {
1389e00f85beSstephen hemminger struct netfront_info *np = netdev_priv(dev);
1390e00f85beSstephen hemminger int cpu;
1391e00f85beSstephen hemminger
1392e00f85beSstephen hemminger for_each_possible_cpu(cpu) {
1393900e1833SDavid Vrabel struct netfront_stats *rx_stats = per_cpu_ptr(np->rx_stats, cpu);
1394900e1833SDavid Vrabel struct netfront_stats *tx_stats = per_cpu_ptr(np->tx_stats, cpu);
1395e00f85beSstephen hemminger u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
1396e00f85beSstephen hemminger unsigned int start;
1397e00f85beSstephen hemminger
1398e00f85beSstephen hemminger do {
1399068c38adSThomas Gleixner start = u64_stats_fetch_begin(&tx_stats->syncp);
1400900e1833SDavid Vrabel tx_packets = tx_stats->packets;
1401900e1833SDavid Vrabel tx_bytes = tx_stats->bytes;
1402068c38adSThomas Gleixner } while (u64_stats_fetch_retry(&tx_stats->syncp, start));
1403e00f85beSstephen hemminger
1404900e1833SDavid Vrabel do {
1405068c38adSThomas Gleixner start = u64_stats_fetch_begin(&rx_stats->syncp);
1406900e1833SDavid Vrabel rx_packets = rx_stats->packets;
1407900e1833SDavid Vrabel rx_bytes = rx_stats->bytes;
1408068c38adSThomas Gleixner } while (u64_stats_fetch_retry(&rx_stats->syncp, start));
1409e00f85beSstephen hemminger
1410e00f85beSstephen hemminger tot->rx_packets += rx_packets;
1411e00f85beSstephen hemminger tot->tx_packets += tx_packets;
1412e00f85beSstephen hemminger tot->rx_bytes += rx_bytes;
1413e00f85beSstephen hemminger tot->tx_bytes += tx_bytes;
1414e00f85beSstephen hemminger }
1415e00f85beSstephen hemminger
1416e00f85beSstephen hemminger tot->rx_errors = dev->stats.rx_errors;
1417e00f85beSstephen hemminger tot->tx_dropped = dev->stats.tx_dropped;
1418e00f85beSstephen hemminger }
1419e00f85beSstephen hemminger
14202688fcb7SAndrew J. Bennieston static void xennet_release_tx_bufs(struct netfront_queue *queue)
14210d160211SJeremy Fitzhardinge {
14220d160211SJeremy Fitzhardinge struct sk_buff *skb;
14230d160211SJeremy Fitzhardinge int i;
14240d160211SJeremy Fitzhardinge
14250d160211SJeremy Fitzhardinge for (i = 0; i < NET_TX_RING_SIZE; i++) {
14260d160211SJeremy Fitzhardinge /* Skip over entries which are actually freelist references */
142721631d2dSJuergen Gross if (!queue->tx_skbs[i])
14280d160211SJeremy Fitzhardinge continue;
14290d160211SJeremy Fitzhardinge
143021631d2dSJuergen Gross skb = queue->tx_skbs[i];
143121631d2dSJuergen Gross queue->tx_skbs[i] = NULL;
14322688fcb7SAndrew J. Bennieston get_page(queue->grant_tx_page[i]);
14332688fcb7SAndrew J. Bennieston gnttab_end_foreign_access(queue->grant_tx_ref[i],
143449f8b459SJuergen Gross queue->grant_tx_page[i]);
14352688fcb7SAndrew J. Bennieston queue->grant_tx_page[i] = NULL;
1436145daab2SJuergen Gross queue->grant_tx_ref[i] = INVALID_GRANT_REF;
143721631d2dSJuergen Gross add_id_to_list(&queue->tx_skb_freelist, queue->tx_link, i);
14380d160211SJeremy Fitzhardinge dev_kfree_skb_irq(skb);
14390d160211SJeremy Fitzhardinge }
14400d160211SJeremy Fitzhardinge }
14410d160211SJeremy Fitzhardinge
14422688fcb7SAndrew J. Bennieston static void xennet_release_rx_bufs(struct netfront_queue *queue)
14430d160211SJeremy Fitzhardinge {
14440d160211SJeremy Fitzhardinge int id, ref;
14450d160211SJeremy Fitzhardinge
14462688fcb7SAndrew J. Bennieston spin_lock_bh(&queue->rx_lock);
14470d160211SJeremy Fitzhardinge
14480d160211SJeremy Fitzhardinge for (id = 0; id < NET_RX_RING_SIZE; id++) {
1449cefe0078SAnnie Li struct sk_buff *skb;
1450cefe0078SAnnie Li struct page *page;
14510d160211SJeremy Fitzhardinge
14522688fcb7SAndrew J. Bennieston skb = queue->rx_skbs[id];
1453cefe0078SAnnie Li if (!skb)
1454cefe0078SAnnie Li continue;
1455cefe0078SAnnie Li
14562688fcb7SAndrew J. Bennieston ref = queue->grant_rx_ref[id];
1457145daab2SJuergen Gross if (ref == INVALID_GRANT_REF)
1458cefe0078SAnnie Li continue;
1459cefe0078SAnnie Li
1460cefe0078SAnnie Li page = skb_frag_page(&skb_shinfo(skb)->frags[0]);
1461cefe0078SAnnie Li
1462cefe0078SAnnie Li /* gnttab_end_foreign_access() needs a page ref until
1463cefe0078SAnnie Li * foreign access is ended (which may be deferred).
1464cefe0078SAnnie Li */
1465cefe0078SAnnie Li get_page(page);
146649f8b459SJuergen Gross gnttab_end_foreign_access(ref, page);
1467145daab2SJuergen Gross queue->grant_rx_ref[id] = INVALID_GRANT_REF;
14680d160211SJeremy Fitzhardinge
1469cefe0078SAnnie Li kfree_skb(skb);
14700d160211SJeremy Fitzhardinge }
14710d160211SJeremy Fitzhardinge
14722688fcb7SAndrew J. Bennieston spin_unlock_bh(&queue->rx_lock);
14730d160211SJeremy Fitzhardinge }
14740d160211SJeremy Fitzhardinge
1475c8f44affSMichał Mirosław static netdev_features_t xennet_fix_features(struct net_device *dev,
1476c8f44affSMichał Mirosław netdev_features_t features)
14778f7b01a1SEric Dumazet {
14788f7b01a1SEric Dumazet struct netfront_info *np = netdev_priv(dev);
14798f7b01a1SEric Dumazet
14802890ea5cSJuergen Gross if (features & NETIF_F_SG &&
14812890ea5cSJuergen Gross !xenbus_read_unsigned(np->xbdev->otherend, "feature-sg", 0))
14828f7b01a1SEric Dumazet features &= ~NETIF_F_SG;
14838f7b01a1SEric Dumazet
14842890ea5cSJuergen Gross if (features & NETIF_F_IPV6_CSUM &&
14852890ea5cSJuergen Gross !xenbus_read_unsigned(np->xbdev->otherend,
14862890ea5cSJuergen Gross "feature-ipv6-csum-offload", 0))
14872c0057deSPaul Durrant features &= ~NETIF_F_IPV6_CSUM;
14882c0057deSPaul Durrant
14892890ea5cSJuergen Gross if (features & NETIF_F_TSO &&
14902890ea5cSJuergen Gross !xenbus_read_unsigned(np->xbdev->otherend, "feature-gso-tcpv4", 0))
14918f7b01a1SEric Dumazet features &= ~NETIF_F_TSO;
14928f7b01a1SEric Dumazet
14932890ea5cSJuergen Gross if (features & NETIF_F_TSO6 &&
14942890ea5cSJuergen Gross !xenbus_read_unsigned(np->xbdev->otherend, "feature-gso-tcpv6", 0))
14952c0057deSPaul Durrant features &= ~NETIF_F_TSO6;
14962c0057deSPaul Durrant
14978f7b01a1SEric Dumazet return features;
14988f7b01a1SEric Dumazet }
14998f7b01a1SEric Dumazet
1500c8f44affSMichał Mirosław static int xennet_set_features(struct net_device *dev,
1501c8f44affSMichał Mirosław netdev_features_t features)
15028f7b01a1SEric Dumazet {
15038f7b01a1SEric Dumazet if (!(features & NETIF_F_SG) && dev->mtu > ETH_DATA_LEN) {
15048f7b01a1SEric Dumazet netdev_info(dev, "Reducing MTU because no SG offload");
15058f7b01a1SEric Dumazet dev->mtu = ETH_DATA_LEN;
15068f7b01a1SEric Dumazet }
15078f7b01a1SEric Dumazet
15088f7b01a1SEric Dumazet return 0;
15098f7b01a1SEric Dumazet }
15108f7b01a1SEric Dumazet
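/*
 * Reap TX completions under the tx_lock. Returns false if the device has
 * been marked broken; clears *eoi when work was done so the interrupt is
 * not flagged as spurious when the event channel is acknowledged.
 */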
1511b27d4795SJuergen Gross static bool xennet_handle_tx(struct netfront_queue *queue, unsigned int *eoi)
1512cf66f9d4SKonrad Rzeszutek Wilk {
1513cf66f9d4SKonrad Rzeszutek Wilk unsigned long flags;
1514cf66f9d4SKonrad Rzeszutek Wilk
1515b27d4795SJuergen Gross if (unlikely(queue->info->broken))
1516b27d4795SJuergen Gross return false;
1517a884daa6SJuergen Gross
15182688fcb7SAndrew J. Bennieston spin_lock_irqsave(&queue->tx_lock, flags);
1519b27d4795SJuergen Gross if (xennet_tx_buf_gc(queue))
1520b27d4795SJuergen Gross *eoi = 0;
15212688fcb7SAndrew J. Bennieston spin_unlock_irqrestore(&queue->tx_lock, flags);
1522cf66f9d4SKonrad Rzeszutek Wilk
1523b27d4795SJuergen Gross return true;
1524b27d4795SJuergen Gross }
1525b27d4795SJuergen Gross
1526b27d4795SJuergen Gross static irqreturn_t xennet_tx_interrupt(int irq, void *dev_id)
1527b27d4795SJuergen Gross {
1528b27d4795SJuergen Gross unsigned int eoiflag = XEN_EOI_FLAG_SPURIOUS;
1529b27d4795SJuergen Gross
1530b27d4795SJuergen Gross if (likely(xennet_handle_tx(dev_id, &eoiflag)))
1531b27d4795SJuergen Gross xen_irq_lateeoi(irq, eoiflag);
1532b27d4795SJuergen Gross
1533cf66f9d4SKonrad Rzeszutek Wilk return IRQ_HANDLED;
1534cf66f9d4SKonrad Rzeszutek Wilk }
1535cf66f9d4SKonrad Rzeszutek Wilk
1536b27d4795SJuergen Gross static bool xennet_handle_rx(struct netfront_queue *queue, unsigned int *eoi)
1537b27d4795SJuergen Gross {
1538b27d4795SJuergen Gross unsigned int work_queued;
1539b27d4795SJuergen Gross unsigned long flags;
1540b27d4795SJuergen Gross
1541b27d4795SJuergen Gross if (unlikely(queue->info->broken))
1542b27d4795SJuergen Gross return false;
1543b27d4795SJuergen Gross
1544b27d4795SJuergen Gross spin_lock_irqsave(&queue->rx_cons_lock, flags);
15456fac592cSJuergen Gross work_queued = XEN_RING_NR_UNCONSUMED_RESPONSES(&queue->rx);
1546b27d4795SJuergen Gross if (work_queued > queue->rx_rsp_unconsumed) {
1547b27d4795SJuergen Gross queue->rx_rsp_unconsumed = work_queued;
1548b27d4795SJuergen Gross *eoi = 0;
1549b27d4795SJuergen Gross } else if (unlikely(work_queued < queue->rx_rsp_unconsumed)) {
1550b27d4795SJuergen Gross const struct device *dev = &queue->info->netdev->dev;
1551b27d4795SJuergen Gross
1552b27d4795SJuergen Gross spin_unlock_irqrestore(&queue->rx_cons_lock, flags);
1553b27d4795SJuergen Gross dev_alert(dev, "RX producer index going backwards\n");
1554b27d4795SJuergen Gross dev_alert(dev, "Disabled for further use\n");
1555b27d4795SJuergen Gross queue->info->broken = true;
1556b27d4795SJuergen Gross return false;
1557b27d4795SJuergen Gross }
1558b27d4795SJuergen Gross spin_unlock_irqrestore(&queue->rx_cons_lock, flags);
1559b27d4795SJuergen Gross
1560b27d4795SJuergen Gross if (likely(netif_carrier_ok(queue->info->netdev) && work_queued))
1561b27d4795SJuergen Gross napi_schedule(&queue->napi);
1562b27d4795SJuergen Gross
1563b27d4795SJuergen Gross return true;
1564b27d4795SJuergen Gross }
1565b27d4795SJuergen Gross
1566d634bf2cSWei Liu static irqreturn_t xennet_rx_interrupt(int irq, void *dev_id)
1567d634bf2cSWei Liu {
1568b27d4795SJuergen Gross unsigned int eoiflag = XEN_EOI_FLAG_SPURIOUS;
1569d634bf2cSWei Liu
1570b27d4795SJuergen Gross if (likely(xennet_handle_rx(dev_id, &eoiflag)))
1571b27d4795SJuergen Gross xen_irq_lateeoi(irq, eoiflag);
1572d634bf2cSWei Liu
1573d634bf2cSWei Liu return IRQ_HANDLED;
1574d634bf2cSWei Liu }
1575d634bf2cSWei Liu
1576d634bf2cSWei Liu static irqreturn_t xennet_interrupt(int irq, void *dev_id)
1577d634bf2cSWei Liu {
1578b27d4795SJuergen Gross unsigned int eoiflag = XEN_EOI_FLAG_SPURIOUS;
1579b27d4795SJuergen Gross
1580b27d4795SJuergen Gross if (xennet_handle_tx(dev_id, &eoiflag) &&
1581b27d4795SJuergen Gross xennet_handle_rx(dev_id, &eoiflag))
1582b27d4795SJuergen Gross xen_irq_lateeoi(irq, eoiflag);
1583b27d4795SJuergen Gross
1584d634bf2cSWei Liu return IRQ_HANDLED;
1585d634bf2cSWei Liu }
1586d634bf2cSWei Liu
1587cf66f9d4SKonrad Rzeszutek Wilk #ifdef CONFIG_NET_POLL_CONTROLLER
1588cf66f9d4SKonrad Rzeszutek Wilk static void xennet_poll_controller(struct net_device *dev)
1589cf66f9d4SKonrad Rzeszutek Wilk {
15902688fcb7SAndrew J. Bennieston /* Poll each queue */
15912688fcb7SAndrew J. Bennieston struct netfront_info *info = netdev_priv(dev);
15922688fcb7SAndrew J. Bennieston unsigned int num_queues = dev->real_num_tx_queues;
15932688fcb7SAndrew J. Bennieston unsigned int i;
1594a884daa6SJuergen Gross
1595a884daa6SJuergen Gross if (info->broken)
1596a884daa6SJuergen Gross return;
1597a884daa6SJuergen Gross
15982688fcb7SAndrew J. Bennieston for (i = 0; i < num_queues; ++i)
15992688fcb7SAndrew J. Bennieston xennet_interrupt(0, &info->queues[i]);
1600cf66f9d4SKonrad Rzeszutek Wilk }
1601cf66f9d4SKonrad Rzeszutek Wilk #endif
1602cf66f9d4SKonrad Rzeszutek Wilk
16036c5aa6fcSDenis Kirjanov #define NETBACK_XDP_HEADROOM_DISABLE 0
16046c5aa6fcSDenis Kirjanov #define NETBACK_XDP_HEADROOM_ENABLE 1
16056c5aa6fcSDenis Kirjanov
16066c5aa6fcSDenis Kirjanov static int talk_to_netback_xdp(struct netfront_info *np, int xdp)
16076c5aa6fcSDenis Kirjanov {
16086c5aa6fcSDenis Kirjanov int err;
16096c5aa6fcSDenis Kirjanov unsigned short headroom;
16106c5aa6fcSDenis Kirjanov
16116c5aa6fcSDenis Kirjanov headroom = xdp ? XDP_PACKET_HEADROOM : 0;
16126c5aa6fcSDenis Kirjanov err = xenbus_printf(XBT_NIL, np->xbdev->nodename,
16136c5aa6fcSDenis Kirjanov "xdp-headroom", "%hu",
16146c5aa6fcSDenis Kirjanov headroom);
16156c5aa6fcSDenis Kirjanov if (err)
16166c5aa6fcSDenis Kirjanov pr_warn("Error writing xdp-headroom\n");
16176c5aa6fcSDenis Kirjanov
16186c5aa6fcSDenis Kirjanov return err;
16196c5aa6fcSDenis Kirjanov }
16206c5aa6fcSDenis Kirjanov
16216c5aa6fcSDenis Kirjanov static int xennet_xdp_set(struct net_device *dev, struct bpf_prog *prog,
16226c5aa6fcSDenis Kirjanov struct netlink_ext_ack *extack)
16236c5aa6fcSDenis Kirjanov {
16246c5aa6fcSDenis Kirjanov unsigned long max_mtu = XEN_PAGE_SIZE - XDP_PACKET_HEADROOM;
16256c5aa6fcSDenis Kirjanov struct netfront_info *np = netdev_priv(dev);
16266c5aa6fcSDenis Kirjanov struct bpf_prog *old_prog;
16276c5aa6fcSDenis Kirjanov unsigned int i, err;
16286c5aa6fcSDenis Kirjanov
16296c5aa6fcSDenis Kirjanov if (dev->mtu > max_mtu) {
16306c5aa6fcSDenis Kirjanov netdev_warn(dev, "XDP requires MTU less than %lu\n", max_mtu);
16316c5aa6fcSDenis Kirjanov return -EINVAL;
16326c5aa6fcSDenis Kirjanov }
16336c5aa6fcSDenis Kirjanov
16346c5aa6fcSDenis Kirjanov if (!np->netback_has_xdp_headroom)
16356c5aa6fcSDenis Kirjanov return 0;
16366c5aa6fcSDenis Kirjanov
16376c5aa6fcSDenis Kirjanov xenbus_switch_state(np->xbdev, XenbusStateReconfiguring);
16386c5aa6fcSDenis Kirjanov
16396c5aa6fcSDenis Kirjanov err = talk_to_netback_xdp(np, prog ? NETBACK_XDP_HEADROOM_ENABLE :
16406c5aa6fcSDenis Kirjanov NETBACK_XDP_HEADROOM_DISABLE);
16416c5aa6fcSDenis Kirjanov if (err)
16426c5aa6fcSDenis Kirjanov return err;
16436c5aa6fcSDenis Kirjanov
16446c5aa6fcSDenis Kirjanov /* avoid the race with XDP headroom adjustment */
16456c5aa6fcSDenis Kirjanov wait_event(module_wq,
16466c5aa6fcSDenis Kirjanov xenbus_read_driver_state(np->xbdev->otherend) ==
16476c5aa6fcSDenis Kirjanov XenbusStateReconfigured);
16486c5aa6fcSDenis Kirjanov np->netfront_xdp_enabled = true;
16496c5aa6fcSDenis Kirjanov
16506c5aa6fcSDenis Kirjanov old_prog = rtnl_dereference(np->queues[0].xdp_prog);
16516c5aa6fcSDenis Kirjanov
16526c5aa6fcSDenis Kirjanov if (prog)
16536c5aa6fcSDenis Kirjanov bpf_prog_add(prog, dev->real_num_tx_queues);
16546c5aa6fcSDenis Kirjanov
16556c5aa6fcSDenis Kirjanov for (i = 0; i < dev->real_num_tx_queues; ++i)
16566c5aa6fcSDenis Kirjanov rcu_assign_pointer(np->queues[i].xdp_prog, prog);
16576c5aa6fcSDenis Kirjanov
16586c5aa6fcSDenis Kirjanov if (old_prog)
16596c5aa6fcSDenis Kirjanov for (i = 0; i < dev->real_num_tx_queues; ++i)
16606c5aa6fcSDenis Kirjanov bpf_prog_put(old_prog);
16616c5aa6fcSDenis Kirjanov
16626c5aa6fcSDenis Kirjanov xenbus_switch_state(np->xbdev, XenbusStateConnected);
16636c5aa6fcSDenis Kirjanov
16646c5aa6fcSDenis Kirjanov return 0;
16656c5aa6fcSDenis Kirjanov }
16666c5aa6fcSDenis Kirjanov
16676c5aa6fcSDenis Kirjanov static int xennet_xdp(struct net_device *dev, struct netdev_bpf *xdp)
16686c5aa6fcSDenis Kirjanov {
1669a884daa6SJuergen Gross struct netfront_info *np = netdev_priv(dev);
1670a884daa6SJuergen Gross
1671a884daa6SJuergen Gross if (np->broken)
1672a884daa6SJuergen Gross return -ENODEV;
1673a884daa6SJuergen Gross
16746c5aa6fcSDenis Kirjanov switch (xdp->command) {
16756c5aa6fcSDenis Kirjanov case XDP_SETUP_PROG:
16766c5aa6fcSDenis Kirjanov return xennet_xdp_set(dev, xdp->prog, xdp->extack);
16776c5aa6fcSDenis Kirjanov default:
16786c5aa6fcSDenis Kirjanov return -EINVAL;
16796c5aa6fcSDenis Kirjanov }
16806c5aa6fcSDenis Kirjanov }
16816c5aa6fcSDenis Kirjanov
16820a0b9d2eSStephen Hemminger static const struct net_device_ops xennet_netdev_ops = {
1683dcf4ff7aSMarek Marczykowski-Górecki .ndo_uninit = xennet_uninit,
16840a0b9d2eSStephen Hemminger .ndo_open = xennet_open,
16850a0b9d2eSStephen Hemminger .ndo_stop = xennet_close,
16860a0b9d2eSStephen Hemminger .ndo_start_xmit = xennet_start_xmit,
16870a0b9d2eSStephen Hemminger .ndo_change_mtu = xennet_change_mtu,
1688e00f85beSstephen hemminger .ndo_get_stats64 = xennet_get_stats64,
16890a0b9d2eSStephen Hemminger .ndo_set_mac_address = eth_mac_addr,
16900a0b9d2eSStephen Hemminger .ndo_validate_addr = eth_validate_addr,
1691fb507934SMichał Mirosław .ndo_fix_features = xennet_fix_features,
1692fb507934SMichał Mirosław .ndo_set_features = xennet_set_features,
16932688fcb7SAndrew J. Bennieston .ndo_select_queue = xennet_select_queue,
16946c5aa6fcSDenis Kirjanov .ndo_bpf = xennet_xdp,
16956c5aa6fcSDenis Kirjanov .ndo_xdp_xmit = xennet_xdp_xmit,
1696cf66f9d4SKonrad Rzeszutek Wilk #ifdef CONFIG_NET_POLL_CONTROLLER
1697cf66f9d4SKonrad Rzeszutek Wilk .ndo_poll_controller = xennet_poll_controller,
1698cf66f9d4SKonrad Rzeszutek Wilk #endif
16990a0b9d2eSStephen Hemminger };
17000a0b9d2eSStephen Hemminger
1701900e1833SDavid Vrabel static void xennet_free_netdev(struct net_device *netdev)
1702900e1833SDavid Vrabel {
1703900e1833SDavid Vrabel struct netfront_info *np = netdev_priv(netdev);
1704900e1833SDavid Vrabel
1705900e1833SDavid Vrabel free_percpu(np->rx_stats);
1706900e1833SDavid Vrabel free_percpu(np->tx_stats);
1707900e1833SDavid Vrabel free_netdev(netdev);
1708900e1833SDavid Vrabel }
1709900e1833SDavid Vrabel
17108e0e46bbSBill Pemberton static struct net_device *xennet_create_dev(struct xenbus_device *dev)
17110d160211SJeremy Fitzhardinge {
17122688fcb7SAndrew J. Bennieston int err;
17130d160211SJeremy Fitzhardinge struct net_device *netdev;
17140d160211SJeremy Fitzhardinge struct netfront_info *np;
17150d160211SJeremy Fitzhardinge
171650ee6061SAndrew J. Bennieston netdev = alloc_etherdev_mq(sizeof(struct netfront_info), xennet_max_queues);
171741de8d4cSJoe Perches if (!netdev)
17180d160211SJeremy Fitzhardinge return ERR_PTR(-ENOMEM);
17190d160211SJeremy Fitzhardinge
17200d160211SJeremy Fitzhardinge np = netdev_priv(netdev);
17210d160211SJeremy Fitzhardinge np->xbdev = dev;
17220d160211SJeremy Fitzhardinge
17232688fcb7SAndrew J. Bennieston np->queues = NULL;
17240d160211SJeremy Fitzhardinge
1725e00f85beSstephen hemminger err = -ENOMEM;
1726900e1833SDavid Vrabel np->rx_stats = netdev_alloc_pcpu_stats(struct netfront_stats);
1727900e1833SDavid Vrabel if (np->rx_stats == NULL)
1728900e1833SDavid Vrabel goto exit;
1729900e1833SDavid Vrabel np->tx_stats = netdev_alloc_pcpu_stats(struct netfront_stats);
1730900e1833SDavid Vrabel if (np->tx_stats == NULL)
1731e00f85beSstephen hemminger goto exit;
1732e00f85beSstephen hemminger
17330a0b9d2eSStephen Hemminger netdev->netdev_ops = &xennet_netdev_ops;
17340a0b9d2eSStephen Hemminger
1735fb507934SMichał Mirosław netdev->features = NETIF_F_IP_CSUM | NETIF_F_RXCSUM |
1736fb507934SMichał Mirosław NETIF_F_GSO_ROBUST;
17372c0057deSPaul Durrant netdev->hw_features = NETIF_F_SG |
17382c0057deSPaul Durrant NETIF_F_IPV6_CSUM |
17392c0057deSPaul Durrant NETIF_F_TSO | NETIF_F_TSO6;
17400d160211SJeremy Fitzhardinge
1741fc3e5941SIan Campbell /*
1742fc3e5941SIan Campbell * Assume that all hw features are available for now. This set
1743fc3e5941SIan Campbell * will be adjusted by the call to netdev_update_features() in
1744fc3e5941SIan Campbell * xennet_connect() which is the earliest point where we can
1745fc3e5941SIan Campbell * negotiate with the backend regarding supported features.
1746fc3e5941SIan Campbell */
1747fc3e5941SIan Campbell netdev->features |= netdev->hw_features;
174866c0e13aSMarek Majtyka netdev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
174966c0e13aSMarek Majtyka NETDEV_XDP_ACT_NDO_XMIT;
1750fc3e5941SIan Campbell
17517ad24ea4SWilfried Klaebe netdev->ethtool_ops = &xennet_ethtool_ops;
1752e1043a4bSMohammed Gamal netdev->min_mtu = ETH_MIN_MTU;
1753d0c2c997SJarod Wilson netdev->max_mtu = XEN_NETIF_MAX_TX_SIZE;
17540d160211SJeremy Fitzhardinge SET_NETDEV_DEV(netdev, &dev->dev);
17550d160211SJeremy Fitzhardinge
17560d160211SJeremy Fitzhardinge np->netdev = netdev;
17576c5aa6fcSDenis Kirjanov np->netfront_xdp_enabled = false;
17580d160211SJeremy Fitzhardinge
17590d160211SJeremy Fitzhardinge netif_carrier_off(netdev);
17600d160211SJeremy Fitzhardinge
1761c2c63310SAndrea Righi do {
1762b707fda2SEduardo Otubo xenbus_switch_state(dev, XenbusStateInitialising);
1763c2c63310SAndrea Righi err = wait_event_timeout(module_wq,
1764822fb18aSXiao Liang xenbus_read_driver_state(dev->otherend) !=
1765822fb18aSXiao Liang XenbusStateClosed &&
1766822fb18aSXiao Liang xenbus_read_driver_state(dev->otherend) !=
1767c2c63310SAndrea Righi XenbusStateUnknown, XENNET_TIMEOUT);
1768c2c63310SAndrea Righi } while (!err);
1769c2c63310SAndrea Righi
17700d160211SJeremy Fitzhardinge return netdev;
17710d160211SJeremy Fitzhardinge
17720d160211SJeremy Fitzhardinge exit:
1773900e1833SDavid Vrabel xennet_free_netdev(netdev);
17740d160211SJeremy Fitzhardinge return ERR_PTR(err);
17750d160211SJeremy Fitzhardinge }
17760d160211SJeremy Fitzhardinge
177780708602SLee Jones /*
17780d160211SJeremy Fitzhardinge * Entry point to this code when a new device is created. Allocate the basic
17790d160211SJeremy Fitzhardinge * structures and the ring buffers for communication with the backend, and
17800d160211SJeremy Fitzhardinge * inform the backend of the appropriate details for those.
17810d160211SJeremy Fitzhardinge */
17828e0e46bbSBill Pemberton static int netfront_probe(struct xenbus_device *dev,
17830d160211SJeremy Fitzhardinge const struct xenbus_device_id *id)
17840d160211SJeremy Fitzhardinge {
17850d160211SJeremy Fitzhardinge int err;
17860d160211SJeremy Fitzhardinge struct net_device *netdev;
17870d160211SJeremy Fitzhardinge struct netfront_info *info;
17880d160211SJeremy Fitzhardinge
17890d160211SJeremy Fitzhardinge netdev = xennet_create_dev(dev);
17900d160211SJeremy Fitzhardinge if (IS_ERR(netdev)) {
17910d160211SJeremy Fitzhardinge err = PTR_ERR(netdev);
17920d160211SJeremy Fitzhardinge xenbus_dev_fatal(dev, err, "creating netdev");
17930d160211SJeremy Fitzhardinge return err;
17940d160211SJeremy Fitzhardinge }
17950d160211SJeremy Fitzhardinge
17960d160211SJeremy Fitzhardinge info = netdev_priv(netdev);
17971b713e00SGreg Kroah-Hartman dev_set_drvdata(&dev->dev, info);
179827b917e5STakashi Iwai #ifdef CONFIG_SYSFS
179927b917e5STakashi Iwai info->netdev->sysfs_groups[0] = &xennet_dev_group;
180027b917e5STakashi Iwai #endif
18010d160211SJeremy Fitzhardinge
18020d160211SJeremy Fitzhardinge return 0;
18030d160211SJeremy Fitzhardinge }
18040d160211SJeremy Fitzhardinge
18050d160211SJeremy Fitzhardinge static void xennet_end_access(int ref, void *page)
18060d160211SJeremy Fitzhardinge {
18070d160211SJeremy Fitzhardinge /* This frees the page as a side-effect */
1808145daab2SJuergen Gross if (ref != INVALID_GRANT_REF)
180949f8b459SJuergen Gross gnttab_end_foreign_access(ref, virt_to_page(page));
18100d160211SJeremy Fitzhardinge }
18110d160211SJeremy Fitzhardinge
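/*
 * Tear down the connection to the backend for every queue: unbind the
 * IRQs, release TX/RX buffers and their grant references, and end access
 * to the shared ring pages.
 */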
18120d160211SJeremy Fitzhardinge static void xennet_disconnect_backend(struct netfront_info *info)
18130d160211SJeremy Fitzhardinge {
18142688fcb7SAndrew J. Bennieston unsigned int i = 0;
18152688fcb7SAndrew J. Bennieston unsigned int num_queues = info->netdev->real_num_tx_queues;
18160d160211SJeremy Fitzhardinge
1817f9feb1e6SDavid Vrabel netif_carrier_off(info->netdev);
1818f9feb1e6SDavid Vrabel
18199a873c71SChas Williams for (i = 0; i < num_queues && info->queues; ++i) {
182076541869SDavid Vrabel struct netfront_queue *queue = &info->queues[i];
182176541869SDavid Vrabel
182274470954SBoris Ostrovsky del_timer_sync(&queue->rx_refill_timer);
182374470954SBoris Ostrovsky
18242688fcb7SAndrew J. Bennieston if (queue->tx_irq && (queue->tx_irq == queue->rx_irq))
18252688fcb7SAndrew J. Bennieston unbind_from_irqhandler(queue->tx_irq, queue);
18262688fcb7SAndrew J. Bennieston if (queue->tx_irq && (queue->tx_irq != queue->rx_irq)) {
18272688fcb7SAndrew J. Bennieston unbind_from_irqhandler(queue->tx_irq, queue);
18282688fcb7SAndrew J. Bennieston unbind_from_irqhandler(queue->rx_irq, queue);
1829d634bf2cSWei Liu }
18302688fcb7SAndrew J. Bennieston queue->tx_evtchn = queue->rx_evtchn = 0;
18312688fcb7SAndrew J. Bennieston queue->tx_irq = queue->rx_irq = 0;
18320d160211SJeremy Fitzhardinge
1833274b0455SChas Williams if (netif_running(info->netdev))
1834f9feb1e6SDavid Vrabel napi_synchronize(&queue->napi);
1835f9feb1e6SDavid Vrabel
1836a5b5dc3cSDavid Vrabel xennet_release_tx_bufs(queue);
1837a5b5dc3cSDavid Vrabel xennet_release_rx_bufs(queue);
1838a5b5dc3cSDavid Vrabel gnttab_free_grant_references(queue->gref_tx_head);
1839a5b5dc3cSDavid Vrabel gnttab_free_grant_references(queue->gref_rx_head);
1840a5b5dc3cSDavid Vrabel
18410d160211SJeremy Fitzhardinge /* End access and free the pages */
18422688fcb7SAndrew J. Bennieston xennet_end_access(queue->tx_ring_ref, queue->tx.sring);
18432688fcb7SAndrew J. Bennieston xennet_end_access(queue->rx_ring_ref, queue->rx.sring);
18440d160211SJeremy Fitzhardinge
1845145daab2SJuergen Gross queue->tx_ring_ref = INVALID_GRANT_REF;
1846145daab2SJuergen Gross queue->rx_ring_ref = INVALID_GRANT_REF;
18472688fcb7SAndrew J. Bennieston queue->tx.sring = NULL;
18482688fcb7SAndrew J. Bennieston queue->rx.sring = NULL;
18496c5aa6fcSDenis Kirjanov
18506c5aa6fcSDenis Kirjanov page_pool_destroy(queue->page_pool);
18512688fcb7SAndrew J. Bennieston }
18520d160211SJeremy Fitzhardinge }
18530d160211SJeremy Fitzhardinge
185480708602SLee Jones /*
18550d160211SJeremy Fitzhardinge * We are reconnecting to the backend, due to a suspend/resume, or a backend
18560d160211SJeremy Fitzhardinge * driver restart. We tear down our netif structure and recreate it, but
18570d160211SJeremy Fitzhardinge * leave the device-layer structures intact so that this is transparent to the
18580d160211SJeremy Fitzhardinge * rest of the kernel.
18590d160211SJeremy Fitzhardinge */
18600d160211SJeremy Fitzhardinge static int netfront_resume(struct xenbus_device *dev)
18610d160211SJeremy Fitzhardinge {
18621b713e00SGreg Kroah-Hartman struct netfront_info *info = dev_get_drvdata(&dev->dev);
18630d160211SJeremy Fitzhardinge
18640d160211SJeremy Fitzhardinge dev_dbg(&dev->dev, "%s\n", dev->nodename);
18650d160211SJeremy Fitzhardinge
1866042b2046SDongli Zhang netif_tx_lock_bh(info->netdev);
1867042b2046SDongli Zhang netif_device_detach(info->netdev);
1868042b2046SDongli Zhang netif_tx_unlock_bh(info->netdev);
1869042b2046SDongli Zhang
18700d160211SJeremy Fitzhardinge xennet_disconnect_backend(info);
1871d50b7914SLin Liu
1872d50b7914SLin Liu rtnl_lock();
1873d50b7914SLin Liu if (info->queues)
1874d50b7914SLin Liu xennet_destroy_queues(info);
1875d50b7914SLin Liu rtnl_unlock();
1876d50b7914SLin Liu
18770d160211SJeremy Fitzhardinge return 0;
18780d160211SJeremy Fitzhardinge }
18790d160211SJeremy Fitzhardinge
18800d160211SJeremy Fitzhardinge static int xen_net_read_mac(struct xenbus_device *dev, u8 mac[])
18810d160211SJeremy Fitzhardinge {
18820d160211SJeremy Fitzhardinge char *s, *e, *macstr;
18830d160211SJeremy Fitzhardinge int i;
18840d160211SJeremy Fitzhardinge
18850d160211SJeremy Fitzhardinge macstr = s = xenbus_read(XBT_NIL, dev->nodename, "mac", NULL);
18860d160211SJeremy Fitzhardinge if (IS_ERR(macstr))
18870d160211SJeremy Fitzhardinge return PTR_ERR(macstr);
18880d160211SJeremy Fitzhardinge
18890d160211SJeremy Fitzhardinge for (i = 0; i < ETH_ALEN; i++) {
18900d160211SJeremy Fitzhardinge mac[i] = simple_strtoul(s, &e, 16);
18910d160211SJeremy Fitzhardinge if ((s == e) || (*e != ((i == ETH_ALEN-1) ? '\0' : ':'))) {
18920d160211SJeremy Fitzhardinge kfree(macstr);
18930d160211SJeremy Fitzhardinge return -ENOENT;
18940d160211SJeremy Fitzhardinge }
18950d160211SJeremy Fitzhardinge s = e+1;
18960d160211SJeremy Fitzhardinge }
18970d160211SJeremy Fitzhardinge
18980d160211SJeremy Fitzhardinge kfree(macstr);
18990d160211SJeremy Fitzhardinge return 0;
19000d160211SJeremy Fitzhardinge }
19010d160211SJeremy Fitzhardinge
19022688fcb7SAndrew J. Bennieston static int setup_netfront_single(struct netfront_queue *queue)
1903d634bf2cSWei Liu {
1904d634bf2cSWei Liu int err;
1905d634bf2cSWei Liu
19062688fcb7SAndrew J. Bennieston err = xenbus_alloc_evtchn(queue->info->xbdev, &queue->tx_evtchn);
1907d634bf2cSWei Liu if (err < 0)
1908d634bf2cSWei Liu goto fail;
1909d634bf2cSWei Liu
1910b27d4795SJuergen Gross err = bind_evtchn_to_irqhandler_lateeoi(queue->tx_evtchn,
1911b27d4795SJuergen Gross xennet_interrupt, 0,
1912b27d4795SJuergen Gross queue->info->netdev->name,
1913b27d4795SJuergen Gross queue);
1914d634bf2cSWei Liu if (err < 0)
1915d634bf2cSWei Liu goto bind_fail;
19162688fcb7SAndrew J. Bennieston queue->rx_evtchn = queue->tx_evtchn;
19172688fcb7SAndrew J. Bennieston queue->rx_irq = queue->tx_irq = err;
1918d634bf2cSWei Liu
1919d634bf2cSWei Liu return 0;
1920d634bf2cSWei Liu
1921d634bf2cSWei Liu bind_fail:
19222688fcb7SAndrew J. Bennieston xenbus_free_evtchn(queue->info->xbdev, queue->tx_evtchn);
19232688fcb7SAndrew J. Bennieston queue->tx_evtchn = 0;
1924d634bf2cSWei Liu fail:
1925d634bf2cSWei Liu return err;
1926d634bf2cSWei Liu }
1927d634bf2cSWei Liu
19282688fcb7SAndrew J. Bennieston static int setup_netfront_split(struct netfront_queue *queue)
1929d634bf2cSWei Liu {
1930d634bf2cSWei Liu int err;
1931d634bf2cSWei Liu
19322688fcb7SAndrew J. Bennieston err = xenbus_alloc_evtchn(queue->info->xbdev, &queue->tx_evtchn);
1933d634bf2cSWei Liu if (err < 0)
1934d634bf2cSWei Liu goto fail;
19352688fcb7SAndrew J. Bennieston err = xenbus_alloc_evtchn(queue->info->xbdev, &queue->rx_evtchn);
1936d634bf2cSWei Liu if (err < 0)
1937d634bf2cSWei Liu goto alloc_rx_evtchn_fail;
1938d634bf2cSWei Liu
19392688fcb7SAndrew J. Bennieston snprintf(queue->tx_irq_name, sizeof(queue->tx_irq_name),
19402688fcb7SAndrew J. Bennieston "%s-tx", queue->name);
1941b27d4795SJuergen Gross err = bind_evtchn_to_irqhandler_lateeoi(queue->tx_evtchn,
1942b27d4795SJuergen Gross xennet_tx_interrupt, 0,
1943b27d4795SJuergen Gross queue->tx_irq_name, queue);
1944d634bf2cSWei Liu if (err < 0)
1945d634bf2cSWei Liu goto bind_tx_fail;
19462688fcb7SAndrew J. Bennieston queue->tx_irq = err;
1947d634bf2cSWei Liu
19482688fcb7SAndrew J. Bennieston snprintf(queue->rx_irq_name, sizeof(queue->rx_irq_name),
19492688fcb7SAndrew J. Bennieston "%s-rx", queue->name);
1950b27d4795SJuergen Gross err = bind_evtchn_to_irqhandler_lateeoi(queue->rx_evtchn,
1951b27d4795SJuergen Gross xennet_rx_interrupt, 0,
1952b27d4795SJuergen Gross queue->rx_irq_name, queue);
1953d634bf2cSWei Liu if (err < 0)
1954d634bf2cSWei Liu goto bind_rx_fail;
19552688fcb7SAndrew J. Bennieston queue->rx_irq = err;
1956d634bf2cSWei Liu
1957d634bf2cSWei Liu return 0;
1958d634bf2cSWei Liu
1959d634bf2cSWei Liu bind_rx_fail:
19602688fcb7SAndrew J. Bennieston unbind_from_irqhandler(queue->tx_irq, queue);
19612688fcb7SAndrew J. Bennieston queue->tx_irq = 0;
1962d634bf2cSWei Liu bind_tx_fail:
19632688fcb7SAndrew J. Bennieston xenbus_free_evtchn(queue->info->xbdev, queue->rx_evtchn);
19642688fcb7SAndrew J. Bennieston queue->rx_evtchn = 0;
1965d634bf2cSWei Liu alloc_rx_evtchn_fail:
19662688fcb7SAndrew J. Bennieston xenbus_free_evtchn(queue->info->xbdev, queue->tx_evtchn);
19672688fcb7SAndrew J. Bennieston queue->tx_evtchn = 0;
1968d634bf2cSWei Liu fail:
1969d634bf2cSWei Liu return err;
1970d634bf2cSWei Liu }
1971d634bf2cSWei Liu
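/*
 * Allocate and grant the shared TX/RX rings for one queue and bind its
 * event channel(s), preferring split TX/RX channels when the backend
 * supports them and falling back to a single shared channel otherwise.
 */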
19722688fcb7SAndrew J. Bennieston static int setup_netfront(struct xenbus_device *dev,
19732688fcb7SAndrew J. Bennieston struct netfront_queue *queue, unsigned int feature_split_evtchn)
19740d160211SJeremy Fitzhardinge {
19750d160211SJeremy Fitzhardinge struct xen_netif_tx_sring *txs;
197646e20d43SJuergen Gross struct xen_netif_rx_sring *rxs;
19770d160211SJeremy Fitzhardinge int err;
19780d160211SJeremy Fitzhardinge
1979145daab2SJuergen Gross queue->tx_ring_ref = INVALID_GRANT_REF;
1980145daab2SJuergen Gross queue->rx_ring_ref = INVALID_GRANT_REF;
19812688fcb7SAndrew J. Bennieston queue->rx.sring = NULL;
19822688fcb7SAndrew J. Bennieston queue->tx.sring = NULL;
19830d160211SJeremy Fitzhardinge
198446e20d43SJuergen Gross err = xenbus_setup_ring(dev, GFP_NOIO | __GFP_HIGH, (void **)&txs,
198546e20d43SJuergen Gross 1, &queue->tx_ring_ref);
198646e20d43SJuergen Gross if (err)
19870d160211SJeremy Fitzhardinge goto fail;
19880d160211SJeremy Fitzhardinge
198946e20d43SJuergen Gross XEN_FRONT_RING_INIT(&queue->tx, txs, XEN_PAGE_SIZE);
19900d160211SJeremy Fitzhardinge
199146e20d43SJuergen Gross err = xenbus_setup_ring(dev, GFP_NOIO | __GFP_HIGH, (void **)&rxs,
199246e20d43SJuergen Gross 1, &queue->rx_ring_ref);
199346e20d43SJuergen Gross if (err)
199466e3531bSJuergen Gross goto fail;
19950d160211SJeremy Fitzhardinge
199646e20d43SJuergen Gross XEN_FRONT_RING_INIT(&queue->rx, rxs, XEN_PAGE_SIZE);
19970d160211SJeremy Fitzhardinge
1998d634bf2cSWei Liu if (feature_split_evtchn)
19992688fcb7SAndrew J. Bennieston err = setup_netfront_split(queue);
2000d634bf2cSWei Liu /* setup single event channel if
2001d634bf2cSWei Liu * a) feature-split-event-channels == 0
2002d634bf2cSWei Liu * b) feature-split-event-channels == 1 but failed to setup
2003d634bf2cSWei Liu */
2004e93fac3bSJiapeng Chong if (!feature_split_evtchn || err)
20052688fcb7SAndrew J. Bennieston err = setup_netfront_single(queue);
2006d634bf2cSWei Liu
20070d160211SJeremy Fitzhardinge if (err)
200866e3531bSJuergen Gross goto fail;
20090d160211SJeremy Fitzhardinge
20100d160211SJeremy Fitzhardinge return 0;
20110d160211SJeremy Fitzhardinge
20120d160211SJeremy Fitzhardinge fail:
201346e20d43SJuergen Gross xenbus_teardown_ring((void **)&queue->rx.sring, 1, &queue->rx_ring_ref);
201446e20d43SJuergen Gross xenbus_teardown_ring((void **)&queue->tx.sring, 1, &queue->tx_ring_ref);
201546e20d43SJuergen Gross
20160d160211SJeremy Fitzhardinge return err;
20170d160211SJeremy Fitzhardinge }
20180d160211SJeremy Fitzhardinge
20192688fcb7SAndrew J. Bennieston /* Queue-specific initialisation
20202688fcb7SAndrew J. Bennieston * This used to be done in xennet_create_dev() but must now
20212688fcb7SAndrew J. Bennieston * be run per-queue.
20222688fcb7SAndrew J. Bennieston */
20232688fcb7SAndrew J. Bennieston static int xennet_init_queue(struct netfront_queue *queue)
20242688fcb7SAndrew J. Bennieston {
20252688fcb7SAndrew J. Bennieston unsigned short i;
20262688fcb7SAndrew J. Bennieston int err = 0;
202721f2706bSXiao Liang char *devid;
20282688fcb7SAndrew J. Bennieston
20292688fcb7SAndrew J. Bennieston spin_lock_init(&queue->tx_lock);
20302688fcb7SAndrew J. Bennieston spin_lock_init(&queue->rx_lock);
2031b27d4795SJuergen Gross spin_lock_init(&queue->rx_cons_lock);
20322688fcb7SAndrew J. Bennieston
2033e99e88a9SKees Cook timer_setup(&queue->rx_refill_timer, rx_refill_timeout, 0);
20342688fcb7SAndrew J. Bennieston
203521f2706bSXiao Liang devid = strrchr(queue->info->xbdev->nodename, '/') + 1;
203621f2706bSXiao Liang snprintf(queue->name, sizeof(queue->name), "vif%s-q%u",
203721f2706bSXiao Liang devid, queue->id);
20388b715010SWei Liu
203921631d2dSJuergen Gross /* Initialise tx_skb_freelist as a free chain containing every entry. */
20402688fcb7SAndrew J. Bennieston queue->tx_skb_freelist = 0;
2041a884daa6SJuergen Gross queue->tx_pend_queue = TX_LINK_NONE;
20422688fcb7SAndrew J. Bennieston for (i = 0; i < NET_TX_RING_SIZE; i++) {
204321631d2dSJuergen Gross queue->tx_link[i] = i + 1;
2044145daab2SJuergen Gross queue->grant_tx_ref[i] = INVALID_GRANT_REF;
20452688fcb7SAndrew J. Bennieston queue->grant_tx_page[i] = NULL;
20462688fcb7SAndrew J. Bennieston }
204721631d2dSJuergen Gross queue->tx_link[NET_TX_RING_SIZE - 1] = TX_LINK_NONE;
20482688fcb7SAndrew J. Bennieston
20492688fcb7SAndrew J. Bennieston /* Clear out rx_skbs */
20502688fcb7SAndrew J. Bennieston for (i = 0; i < NET_RX_RING_SIZE; i++) {
20512688fcb7SAndrew J. Bennieston queue->rx_skbs[i] = NULL;
2052145daab2SJuergen Gross queue->grant_rx_ref[i] = INVALID_GRANT_REF;
20532688fcb7SAndrew J. Bennieston }
20542688fcb7SAndrew J. Bennieston
20552688fcb7SAndrew J. Bennieston /* A grant for every tx ring slot */
20561f3c2ebaSDavid Vrabel if (gnttab_alloc_grant_references(NET_TX_RING_SIZE,
20572688fcb7SAndrew J. Bennieston &queue->gref_tx_head) < 0) {
20582688fcb7SAndrew J. Bennieston pr_alert("can't alloc tx grant refs\n");
20592688fcb7SAndrew J. Bennieston err = -ENOMEM;
20602688fcb7SAndrew J. Bennieston goto exit;
20612688fcb7SAndrew J. Bennieston }
20622688fcb7SAndrew J. Bennieston
20632688fcb7SAndrew J. Bennieston /* A grant for every rx ring slot */
20641f3c2ebaSDavid Vrabel if (gnttab_alloc_grant_references(NET_RX_RING_SIZE,
20652688fcb7SAndrew J. Bennieston &queue->gref_rx_head) < 0) {
20662688fcb7SAndrew J. Bennieston pr_alert("can't alloc rx grant refs\n");
20672688fcb7SAndrew J. Bennieston err = -ENOMEM;
20682688fcb7SAndrew J. Bennieston goto exit_free_tx;
20692688fcb7SAndrew J. Bennieston }
20702688fcb7SAndrew J. Bennieston
20712688fcb7SAndrew J. Bennieston return 0;
20722688fcb7SAndrew J. Bennieston
20732688fcb7SAndrew J. Bennieston exit_free_tx:
20742688fcb7SAndrew J. Bennieston gnttab_free_grant_references(queue->gref_tx_head);
20752688fcb7SAndrew J. Bennieston exit:
20762688fcb7SAndrew J. Bennieston return err;
20772688fcb7SAndrew J. Bennieston }
20782688fcb7SAndrew J. Bennieston
207950ee6061SAndrew J. Bennieston static int write_queue_xenstore_keys(struct netfront_queue *queue,
208050ee6061SAndrew J. Bennieston struct xenbus_transaction *xbt, int write_hierarchical)
208150ee6061SAndrew J. Bennieston {
208250ee6061SAndrew J. Bennieston /* Write the queue-specific keys into XenStore in the traditional
208350ee6061SAndrew J. Bennieston * way for a single queue, or in per-queue subkeys for multiple
208450ee6061SAndrew J. Bennieston * queues.
208550ee6061SAndrew J. Bennieston */
208650ee6061SAndrew J. Bennieston struct xenbus_device *dev = queue->info->xbdev;
208750ee6061SAndrew J. Bennieston int err;
208850ee6061SAndrew J. Bennieston const char *message;
208950ee6061SAndrew J. Bennieston char *path;
209050ee6061SAndrew J. Bennieston size_t pathsize;
209150ee6061SAndrew J. Bennieston
209250ee6061SAndrew J. Bennieston /* Choose the correct place to write the keys */
209350ee6061SAndrew J. Bennieston if (write_hierarchical) {
209450ee6061SAndrew J. Bennieston pathsize = strlen(dev->nodename) + 10;
209550ee6061SAndrew J. Bennieston path = kzalloc(pathsize, GFP_KERNEL);
209650ee6061SAndrew J. Bennieston if (!path) {
209750ee6061SAndrew J. Bennieston err = -ENOMEM;
209850ee6061SAndrew J. Bennieston message = "out of memory while writing ring references";
209950ee6061SAndrew J. Bennieston goto error;
210050ee6061SAndrew J. Bennieston }
210150ee6061SAndrew J. Bennieston snprintf(path, pathsize, "%s/queue-%u",
210250ee6061SAndrew J. Bennieston dev->nodename, queue->id);
210350ee6061SAndrew J. Bennieston } else {
210450ee6061SAndrew J. Bennieston path = (char *)dev->nodename;
210550ee6061SAndrew J. Bennieston }
210650ee6061SAndrew J. Bennieston
210750ee6061SAndrew J. Bennieston /* Write ring references */
210850ee6061SAndrew J. Bennieston err = xenbus_printf(*xbt, path, "tx-ring-ref", "%u",
210950ee6061SAndrew J. Bennieston queue->tx_ring_ref);
211050ee6061SAndrew J. Bennieston if (err) {
211150ee6061SAndrew J. Bennieston message = "writing tx-ring-ref";
211250ee6061SAndrew J. Bennieston goto error;
211350ee6061SAndrew J. Bennieston }
211450ee6061SAndrew J. Bennieston
211550ee6061SAndrew J. Bennieston err = xenbus_printf(*xbt, path, "rx-ring-ref", "%u",
211650ee6061SAndrew J. Bennieston queue->rx_ring_ref);
211750ee6061SAndrew J. Bennieston if (err) {
211850ee6061SAndrew J. Bennieston message = "writing rx-ring-ref";
211950ee6061SAndrew J. Bennieston goto error;
212050ee6061SAndrew J. Bennieston }
212150ee6061SAndrew J. Bennieston
212250ee6061SAndrew J. Bennieston /* Write event channels; taking into account both shared
212350ee6061SAndrew J. Bennieston * and split event channel scenarios.
212450ee6061SAndrew J. Bennieston */
212550ee6061SAndrew J. Bennieston if (queue->tx_evtchn == queue->rx_evtchn) {
212650ee6061SAndrew J. Bennieston /* Shared event channel */
212750ee6061SAndrew J. Bennieston err = xenbus_printf(*xbt, path,
212850ee6061SAndrew J. Bennieston "event-channel", "%u", queue->tx_evtchn);
212950ee6061SAndrew J. Bennieston if (err) {
213050ee6061SAndrew J. Bennieston message = "writing event-channel";
213150ee6061SAndrew J. Bennieston goto error;
213250ee6061SAndrew J. Bennieston }
213350ee6061SAndrew J. Bennieston } else {
213450ee6061SAndrew J. Bennieston /* Split event channels */
213550ee6061SAndrew J. Bennieston err = xenbus_printf(*xbt, path,
213650ee6061SAndrew J. Bennieston "event-channel-tx", "%u", queue->tx_evtchn);
213750ee6061SAndrew J. Bennieston if (err) {
213850ee6061SAndrew J. Bennieston message = "writing event-channel-tx";
213950ee6061SAndrew J. Bennieston goto error;
214050ee6061SAndrew J. Bennieston }
214150ee6061SAndrew J. Bennieston
214250ee6061SAndrew J. Bennieston err = xenbus_printf(*xbt, path,
214350ee6061SAndrew J. Bennieston "event-channel-rx", "%u", queue->rx_evtchn);
214450ee6061SAndrew J. Bennieston if (err) {
214550ee6061SAndrew J. Bennieston message = "writing event-channel-rx";
214650ee6061SAndrew J. Bennieston goto error;
214750ee6061SAndrew J. Bennieston }
214850ee6061SAndrew J. Bennieston }
214950ee6061SAndrew J. Bennieston
215050ee6061SAndrew J. Bennieston if (write_hierarchical)
215150ee6061SAndrew J. Bennieston kfree(path);
215250ee6061SAndrew J. Bennieston return 0;
215350ee6061SAndrew J. Bennieston
215450ee6061SAndrew J. Bennieston error:
215550ee6061SAndrew J. Bennieston if (write_hierarchical)
215650ee6061SAndrew J. Bennieston kfree(path);
215750ee6061SAndrew J. Bennieston xenbus_dev_fatal(dev, err, "%s", message);
215850ee6061SAndrew J. Bennieston return err;
215950ee6061SAndrew J. Bennieston }
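/*
 * Illustrative XenStore layout produced above (node names are examples
 * only; the actual frontend path comes from the toolstack, typically
 * /local/domain/<domid>/device/vif/<handle>):
 *
 *   single queue (write_hierarchical == 0, keys written flat):
 *     <frontend>/tx-ring-ref, <frontend>/rx-ring-ref, and either
 *     <frontend>/event-channel (shared) or
 *     <frontend>/event-channel-tx plus <frontend>/event-channel-rx (split)
 *
 *   multiple queues (write_hierarchical == 1):
 *     <frontend>/queue-<N>/tx-ring-ref, <frontend>/queue-<N>/rx-ring-ref, ...
 */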
216050ee6061SAndrew J. Bennieston
21616c5aa6fcSDenis Kirjanov
21626c5aa6fcSDenis Kirjanov
21636c5aa6fcSDenis Kirjanov static int xennet_create_page_pool(struct netfront_queue *queue)
21646c5aa6fcSDenis Kirjanov {
21656c5aa6fcSDenis Kirjanov int err;
21666c5aa6fcSDenis Kirjanov struct page_pool_params pp_params = {
21676c5aa6fcSDenis Kirjanov .order = 0,
21686c5aa6fcSDenis Kirjanov .flags = 0,
21696c5aa6fcSDenis Kirjanov .pool_size = NET_RX_RING_SIZE,
21706c5aa6fcSDenis Kirjanov .nid = NUMA_NO_NODE,
21716c5aa6fcSDenis Kirjanov .dev = &queue->info->netdev->dev,
21726c5aa6fcSDenis Kirjanov .offset = XDP_PACKET_HEADROOM,
21736c5aa6fcSDenis Kirjanov .max_len = XEN_PAGE_SIZE - XDP_PACKET_HEADROOM,
21746c5aa6fcSDenis Kirjanov };
21756c5aa6fcSDenis Kirjanov
21766c5aa6fcSDenis Kirjanov queue->page_pool = page_pool_create(&pp_params);
21776c5aa6fcSDenis Kirjanov if (IS_ERR(queue->page_pool)) {
21786c5aa6fcSDenis Kirjanov err = PTR_ERR(queue->page_pool);
21796c5aa6fcSDenis Kirjanov queue->page_pool = NULL;
21806c5aa6fcSDenis Kirjanov return err;
21816c5aa6fcSDenis Kirjanov }
21826c5aa6fcSDenis Kirjanov
21836c5aa6fcSDenis Kirjanov err = xdp_rxq_info_reg(&queue->xdp_rxq, queue->info->netdev,
2184b02e5a0eSBjörn Töpel queue->id, 0);
21856c5aa6fcSDenis Kirjanov if (err) {
21866c5aa6fcSDenis Kirjanov netdev_err(queue->info->netdev, "xdp_rxq_info_reg failed\n");
21876c5aa6fcSDenis Kirjanov goto err_free_pp;
21886c5aa6fcSDenis Kirjanov }
21896c5aa6fcSDenis Kirjanov
21906c5aa6fcSDenis Kirjanov err = xdp_rxq_info_reg_mem_model(&queue->xdp_rxq,
21916c5aa6fcSDenis Kirjanov MEM_TYPE_PAGE_POOL, queue->page_pool);
21926c5aa6fcSDenis Kirjanov if (err) {
21936c5aa6fcSDenis Kirjanov netdev_err(queue->info->netdev, "xdp_rxq_info_reg_mem_model failed\n");
21946c5aa6fcSDenis Kirjanov goto err_unregister_rxq;
21956c5aa6fcSDenis Kirjanov }
21966c5aa6fcSDenis Kirjanov return 0;
21976c5aa6fcSDenis Kirjanov
21986c5aa6fcSDenis Kirjanov err_unregister_rxq:
21996c5aa6fcSDenis Kirjanov xdp_rxq_info_unreg(&queue->xdp_rxq);
22006c5aa6fcSDenis Kirjanov err_free_pp:
22016c5aa6fcSDenis Kirjanov page_pool_destroy(queue->page_pool);
22026c5aa6fcSDenis Kirjanov queue->page_pool = NULL;
22036c5aa6fcSDenis Kirjanov return err;
22046c5aa6fcSDenis Kirjanov }
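/*
 * With the pool parameters above, each order-0 page handed to the rx ring
 * is laid out roughly as
 *
 *   | XDP_PACKET_HEADROOM | packet data (up to XEN_PAGE_SIZE - headroom) |
 *
 * leaving headroom in front of every received frame for an XDP program to
 * grow the packet.
 */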
22056c5aa6fcSDenis Kirjanov
2206ce58725fSDavid Vrabel static int xennet_create_queues(struct netfront_info *info,
2207ca88ea12SJoe Jin unsigned int *num_queues)
2208ce58725fSDavid Vrabel {
2209ce58725fSDavid Vrabel unsigned int i;
2210ce58725fSDavid Vrabel int ret;
2211ce58725fSDavid Vrabel
2212ca88ea12SJoe Jin info->queues = kcalloc(*num_queues, sizeof(struct netfront_queue),
2213ce58725fSDavid Vrabel GFP_KERNEL);
2214ce58725fSDavid Vrabel if (!info->queues)
2215ce58725fSDavid Vrabel return -ENOMEM;
2216ce58725fSDavid Vrabel
2217ca88ea12SJoe Jin for (i = 0; i < *num_queues; i++) {
2218ce58725fSDavid Vrabel struct netfront_queue *queue = &info->queues[i];
2219ce58725fSDavid Vrabel
2220ce58725fSDavid Vrabel queue->id = i;
2221ce58725fSDavid Vrabel queue->info = info;
2222ce58725fSDavid Vrabel
2223ce58725fSDavid Vrabel ret = xennet_init_queue(queue);
2224ce58725fSDavid Vrabel if (ret < 0) {
2225f599c64fSRoss Lagerwall dev_warn(&info->xbdev->dev,
222669cb8524SDavid Vrabel "only created %d queues\n", i);
2227ca88ea12SJoe Jin *num_queues = i;
2228ce58725fSDavid Vrabel break;
2229ce58725fSDavid Vrabel }
2230ce58725fSDavid Vrabel
22316c5aa6fcSDenis Kirjanov /* use page pool recycling instead of buddy allocator */
22326c5aa6fcSDenis Kirjanov ret = xennet_create_page_pool(queue);
22336c5aa6fcSDenis Kirjanov if (ret < 0) {
22346c5aa6fcSDenis Kirjanov dev_err(&info->xbdev->dev, "can't allocate page pool\n");
22356c5aa6fcSDenis Kirjanov *num_queues = i;
22366c5aa6fcSDenis Kirjanov return ret;
22376c5aa6fcSDenis Kirjanov }
22386c5aa6fcSDenis Kirjanov
2239b48b89f9SJakub Kicinski netif_napi_add(queue->info->netdev, &queue->napi, xennet_poll);
2240ce58725fSDavid Vrabel if (netif_running(info->netdev))
2241ce58725fSDavid Vrabel napi_enable(&queue->napi);
2242ce58725fSDavid Vrabel }
2243ce58725fSDavid Vrabel
2244ca88ea12SJoe Jin netif_set_real_num_tx_queues(info->netdev, *num_queues);
2245ce58725fSDavid Vrabel
2246ca88ea12SJoe Jin if (*num_queues == 0) {
2247f599c64fSRoss Lagerwall dev_err(&info->xbdev->dev, "no queues\n");
2248ce58725fSDavid Vrabel return -EINVAL;
2249ce58725fSDavid Vrabel }
2250ce58725fSDavid Vrabel return 0;
2251ce58725fSDavid Vrabel }
2252ce58725fSDavid Vrabel
22530d160211SJeremy Fitzhardinge /* Common code used when first setting up, and when resuming. */
2254f502bf2bSIan Campbell static int talk_to_netback(struct xenbus_device *dev,
22550d160211SJeremy Fitzhardinge struct netfront_info *info)
22560d160211SJeremy Fitzhardinge {
22570d160211SJeremy Fitzhardinge const char *message;
22580d160211SJeremy Fitzhardinge struct xenbus_transaction xbt;
22590d160211SJeremy Fitzhardinge int err;
22602688fcb7SAndrew J. Bennieston unsigned int feature_split_evtchn;
22612688fcb7SAndrew J. Bennieston unsigned int i = 0;
226250ee6061SAndrew J. Bennieston unsigned int max_queues = 0;
22632688fcb7SAndrew J. Bennieston struct netfront_queue *queue = NULL;
22642688fcb7SAndrew J. Bennieston unsigned int num_queues = 1;
226593772114SJakub Kicinski u8 addr[ETH_ALEN];
22660d160211SJeremy Fitzhardinge
22672688fcb7SAndrew J. Bennieston info->netdev->irq = 0;
22682688fcb7SAndrew J. Bennieston
22694491001cSRoger Pau Monne /* Check if backend is trusted. */
22704491001cSRoger Pau Monne info->bounce = !xennet_trusted ||
22714491001cSRoger Pau Monne !xenbus_read_unsigned(dev->nodename, "trusted", 1);
22724491001cSRoger Pau Monne
227350ee6061SAndrew J. Bennieston /* Check if backend supports multiple queues */
22742890ea5cSJuergen Gross max_queues = xenbus_read_unsigned(info->xbdev->otherend,
22752890ea5cSJuergen Gross "multi-queue-max-queues", 1);
227650ee6061SAndrew J. Bennieston num_queues = min(max_queues, xennet_max_queues);
227750ee6061SAndrew J. Bennieston
22782688fcb7SAndrew J. Bennieston /* Check feature-split-event-channels */
22792890ea5cSJuergen Gross feature_split_evtchn = xenbus_read_unsigned(info->xbdev->otherend,
22802890ea5cSJuergen Gross "feature-split-event-channels", 0);
22812688fcb7SAndrew J. Bennieston
22822688fcb7SAndrew J. Bennieston /* Read mac addr. */
228393772114SJakub Kicinski err = xen_net_read_mac(dev, addr);
22842688fcb7SAndrew J. Bennieston if (err) {
22852688fcb7SAndrew J. Bennieston xenbus_dev_fatal(dev, err, "parsing %s/mac", dev->nodename);
2286cb257783SRoss Lagerwall goto out_unlocked;
22872688fcb7SAndrew J. Bennieston }
228893772114SJakub Kicinski eth_hw_addr_set(info->netdev, addr);
22892688fcb7SAndrew J. Bennieston
22906c5aa6fcSDenis Kirjanov info->netback_has_xdp_headroom = xenbus_read_unsigned(info->xbdev->otherend,
22916c5aa6fcSDenis Kirjanov "feature-xdp-headroom", 0);
22926c5aa6fcSDenis Kirjanov if (info->netback_has_xdp_headroom) {
22936c5aa6fcSDenis Kirjanov /* set the current xen-netfront xdp state */
22946c5aa6fcSDenis Kirjanov err = talk_to_netback_xdp(info, info->netfront_xdp_enabled ?
22956c5aa6fcSDenis Kirjanov NETBACK_XDP_HEADROOM_ENABLE :
22966c5aa6fcSDenis Kirjanov NETBACK_XDP_HEADROOM_DISABLE);
22976c5aa6fcSDenis Kirjanov if (err)
22986c5aa6fcSDenis Kirjanov goto out_unlocked;
22996c5aa6fcSDenis Kirjanov }
23006c5aa6fcSDenis Kirjanov
2301f599c64fSRoss Lagerwall rtnl_lock();
2302ce58725fSDavid Vrabel if (info->queues)
2303ce58725fSDavid Vrabel xennet_destroy_queues(info);
2304ce58725fSDavid Vrabel
2305a884daa6SJuergen Gross /* For the case of a reconnect, reset the "broken" indicator. */
2306a884daa6SJuergen Gross info->broken = false;
2307a884daa6SJuergen Gross
2308ca88ea12SJoe Jin err = xennet_create_queues(info, &num_queues);
2309e2e004acSRoss Lagerwall if (err < 0) {
2310e2e004acSRoss Lagerwall xenbus_dev_fatal(dev, err, "creating queues");
2311e2e004acSRoss Lagerwall kfree(info->queues);
2312e2e004acSRoss Lagerwall info->queues = NULL;
2313e2e004acSRoss Lagerwall goto out;
2314e2e004acSRoss Lagerwall }
2315f599c64fSRoss Lagerwall rtnl_unlock();
23162688fcb7SAndrew J. Bennieston
23172688fcb7SAndrew J. Bennieston /* Create shared ring, alloc event channel -- for each queue */
23182688fcb7SAndrew J. Bennieston for (i = 0; i < num_queues; ++i) {
23192688fcb7SAndrew J. Bennieston queue = &info->queues[i];
23202688fcb7SAndrew J. Bennieston err = setup_netfront(dev, queue, feature_split_evtchn);
2321e2e004acSRoss Lagerwall if (err)
23222688fcb7SAndrew J. Bennieston goto destroy_ring;
23232688fcb7SAndrew J. Bennieston }
23240d160211SJeremy Fitzhardinge
23250d160211SJeremy Fitzhardinge again:
23260d160211SJeremy Fitzhardinge err = xenbus_transaction_start(&xbt);
23270d160211SJeremy Fitzhardinge if (err) {
23280d160211SJeremy Fitzhardinge xenbus_dev_fatal(dev, err, "starting transaction");
23290d160211SJeremy Fitzhardinge goto destroy_ring;
23300d160211SJeremy Fitzhardinge }
23310d160211SJeremy Fitzhardinge
2332812494d9Schas williams if (xenbus_exists(XBT_NIL,
2333812494d9Schas williams info->xbdev->otherend, "multi-queue-max-queues")) {
2334812494d9Schas williams /* Write the number of queues */
2335812494d9Schas williams err = xenbus_printf(xbt, dev->nodename,
2336812494d9Schas williams "multi-queue-num-queues", "%u", num_queues);
2337812494d9Schas williams if (err) {
2338812494d9Schas williams message = "writing multi-queue-num-queues";
2339812494d9Schas williams goto abort_transaction_no_dev_fatal;
2340812494d9Schas williams }
2341812494d9Schas williams }
2342812494d9Schas williams
234350ee6061SAndrew J. Bennieston if (num_queues == 1) {
234450ee6061SAndrew J. Bennieston err = write_queue_xenstore_keys(&info->queues[0], &xbt, 0); /* flat */
234550ee6061SAndrew J. Bennieston if (err)
234650ee6061SAndrew J. Bennieston goto abort_transaction_no_dev_fatal;
2347d634bf2cSWei Liu } else {
234850ee6061SAndrew J. Bennieston /* Write the keys for each queue */
234950ee6061SAndrew J. Bennieston for (i = 0; i < num_queues; ++i) {
235050ee6061SAndrew J. Bennieston queue = &info->queues[i];
235150ee6061SAndrew J. Bennieston err = write_queue_xenstore_keys(queue, &xbt, 1); /* hierarchical */
235250ee6061SAndrew J. Bennieston if (err)
235350ee6061SAndrew J. Bennieston goto abort_transaction_no_dev_fatal;
2354d634bf2cSWei Liu }
2355d634bf2cSWei Liu }
23560d160211SJeremy Fitzhardinge
235750ee6061SAndrew J. Bennieston /* The remaining keys are not queue-specific */
23580d160211SJeremy Fitzhardinge err = xenbus_printf(xbt, dev->nodename, "request-rx-copy", "%u",
23590d160211SJeremy Fitzhardinge 1);
23600d160211SJeremy Fitzhardinge if (err) {
23610d160211SJeremy Fitzhardinge message = "writing request-rx-copy";
23620d160211SJeremy Fitzhardinge goto abort_transaction;
23630d160211SJeremy Fitzhardinge }
23640d160211SJeremy Fitzhardinge
23650d160211SJeremy Fitzhardinge err = xenbus_printf(xbt, dev->nodename, "feature-rx-notify", "%d", 1);
23660d160211SJeremy Fitzhardinge if (err) {
23670d160211SJeremy Fitzhardinge message = "writing feature-rx-notify";
23680d160211SJeremy Fitzhardinge goto abort_transaction;
23690d160211SJeremy Fitzhardinge }
23700d160211SJeremy Fitzhardinge
23710d160211SJeremy Fitzhardinge err = xenbus_printf(xbt, dev->nodename, "feature-sg", "%d", 1);
23720d160211SJeremy Fitzhardinge if (err) {
23730d160211SJeremy Fitzhardinge message = "writing feature-sg";
23740d160211SJeremy Fitzhardinge goto abort_transaction;
23750d160211SJeremy Fitzhardinge }
23760d160211SJeremy Fitzhardinge
23770d160211SJeremy Fitzhardinge err = xenbus_printf(xbt, dev->nodename, "feature-gso-tcpv4", "%d", 1);
23780d160211SJeremy Fitzhardinge if (err) {
23790d160211SJeremy Fitzhardinge message = "writing feature-gso-tcpv4";
23800d160211SJeremy Fitzhardinge goto abort_transaction;
23810d160211SJeremy Fitzhardinge }
23820d160211SJeremy Fitzhardinge
23832c0057deSPaul Durrant err = xenbus_write(xbt, dev->nodename, "feature-gso-tcpv6", "1");
23842c0057deSPaul Durrant if (err) {
23852c0057deSPaul Durrant message = "writing feature-gso-tcpv6";
23862c0057deSPaul Durrant goto abort_transaction;
23872c0057deSPaul Durrant }
23882c0057deSPaul Durrant
23892c0057deSPaul Durrant err = xenbus_write(xbt, dev->nodename, "feature-ipv6-csum-offload",
23902c0057deSPaul Durrant "1");
23912c0057deSPaul Durrant if (err) {
23922c0057deSPaul Durrant message = "writing feature-ipv6-csum-offload";
23932c0057deSPaul Durrant goto abort_transaction;
23942c0057deSPaul Durrant }
23952c0057deSPaul Durrant
23960d160211SJeremy Fitzhardinge err = xenbus_transaction_end(xbt, 0);
23970d160211SJeremy Fitzhardinge if (err) {
23980d160211SJeremy Fitzhardinge if (err == -EAGAIN)
23990d160211SJeremy Fitzhardinge goto again;
24000d160211SJeremy Fitzhardinge xenbus_dev_fatal(dev, err, "completing transaction");
24010d160211SJeremy Fitzhardinge goto destroy_ring;
24020d160211SJeremy Fitzhardinge }
24030d160211SJeremy Fitzhardinge
24040d160211SJeremy Fitzhardinge return 0;
24050d160211SJeremy Fitzhardinge
24060d160211SJeremy Fitzhardinge abort_transaction:
24070d160211SJeremy Fitzhardinge xenbus_dev_fatal(dev, err, "%s", message);
240850ee6061SAndrew J. Bennieston abort_transaction_no_dev_fatal:
240950ee6061SAndrew J. Bennieston xenbus_transaction_end(xbt, 1);
24100d160211SJeremy Fitzhardinge destroy_ring:
24110d160211SJeremy Fitzhardinge xennet_disconnect_backend(info);
2412f599c64fSRoss Lagerwall rtnl_lock();
2413e2e004acSRoss Lagerwall xennet_destroy_queues(info);
24140d160211SJeremy Fitzhardinge out:
2415f599c64fSRoss Lagerwall rtnl_unlock();
2416cb257783SRoss Lagerwall out_unlocked:
2417d86b5672SVitaly Kuznetsov device_unregister(&dev->dev);
24180d160211SJeremy Fitzhardinge return err;
24190d160211SJeremy Fitzhardinge }
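/*
 * Summary of the frontend keys written in the transaction above (all under
 * dev->nodename): multi-queue-num-queues (only when the backend advertises
 * multi-queue-max-queues), the per-queue ring references and event channels
 * via write_queue_xenstore_keys(), request-rx-copy, feature-rx-notify,
 * feature-sg, feature-gso-tcpv4, feature-gso-tcpv6 and
 * feature-ipv6-csum-offload.
 */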
24200d160211SJeremy Fitzhardinge
24210d160211SJeremy Fitzhardinge static int xennet_connect(struct net_device *dev)
24220d160211SJeremy Fitzhardinge {
24230d160211SJeremy Fitzhardinge struct netfront_info *np = netdev_priv(dev);
24242688fcb7SAndrew J. Bennieston unsigned int num_queues = 0;
2425a5b5dc3cSDavid Vrabel int err;
24262688fcb7SAndrew J. Bennieston unsigned int j = 0;
24272688fcb7SAndrew J. Bennieston struct netfront_queue *queue = NULL;
24280d160211SJeremy Fitzhardinge
24292890ea5cSJuergen Gross if (!xenbus_read_unsigned(np->xbdev->otherend, "feature-rx-copy", 0)) {
24300d160211SJeremy Fitzhardinge dev_info(&dev->dev,
2431898eb71cSJoe Perches "backend does not support copying receive path\n");
24320d160211SJeremy Fitzhardinge return -ENODEV;
24330d160211SJeremy Fitzhardinge }
24340d160211SJeremy Fitzhardinge
2435f502bf2bSIan Campbell err = talk_to_netback(np->xbdev, np);
24360d160211SJeremy Fitzhardinge if (err)
24370d160211SJeremy Fitzhardinge return err;
24386c5aa6fcSDenis Kirjanov if (np->netback_has_xdp_headroom)
24396c5aa6fcSDenis Kirjanov pr_info("backend supports XDP headroom\n");
24404491001cSRoger Pau Monne if (np->bounce)
24414491001cSRoger Pau Monne dev_info(&np->xbdev->dev,
24424491001cSRoger Pau Monne "bouncing transmitted data to zeroed pages\n");
24430d160211SJeremy Fitzhardinge
24442688fcb7SAndrew J. Bennieston /* talk_to_netback() sets the correct number of queues */
24452688fcb7SAndrew J. Bennieston num_queues = dev->real_num_tx_queues;
24462688fcb7SAndrew J. Bennieston
2447f599c64fSRoss Lagerwall if (dev->reg_state == NETREG_UNINITIALIZED) {
2448f599c64fSRoss Lagerwall err = register_netdev(dev);
2449f599c64fSRoss Lagerwall if (err) {
2450f599c64fSRoss Lagerwall pr_warn("%s: register_netdev err=%d\n", __func__, err);
2451f599c64fSRoss Lagerwall device_unregister(&np->xbdev->dev);
2452f599c64fSRoss Lagerwall return err;
2453f599c64fSRoss Lagerwall }
2454f599c64fSRoss Lagerwall }
2455f599c64fSRoss Lagerwall
245645c8184cSRoss Lagerwall rtnl_lock();
245745c8184cSRoss Lagerwall netdev_update_features(dev);
245845c8184cSRoss Lagerwall rtnl_unlock();
245945c8184cSRoss Lagerwall
24600d160211SJeremy Fitzhardinge /*
2461a5b5dc3cSDavid Vrabel * All public and private state should now be sane. Get
24620d160211SJeremy Fitzhardinge * ready to start sending and receiving packets and give the driver
24630d160211SJeremy Fitzhardinge * domain a kick because we've probably just requeued some
24640d160211SJeremy Fitzhardinge * packets.
24650d160211SJeremy Fitzhardinge */
2466042b2046SDongli Zhang netif_tx_lock_bh(np->netdev);
2467042b2046SDongli Zhang netif_device_attach(np->netdev);
2468042b2046SDongli Zhang netif_tx_unlock_bh(np->netdev);
2469042b2046SDongli Zhang
24700d160211SJeremy Fitzhardinge netif_carrier_on(np->netdev);
24712688fcb7SAndrew J. Bennieston for (j = 0; j < num_queues; ++j) {
24722688fcb7SAndrew J. Bennieston queue = &np->queues[j];
2473f50b4076SDavid Vrabel
24742688fcb7SAndrew J. Bennieston notify_remote_via_irq(queue->tx_irq);
24752688fcb7SAndrew J. Bennieston if (queue->tx_irq != queue->rx_irq)
24762688fcb7SAndrew J. Bennieston notify_remote_via_irq(queue->rx_irq);
24770d160211SJeremy Fitzhardinge
2478f50b4076SDavid Vrabel spin_lock_bh(&queue->rx_lock);
2479f50b4076SDavid Vrabel xennet_alloc_rx_buffers(queue);
24802688fcb7SAndrew J. Bennieston spin_unlock_bh(&queue->rx_lock);
24812688fcb7SAndrew J. Bennieston }
24820d160211SJeremy Fitzhardinge
24830d160211SJeremy Fitzhardinge return 0;
24840d160211SJeremy Fitzhardinge }
24850d160211SJeremy Fitzhardinge
248680708602SLee Jones /*
24870d160211SJeremy Fitzhardinge * Callback received when the backend's state changes.
24880d160211SJeremy Fitzhardinge */
2489f502bf2bSIan Campbell static void netback_changed(struct xenbus_device *dev,
24900d160211SJeremy Fitzhardinge enum xenbus_state backend_state)
24910d160211SJeremy Fitzhardinge {
24921b713e00SGreg Kroah-Hartman struct netfront_info *np = dev_get_drvdata(&dev->dev);
24930d160211SJeremy Fitzhardinge struct net_device *netdev = np->netdev;
24940d160211SJeremy Fitzhardinge
24950d160211SJeremy Fitzhardinge dev_dbg(&dev->dev, "%s\n", xenbus_strstate(backend_state));
24960d160211SJeremy Fitzhardinge
24978edfe2e9SJuergen Gross wake_up_all(&module_wq);
24988edfe2e9SJuergen Gross
24990d160211SJeremy Fitzhardinge switch (backend_state) {
25000d160211SJeremy Fitzhardinge case XenbusStateInitialising:
25010d160211SJeremy Fitzhardinge case XenbusStateInitialised:
2502b78c9512SNoboru Iwamatsu case XenbusStateReconfiguring:
2503b78c9512SNoboru Iwamatsu case XenbusStateReconfigured:
25040d160211SJeremy Fitzhardinge case XenbusStateUnknown:
25050d160211SJeremy Fitzhardinge break;
25060d160211SJeremy Fitzhardinge
25070d160211SJeremy Fitzhardinge case XenbusStateInitWait:
25080d160211SJeremy Fitzhardinge if (dev->state != XenbusStateInitialising)
25090d160211SJeremy Fitzhardinge break;
25100d160211SJeremy Fitzhardinge if (xennet_connect(netdev) != 0)
25110d160211SJeremy Fitzhardinge break;
25120d160211SJeremy Fitzhardinge xenbus_switch_state(dev, XenbusStateConnected);
251308e34eb1SLaszlo Ersek break;
251408e34eb1SLaszlo Ersek
251508e34eb1SLaszlo Ersek case XenbusStateConnected:
2516ee89bab1SAmerigo Wang netdev_notify_peers(netdev);
25170d160211SJeremy Fitzhardinge break;
25180d160211SJeremy Fitzhardinge
2519bce3ea81SDavid Vrabel case XenbusStateClosed:
2520bce3ea81SDavid Vrabel if (dev->state == XenbusStateClosed)
2521bce3ea81SDavid Vrabel break;
2522df561f66SGustavo A. R. Silva fallthrough; /* Missed the backend's CLOSING state */
25230d160211SJeremy Fitzhardinge case XenbusStateClosing:
25240d160211SJeremy Fitzhardinge xenbus_frontend_closed(dev);
25250d160211SJeremy Fitzhardinge break;
25260d160211SJeremy Fitzhardinge }
25270d160211SJeremy Fitzhardinge }
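/*
 * In short: when the backend reaches InitWait the frontend connects and
 * switches itself to Connected; a backend Connected state triggers
 * netdev_notify_peers() (typically a gratuitous ARP); Closing/Closed from
 * the backend makes the frontend close as well. Other states are ignored.
 */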
25280d160211SJeremy Fitzhardinge
2529e0ce4af9SIan Campbell static const struct xennet_stat {
2530e0ce4af9SIan Campbell char name[ETH_GSTRING_LEN];
2531e0ce4af9SIan Campbell u16 offset;
2532e0ce4af9SIan Campbell } xennet_stats[] = {
2533e0ce4af9SIan Campbell {
2534e0ce4af9SIan Campbell "rx_gso_checksum_fixup",
2535e0ce4af9SIan Campbell offsetof(struct netfront_info, rx_gso_checksum_fixup)
2536e0ce4af9SIan Campbell },
2537e0ce4af9SIan Campbell };
2538e0ce4af9SIan Campbell
2539e0ce4af9SIan Campbell static int xennet_get_sset_count(struct net_device *dev, int string_set)
2540e0ce4af9SIan Campbell {
2541e0ce4af9SIan Campbell switch (string_set) {
2542e0ce4af9SIan Campbell case ETH_SS_STATS:
2543e0ce4af9SIan Campbell return ARRAY_SIZE(xennet_stats);
2544e0ce4af9SIan Campbell default:
2545e0ce4af9SIan Campbell return -EINVAL;
2546e0ce4af9SIan Campbell }
2547e0ce4af9SIan Campbell }
2548e0ce4af9SIan Campbell
2549e0ce4af9SIan Campbell static void xennet_get_ethtool_stats(struct net_device *dev,
2550e0ce4af9SIan Campbell struct ethtool_stats *stats, u64 * data)
2551e0ce4af9SIan Campbell {
2552e0ce4af9SIan Campbell void *np = netdev_priv(dev);
2553e0ce4af9SIan Campbell int i;
2554e0ce4af9SIan Campbell
2555e0ce4af9SIan Campbell for (i = 0; i < ARRAY_SIZE(xennet_stats); i++)
25562688fcb7SAndrew J. Bennieston data[i] = atomic_read((atomic_t *)(np + xennet_stats[i].offset));
2557e0ce4af9SIan Campbell }
2558e0ce4af9SIan Campbell
2559e0ce4af9SIan Campbell static void xennet_get_strings(struct net_device *dev, u32 stringset, u8 * data)
2560e0ce4af9SIan Campbell {
2561e0ce4af9SIan Campbell int i;
2562e0ce4af9SIan Campbell
2563e0ce4af9SIan Campbell switch (stringset) {
2564e0ce4af9SIan Campbell case ETH_SS_STATS:
2565e0ce4af9SIan Campbell for (i = 0; i < ARRAY_SIZE(xennet_stats); i++)
2566e0ce4af9SIan Campbell memcpy(data + i * ETH_GSTRING_LEN,
2567e0ce4af9SIan Campbell xennet_stats[i].name, ETH_GSTRING_LEN);
2568e0ce4af9SIan Campbell break;
2569e0ce4af9SIan Campbell }
2570e0ce4af9SIan Campbell }
2571e0ce4af9SIan Campbell
25720fc0b732SStephen Hemminger static const struct ethtool_ops xennet_ethtool_ops =
25730d160211SJeremy Fitzhardinge {
25740d160211SJeremy Fitzhardinge .get_link = ethtool_op_get_link,
2575e0ce4af9SIan Campbell
2576e0ce4af9SIan Campbell .get_sset_count = xennet_get_sset_count,
2577e0ce4af9SIan Campbell .get_ethtool_stats = xennet_get_ethtool_stats,
2578e0ce4af9SIan Campbell .get_strings = xennet_get_strings,
257991ffb9d3SDaniel Drown .get_ts_info = ethtool_op_get_ts_info,
25800d160211SJeremy Fitzhardinge };
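/*
 * The counters above show up under "ethtool -S <interface>"; for example
 * (interface name and output illustrative):
 *
 *   # ethtool -S eth0
 *   NIC statistics:
 *        rx_gso_checksum_fixup: 0
 */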
25810d160211SJeremy Fitzhardinge
25820d160211SJeremy Fitzhardinge #ifdef CONFIG_SYSFS
25831f3c2ebaSDavid Vrabel static ssize_t show_rxbuf(struct device *dev,
25840d160211SJeremy Fitzhardinge struct device_attribute *attr, char *buf)
25850d160211SJeremy Fitzhardinge {
25861f3c2ebaSDavid Vrabel return sprintf(buf, "%lu\n", NET_RX_RING_SIZE);
25870d160211SJeremy Fitzhardinge }
25880d160211SJeremy Fitzhardinge
25891f3c2ebaSDavid Vrabel static ssize_t store_rxbuf(struct device *dev,
25900d160211SJeremy Fitzhardinge struct device_attribute *attr,
25910d160211SJeremy Fitzhardinge const char *buf, size_t len)
25920d160211SJeremy Fitzhardinge {
25930d160211SJeremy Fitzhardinge char *endp;
25940d160211SJeremy Fitzhardinge
25950d160211SJeremy Fitzhardinge if (!capable(CAP_NET_ADMIN))
25960d160211SJeremy Fitzhardinge return -EPERM;
25970d160211SJeremy Fitzhardinge
25988ed7ec13SAndrew Lunn simple_strtoul(buf, &endp, 0);
25990d160211SJeremy Fitzhardinge if (endp == buf)
26000d160211SJeremy Fitzhardinge return -EBADMSG;
26010d160211SJeremy Fitzhardinge
26021f3c2ebaSDavid Vrabel /* rxbuf_min and rxbuf_max are no longer configurable. */
26030d160211SJeremy Fitzhardinge
26040d160211SJeremy Fitzhardinge return len;
26050d160211SJeremy Fitzhardinge }
26060d160211SJeremy Fitzhardinge
2607d61e4038SJoe Perches static DEVICE_ATTR(rxbuf_min, 0644, show_rxbuf, store_rxbuf);
2608d61e4038SJoe Perches static DEVICE_ATTR(rxbuf_max, 0644, show_rxbuf, store_rxbuf);
2609d61e4038SJoe Perches static DEVICE_ATTR(rxbuf_cur, 0444, show_rxbuf, NULL);
261027b917e5STakashi Iwai
261127b917e5STakashi Iwai static struct attribute *xennet_dev_attrs[] = {
261227b917e5STakashi Iwai &dev_attr_rxbuf_min.attr,
261327b917e5STakashi Iwai &dev_attr_rxbuf_max.attr,
261427b917e5STakashi Iwai &dev_attr_rxbuf_cur.attr,
261527b917e5STakashi Iwai NULL
26160d160211SJeremy Fitzhardinge };
26170d160211SJeremy Fitzhardinge
261827b917e5STakashi Iwai static const struct attribute_group xennet_dev_group = {
261927b917e5STakashi Iwai .attrs = xennet_dev_attrs
262027b917e5STakashi Iwai };
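/*
 * Assuming the attribute group above is attached to the net device (as done
 * elsewhere in this driver), the read-only ring-size values can be queried
 * roughly like this (path and value illustrative, 256 with 4 KiB pages):
 *
 *   $ cat /sys/class/net/eth0/rxbuf_max
 *   256
 *
 * store_rxbuf() accepts but ignores writes, since the ring size is fixed.
 */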
26210d160211SJeremy Fitzhardinge #endif /* CONFIG_SYSFS */
26220d160211SJeremy Fitzhardinge
2623c2c63310SAndrea Righi static void xennet_bus_close(struct xenbus_device *dev)
2624c2c63310SAndrea Righi {
2625c2c63310SAndrea Righi int ret;
2626c2c63310SAndrea Righi
2627c2c63310SAndrea Righi if (xenbus_read_driver_state(dev->otherend) == XenbusStateClosed)
2628c2c63310SAndrea Righi return;
2629c2c63310SAndrea Righi do {
2630c2c63310SAndrea Righi xenbus_switch_state(dev, XenbusStateClosing);
2631c2c63310SAndrea Righi ret = wait_event_timeout(module_wq,
2632c2c63310SAndrea Righi xenbus_read_driver_state(dev->otherend) ==
2633c2c63310SAndrea Righi XenbusStateClosing ||
2634c2c63310SAndrea Righi xenbus_read_driver_state(dev->otherend) ==
2635c2c63310SAndrea Righi XenbusStateClosed ||
2636c2c63310SAndrea Righi xenbus_read_driver_state(dev->otherend) ==
2637c2c63310SAndrea Righi XenbusStateUnknown,
2638c2c63310SAndrea Righi XENNET_TIMEOUT);
2639c2c63310SAndrea Righi } while (!ret);
2640c2c63310SAndrea Righi
2641c2c63310SAndrea Righi if (xenbus_read_driver_state(dev->otherend) == XenbusStateClosed)
2642c2c63310SAndrea Righi return;
2643c2c63310SAndrea Righi
2644c2c63310SAndrea Righi do {
2645c2c63310SAndrea Righi xenbus_switch_state(dev, XenbusStateClosed);
2646c2c63310SAndrea Righi ret = wait_event_timeout(module_wq,
2647c2c63310SAndrea Righi xenbus_read_driver_state(dev->otherend) ==
2648c2c63310SAndrea Righi XenbusStateClosed ||
2649c2c63310SAndrea Righi xenbus_read_driver_state(dev->otherend) ==
2650c2c63310SAndrea Righi XenbusStateUnknown,
2651c2c63310SAndrea Righi XENNET_TIMEOUT);
2652c2c63310SAndrea Righi } while (!ret);
2653c2c63310SAndrea Righi }
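/*
 * The two loops above walk the device through Closing and then Closed,
 * re-asserting each state every XENNET_TIMEOUT until the backend has
 * followed (or has disappeared, i.e. reads back as Unknown).
 */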
2654c2c63310SAndrea Righi
26557cffcadeSDawei Li static void xennet_remove(struct xenbus_device *dev)
26560d160211SJeremy Fitzhardinge {
26571b713e00SGreg Kroah-Hartman struct netfront_info *info = dev_get_drvdata(&dev->dev);
26580d160211SJeremy Fitzhardinge
2659c2c63310SAndrea Righi xennet_bus_close(dev);
26600d160211SJeremy Fitzhardinge xennet_disconnect_backend(info);
26610d160211SJeremy Fitzhardinge
2662f599c64fSRoss Lagerwall if (info->netdev->reg_state == NETREG_REGISTERED)
26636bc96d04SIan Campbell unregister_netdev(info->netdev);
26646bc96d04SIan Campbell
2665f599c64fSRoss Lagerwall if (info->queues) {
2666f599c64fSRoss Lagerwall rtnl_lock();
2667ad068118SDavid Vrabel xennet_destroy_queues(info);
2668f599c64fSRoss Lagerwall rtnl_unlock();
2669f599c64fSRoss Lagerwall }
2670900e1833SDavid Vrabel xennet_free_netdev(info->netdev);
26710d160211SJeremy Fitzhardinge }
26720d160211SJeremy Fitzhardinge
267395afae48SDavid Vrabel static const struct xenbus_device_id netfront_ids[] = {
267495afae48SDavid Vrabel { "vif" },
267595afae48SDavid Vrabel { "" }
267695afae48SDavid Vrabel };
267795afae48SDavid Vrabel
267895afae48SDavid Vrabel static struct xenbus_driver netfront_driver = {
267995afae48SDavid Vrabel .ids = netfront_ids,
26800d160211SJeremy Fitzhardinge .probe = netfront_probe,
26818e0e46bbSBill Pemberton .remove = xennet_remove,
26820d160211SJeremy Fitzhardinge .resume = netfront_resume,
2683f502bf2bSIan Campbell .otherend_changed = netback_changed,
268495afae48SDavid Vrabel };
26850d160211SJeremy Fitzhardinge
26860d160211SJeremy Fitzhardinge static int __init netif_init(void)
26870d160211SJeremy Fitzhardinge {
26886e833587SJeremy Fitzhardinge if (!xen_domain())
26890d160211SJeremy Fitzhardinge return -ENODEV;
26900d160211SJeremy Fitzhardinge
269151c71a3bSKonrad Rzeszutek Wilk if (!xen_has_pv_nic_devices())
2692b9136d20SIgor Mammedov return -ENODEV;
2693b9136d20SIgor Mammedov
2694383eda32SJoe Perches pr_info("Initialising Xen virtual ethernet driver\n");
26950d160211SJeremy Fitzhardinge
2696034702a6SJuergen Gross /* Allow as many queues as there are CPUs but at most 8 if user has not
269732a84405SWei Liu * specified a value.
269832a84405SWei Liu */
269932a84405SWei Liu if (xennet_max_queues == 0)
2700034702a6SJuergen Gross xennet_max_queues = min_t(unsigned int, MAX_QUEUES_DEFAULT,
2701034702a6SJuergen Gross num_online_cpus());
270250ee6061SAndrew J. Bennieston
2703ffb78a26SAl Viro return xenbus_register_frontend(&netfront_driver);
27040d160211SJeremy Fitzhardinge }
27050d160211SJeremy Fitzhardinge module_init(netif_init);
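/*
 * The default computed in netif_init() can be overridden at load time via
 * the driver's max_queues module parameter (defined earlier in this file);
 * invocation illustrative:
 *
 *   # modprobe xen-netfront max_queues=4
 */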
27060d160211SJeremy Fitzhardinge
27070d160211SJeremy Fitzhardinge
27080d160211SJeremy Fitzhardinge static void __exit netif_exit(void)
27090d160211SJeremy Fitzhardinge {
2710ffb78a26SAl Viro xenbus_unregister_driver(&netfront_driver);
27110d160211SJeremy Fitzhardinge }
27120d160211SJeremy Fitzhardinge module_exit(netif_exit);
27130d160211SJeremy Fitzhardinge
27140d160211SJeremy Fitzhardinge MODULE_DESCRIPTION("Xen virtual network device frontend");
27150d160211SJeremy Fitzhardinge MODULE_LICENSE("GPL");
2716d2f0c52bSMark McLoughlin MODULE_ALIAS("xen:vif");
27174f93f09bSMark McLoughlin MODULE_ALIAS("xennet");