/*
 * Back-end of the driver for virtual network devices. This portion of the
 * driver exports a 'unified' network-device interface that can be accessed
 * by any operating system that implements a compatible front end. A
 * reference front-end implementation can be found in:
 *  drivers/net/xen-netfront.c
 *
 * Copyright (c) 2002-2005, K A Fraser
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "common.h"

#include <linux/kthread.h>
#include <linux/if_vlan.h>
#include <linux/udp.h>
#include <linux/highmem.h>

#include <net/tcp.h>

#include <xen/xen.h>
#include <xen/events.h>
#include <xen/interface/memory.h>
#include <xen/page.h>

#include <asm/xen/hypercall.h>
/* Provide an option to disable split event channels at load time as
 * event channels are a limited resource. Split event channels are
 * enabled by default.
 */
bool separate_tx_rx_irq = true;
module_param(separate_tx_rx_irq, bool, 0644);

/* The time that packets can stay on the guest Rx internal queue
 * before they are dropped.
 */
unsigned int rx_drain_timeout_msecs = 10000;
module_param(rx_drain_timeout_msecs, uint, 0444);

/* The length of time before the frontend is considered unresponsive
 * because it isn't providing Rx slots.
 */
unsigned int rx_stall_timeout_msecs = 60000;
module_param(rx_stall_timeout_msecs, uint, 0444);

#define MAX_QUEUES_DEFAULT 8
unsigned int xenvif_max_queues;
module_param_named(max_queues, xenvif_max_queues, uint, 0644);
MODULE_PARM_DESC(max_queues,
                 "Maximum number of queues per virtual interface");

/*
 * This is the maximum number of slots an skb can have. If a guest sends
 * an skb which exceeds this limit, it is considered malicious.
 */
#define FATAL_SKB_SLOTS_DEFAULT 20
static unsigned int fatal_skb_slots = FATAL_SKB_SLOTS_DEFAULT;
module_param(fatal_skb_slots, uint, 0444);

/* The amount to copy out of the first guest Tx slot into the skb's
 * linear area. If the first slot has more data, it will be mapped
 * and put into the first frag.
 *
 * This is sized to avoid pulling headers from the frags for most
 * TCP/IP packets.
 */
#define XEN_NETBACK_TX_COPY_LEN 128

/* This is the maximum number of flows in the hash cache. */
#define XENVIF_HASH_CACHE_SIZE_DEFAULT 64
unsigned int xenvif_hash_cache_size = XENVIF_HASH_CACHE_SIZE_DEFAULT;
module_param_named(hash_cache_size, xenvif_hash_cache_size, uint, 0644);
MODULE_PARM_DESC(hash_cache_size, "Number of flows in the hash cache");

/* This module parameter tells whether data destined for xen-netfront
 * must be placed at the XDP_PACKET_HEADROOM offset needed for XDP
 * processing.
 */
bool provides_xdp_headroom = true;
module_param(provides_xdp_headroom, bool, 0644);

static void xenvif_idx_release(struct xenvif_queue *queue, u16 pending_idx,
                               s8 status);

static void make_tx_response(struct xenvif_queue *queue,
                             const struct xen_netif_tx_request *txp,
                             unsigned int extra_count,
                             s8 status);

static void xenvif_idx_unmap(struct xenvif_queue *queue, u16 pending_idx);

static inline int tx_work_todo(struct xenvif_queue *queue);

static inline unsigned long idx_to_pfn(struct xenvif_queue *queue,
                                       u16 idx)
{
        return page_to_pfn(queue->mmap_pages[idx]);
}

static inline unsigned long idx_to_kaddr(struct xenvif_queue *queue,
                                         u16 idx)
{
        return (unsigned long)pfn_to_kaddr(idx_to_pfn(queue, idx));
}

#define callback_param(vif, pending_idx) \
        (vif->pending_tx_info[pending_idx].callback_struct)

/* Find the containing VIF's structure from a pointer in pending_tx_info array
 */
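/* The callback_struct for pending index N is embedded in pending_tx_info[N],
 * so stepping back pending_idx entries from the entry recovered via
 * container_of() lands on pending_tx_info[0], from which the enclosing
 * xenvif_queue is obtained.
 */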
static inline struct xenvif_queue *ubuf_to_queue(const struct ubuf_info_msgzc *ubuf)
{
        u16 pending_idx = ubuf->desc;
        struct pending_tx_info *temp =
                container_of(ubuf, struct pending_tx_info, callback_struct);
        return container_of(temp - pending_idx,
                            struct xenvif_queue,
                            pending_tx_info[0]);
}

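/* A frag that still refers to a granted (foreign) page has no local page to
 * describe yet, so the frag's offset field is temporarily borrowed to hold
 * the pending ring index. xenvif_fill_frags() later replaces it with the
 * real page, offset and size once the grant has been mapped.
 */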
static u16 frag_get_pending_idx(skb_frag_t *frag)
{
        return (u16)skb_frag_off(frag);
}

static void frag_set_pending_idx(skb_frag_t *frag, u16 pending_idx)
{
        skb_frag_off_set(frag, pending_idx);
}

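/* The pending ring holds MAX_PENDING_REQS entries, a power of two, so a
 * free-running index can be reduced with a mask instead of a modulo.
 * Illustrative example (assuming MAX_PENDING_REQS == 256):
 * pending_index(257) == 257 & 255 == 1.
 */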
static inline pending_ring_idx_t pending_index(unsigned i)
{
        return i & (MAX_PENDING_REQS-1);
}

void xenvif_kick_thread(struct xenvif_queue *queue)
{
        wake_up(&queue->wq);
}

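/* With the lateeoi model the event channel stays masked until an explicit
 * end-of-interrupt. If the final check below shows no more requests, clear
 * this queue's pending Tx/common EOI flags and, if either was set, issue the
 * EOI so the frontend can raise the next interrupt.
 */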
void xenvif_napi_schedule_or_enable_events(struct xenvif_queue *queue)
{
        int more_to_do;

        RING_FINAL_CHECK_FOR_REQUESTS(&queue->tx, more_to_do);

        if (more_to_do)
                napi_schedule(&queue->napi);
        else if (atomic_fetch_andnot(NETBK_TX_EOI | NETBK_COMMON_EOI,
                                     &queue->eoi_pending) &
                 (NETBK_TX_EOI | NETBK_COMMON_EOI))
                xen_irq_lateeoi(queue->tx_irq, 0);
}

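/* Illustrative example of the refill arithmetic below (values assumed): with
 * credit_bytes == 65536 and remaining_credit == 1000, max_burst becomes
 * 131072 and max_credit becomes 66536, so remaining_credit is refilled to
 * 66536. A queue configured with credit_bytes above 128kB can instead burst
 * up to its full per-window allowance.
 */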
static void tx_add_credit(struct xenvif_queue *queue)
{
        unsigned long max_burst, max_credit;

        /*
         * Allow a burst big enough to transmit a jumbo packet of up to 128kB.
         * Otherwise the interface can seize up due to insufficient credit.
         */
        max_burst = max(131072UL, queue->credit_bytes);

        /* Take care that adding a new chunk of credit doesn't wrap to zero. */
        max_credit = queue->remaining_credit + queue->credit_bytes;
        if (max_credit < queue->remaining_credit)
                max_credit = ULONG_MAX; /* wrapped: clamp to ULONG_MAX */

        queue->remaining_credit = min(max_credit, max_burst);
        queue->rate_limited = false;
}

void xenvif_tx_credit_callback(struct timer_list *t)
{
        struct xenvif_queue *queue = from_timer(queue, t, credit_timeout);
        tx_add_credit(queue);
        xenvif_napi_schedule_or_enable_events(queue);
}

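/* Abort a multi-slot request: respond with XEN_NETIF_RSP_ERROR to the failed
 * request and to every remaining slot of the packet up to 'end', advancing
 * the ring consumer past them.
 */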
static void xenvif_tx_err(struct xenvif_queue *queue,
                          struct xen_netif_tx_request *txp,
                          unsigned int extra_count, RING_IDX end)
{
        RING_IDX cons = queue->tx.req_cons;

        do {
                make_tx_response(queue, txp, extra_count, XEN_NETIF_RSP_ERROR);
                if (cons == end)
                        break;
                RING_COPY_REQUEST(&queue->tx, cons++, txp);
                extra_count = 0; /* only the first frag can have extras */
        } while (1);
        queue->tx.req_cons = cons;
}

static void xenvif_fatal_tx_err(struct xenvif *vif)
{
        netdev_err(vif->dev, "fatal error; disabling device\n");
        vif->disabled = true;
        /* Disable the vif from queue 0's kthread */
        if (vif->num_queues)
                xenvif_kick_thread(&vif->queues[0]);
}

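/* Walk the chain of XEN_NETTXF_more_data slots following 'first' and count
 * them. The rules enforced below are:
 *  - running out of work_to_do slots or crossing a page boundary is a
 *    fatal frontend error;
 *  - using fatal_skb_slots (default 20) or more slots is treated as
 *    malicious and is also fatal;
 *  - between XEN_NETBK_LEGACY_SLOTS_MAX (18) and fatal_skb_slots the
 *    packet is merely consumed and dropped, to tolerate buggy frontends.
 */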
static int xenvif_count_requests(struct xenvif_queue *queue,
                                 struct xen_netif_tx_request *first,
                                 unsigned int extra_count,
                                 struct xen_netif_tx_request *txp,
                                 int work_to_do)
{
        RING_IDX cons = queue->tx.req_cons;
        int slots = 0;
        int drop_err = 0;
        int more_data;

        if (!(first->flags & XEN_NETTXF_more_data))
                return 0;

        do {
                struct xen_netif_tx_request dropped_tx = { 0 };

                if (slots >= work_to_do) {
                        netdev_err(queue->vif->dev,
                                   "Asked for %d slots but exceeds this limit\n",
                                   work_to_do);
                        xenvif_fatal_tx_err(queue->vif);
                        return -ENODATA;
                }

                /* This guest is really using too many slots and is
                 * considered malicious.
                 */
                if (unlikely(slots >= fatal_skb_slots)) {
                        netdev_err(queue->vif->dev,
                                   "Malicious frontend using %d slots, threshold %u\n",
                                   slots, fatal_skb_slots);
                        xenvif_fatal_tx_err(queue->vif);
                        return -E2BIG;
                }

                /* The Xen network protocol had an implicit dependency on
                 * MAX_SKB_FRAGS. XEN_NETBK_LEGACY_SLOTS_MAX is set to
                 * the historical MAX_SKB_FRAGS value 18 to honor the
                 * same behavior as before. Any packet using more than
                 * 18 slots but less than fatal_skb_slots slots is
                 * dropped.
                 */
                if (!drop_err && slots >= XEN_NETBK_LEGACY_SLOTS_MAX) {
                        if (net_ratelimit())
                                netdev_dbg(queue->vif->dev,
                                           "Too many slots (%d) exceeding limit (%d), dropping packet\n",
                                           slots, XEN_NETBK_LEGACY_SLOTS_MAX);
                        drop_err = -E2BIG;
                }

                if (drop_err)
                        txp = &dropped_tx;

                RING_COPY_REQUEST(&queue->tx, cons + slots, txp);

                /* If the guest submitted a frame >= 64 KiB then
                 * first->size overflowed and following slots will
                 * appear to be larger than the frame.
                 *
                 * This cannot be a fatal error as there are buggy
                 * frontends that do this.
                 *
                 * Consume all slots and drop the packet.
                 */
                if (!drop_err && txp->size > first->size) {
                        if (net_ratelimit())
                                netdev_dbg(queue->vif->dev,
                                           "Invalid tx request, slot size %u > remaining size %u\n",
                                           txp->size, first->size);
                        drop_err = -EIO;
                }

                first->size -= txp->size;
                slots++;

                if (unlikely((txp->offset + txp->size) > XEN_PAGE_SIZE)) {
                        netdev_err(queue->vif->dev, "Cross page boundary, txp->offset: %u, size: %u\n",
                                   txp->offset, txp->size);
                        xenvif_fatal_tx_err(queue->vif);
                        return -EINVAL;
                }

                more_data = txp->flags & XEN_NETTXF_more_data;

                if (!drop_err)
                        txp++;

        } while (more_data);

        if (drop_err) {
                xenvif_tx_err(queue, first, extra_count, cons + slots);
                return drop_err;
        }

        return slots;
}

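/* Per-skb control block, stored in skb->cb. copy_pending_idx[] records the
 * pending indices of the slots that were grant-copied into the linear area,
 * copy_count is how many of those entries are valid, and split_mask flags
 * copies that had to be split across a local page boundary and therefore
 * consumed two consecutive copy ops.
 */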
struct xenvif_tx_cb {
        u16 copy_pending_idx[XEN_NETBK_LEGACY_SLOTS_MAX + 1];
        u8 copy_count;
        u32 split_mask;
};

#define XENVIF_TX_CB(skb) ((struct xenvif_tx_cb *)(skb)->cb)
#define copy_pending_idx(skb, i) (XENVIF_TX_CB(skb)->copy_pending_idx[i])
#define copy_count(skb) (XENVIF_TX_CB(skb)->copy_count)

static inline void xenvif_tx_create_map_op(struct xenvif_queue *queue,
                                           u16 pending_idx,
                                           struct xen_netif_tx_request *txp,
                                           unsigned int extra_count,
                                           struct gnttab_map_grant_ref *mop)
{
        queue->pages_to_map[mop-queue->tx_map_ops] = queue->mmap_pages[pending_idx];
        gnttab_set_map_op(mop, idx_to_kaddr(queue, pending_idx),
                          GNTMAP_host_map | GNTMAP_readonly,
                          txp->gref, queue->vif->domid);

        memcpy(&queue->pending_tx_info[pending_idx].req, txp,
               sizeof(*txp));
        queue->pending_tx_info[pending_idx].extra_count = extra_count;
}

static inline struct sk_buff *xenvif_alloc_skb(unsigned int size)
{
        struct sk_buff *skb =
                alloc_skb(size + NET_SKB_PAD + NET_IP_ALIGN,
                          GFP_ATOMIC | __GFP_NOWARN);

        BUILD_BUG_ON(sizeof(*XENVIF_TX_CB(skb)) > sizeof(skb->cb));
        if (unlikely(skb == NULL))
                return NULL;

        /* Packets passed to netif_rx() must have some headroom. */
        skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);

        /* Initialize it here to avoid later surprises */
        skb_shinfo(skb)->destructor_arg = NULL;

        return skb;
}

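/* Turn the slots of one packet into grant operations. The first data_len
 * bytes are grant-copied into the skb's linear area (possibly splitting a
 * copy at a local page boundary); every remaining slot is grant-mapped into
 * a frag. Slots that do not fit in MAX_SKB_FRAGS frags spill over into the
 * pre-allocated 'nskb', which becomes the skb's frag_list; zero-sized slots
 * are acknowledged immediately and skipped.
 */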
static void xenvif_get_requests(struct xenvif_queue *queue,
                                struct sk_buff *skb,
                                struct xen_netif_tx_request *first,
                                struct xen_netif_tx_request *txfrags,
                                unsigned *copy_ops,
                                unsigned *map_ops,
                                unsigned int frag_overflow,
                                struct sk_buff *nskb,
                                unsigned int extra_count,
                                unsigned int data_len)
{
        struct skb_shared_info *shinfo = skb_shinfo(skb);
        skb_frag_t *frags = shinfo->frags;
        u16 pending_idx;
        pending_ring_idx_t index;
        unsigned int nr_slots;
        struct gnttab_copy *cop = queue->tx_copy_ops + *copy_ops;
        struct gnttab_map_grant_ref *gop = queue->tx_map_ops + *map_ops;
        struct xen_netif_tx_request *txp = first;

        nr_slots = shinfo->nr_frags + frag_overflow + 1;

        copy_count(skb) = 0;
        XENVIF_TX_CB(skb)->split_mask = 0;

        /* Create copy ops for exactly data_len bytes into the skb head. */
        __skb_put(skb, data_len);
        while (data_len > 0) {
                int amount = data_len > txp->size ? txp->size : data_len;
                bool split = false;

                cop->source.u.ref = txp->gref;
                cop->source.domid = queue->vif->domid;
                cop->source.offset = txp->offset;

                cop->dest.domid = DOMID_SELF;
                cop->dest.offset = (offset_in_page(skb->data +
                                                   skb_headlen(skb) -
                                                   data_len)) & ~XEN_PAGE_MASK;
                cop->dest.u.gmfn = virt_to_gfn(skb->data + skb_headlen(skb)
                                               - data_len);

                /* Don't cross local page boundary! */
                if (cop->dest.offset + amount > XEN_PAGE_SIZE) {
                        amount = XEN_PAGE_SIZE - cop->dest.offset;
                        XENVIF_TX_CB(skb)->split_mask |= 1U << copy_count(skb);
                        split = true;
                }

                cop->len = amount;
                cop->flags = GNTCOPY_source_gref;

                index = pending_index(queue->pending_cons);
                pending_idx = queue->pending_ring[index];
                callback_param(queue, pending_idx).ctx = NULL;
                copy_pending_idx(skb, copy_count(skb)) = pending_idx;
                if (!split)
                        copy_count(skb)++;

                cop++;
                data_len -= amount;

                if (amount == txp->size) {
                        /* The copy op covered the full tx_request */

                        memcpy(&queue->pending_tx_info[pending_idx].req,
                               txp, sizeof(*txp));
                        queue->pending_tx_info[pending_idx].extra_count =
                                (txp == first) ? extra_count : 0;

                        if (txp == first)
                                txp = txfrags;
                        else
                                txp++;
                        queue->pending_cons++;
                        nr_slots--;
                } else {
                        /* The copy op partially covered the tx_request.
                         * The remainder will be mapped or copied in the next
                         * iteration.
                         */
                        txp->offset += amount;
                        txp->size -= amount;
                }
        }

        for (shinfo->nr_frags = 0; nr_slots > 0 && shinfo->nr_frags < MAX_SKB_FRAGS;
             nr_slots--) {
                if (unlikely(!txp->size)) {
                        make_tx_response(queue, txp, 0, XEN_NETIF_RSP_OKAY);
                        ++txp;
                        continue;
                }

                index = pending_index(queue->pending_cons++);
                pending_idx = queue->pending_ring[index];
                xenvif_tx_create_map_op(queue, pending_idx, txp,
                                        txp == first ? extra_count : 0, gop);
                frag_set_pending_idx(&frags[shinfo->nr_frags], pending_idx);
                ++shinfo->nr_frags;
                ++gop;

                if (txp == first)
                        txp = txfrags;
                else
                        txp++;
        }

        if (nr_slots > 0) {

                shinfo = skb_shinfo(nskb);
                frags = shinfo->frags;

                for (shinfo->nr_frags = 0; shinfo->nr_frags < nr_slots; ++txp) {
                        if (unlikely(!txp->size)) {
                                make_tx_response(queue, txp, 0,
                                                 XEN_NETIF_RSP_OKAY);
                                continue;
                        }

                        index = pending_index(queue->pending_cons++);
                        pending_idx = queue->pending_ring[index];
                        xenvif_tx_create_map_op(queue, pending_idx, txp, 0,
                                                gop);
                        frag_set_pending_idx(&frags[shinfo->nr_frags],
                                             pending_idx);
                        ++shinfo->nr_frags;
                        ++gop;
                }

                if (shinfo->nr_frags) {
                        skb_shinfo(skb)->frag_list = nskb;
                        nskb = NULL;
                }
        }

        if (nskb) {
                /* A frag_list skb was allocated but it is no longer needed
                 * because enough slots were converted to copy ops above or some
                 * were empty.
                 */
                kfree_skb(nskb);
        }

        (*copy_ops) = cop - queue->tx_copy_ops;
        (*map_ops) = gop - queue->tx_map_ops;
}

static inline void xenvif_grant_handle_set(struct xenvif_queue *queue,
                                           u16 pending_idx,
                                           grant_handle_t handle)
{
        if (unlikely(queue->grant_tx_handle[pending_idx] !=
                     NETBACK_INVALID_HANDLE)) {
                netdev_err(queue->vif->dev,
                           "Trying to overwrite active handle! pending_idx: 0x%x\n",
                           pending_idx);
                BUG();
        }
        queue->grant_tx_handle[pending_idx] = handle;
}

static inline void xenvif_grant_handle_reset(struct xenvif_queue *queue,
                                             u16 pending_idx)
{
        if (unlikely(queue->grant_tx_handle[pending_idx] ==
                     NETBACK_INVALID_HANDLE)) {
                netdev_err(queue->vif->dev,
                           "Trying to unmap invalid handle! pending_idx: 0x%x\n",
                           pending_idx);
                BUG();
        }
        queue->grant_tx_handle[pending_idx] = NETBACK_INVALID_HANDLE;
}

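/* Check the grant operation results for one skb: first the copy ops that
 * filled the linear area, then the map ops for each frag (and, via the
 * check_frags label, the frags of a frag_list skb). On the first failure the
 * affected slot gets an error response and all other slots of the packet are
 * unmapped and released, taking care with a slot that is shared between the
 * linear copy and the first frag.
 */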
static int xenvif_tx_check_gop(struct xenvif_queue *queue,
                               struct sk_buff *skb,
                               struct gnttab_map_grant_ref **gopp_map,
                               struct gnttab_copy **gopp_copy)
{
        struct gnttab_map_grant_ref *gop_map = *gopp_map;
        u16 pending_idx;
        /* This always points to the shinfo of the skb being checked, which
         * could be either the first or the one on the frag_list
         */
        struct skb_shared_info *shinfo = skb_shinfo(skb);
        /* If this is non-NULL, we are currently checking the frag_list skb, and
         * this points to the shinfo of the first one
         */
        struct skb_shared_info *first_shinfo = NULL;
        int nr_frags = shinfo->nr_frags;
        const bool sharedslot = nr_frags &&
                                frag_get_pending_idx(&shinfo->frags[0]) ==
                                    copy_pending_idx(skb, copy_count(skb) - 1);
        int i, err = 0;

        for (i = 0; i < copy_count(skb); i++) {
                int newerr;

                /* Check status of header. */
                pending_idx = copy_pending_idx(skb, i);

                newerr = (*gopp_copy)->status;

                /* Split copies need to be handled together. */
                if (XENVIF_TX_CB(skb)->split_mask & (1U << i)) {
                        (*gopp_copy)++;
                        if (!newerr)
                                newerr = (*gopp_copy)->status;
                }
                if (likely(!newerr)) {
                        /* The first frag might still have this slot mapped */
                        if (i < copy_count(skb) - 1 || !sharedslot)
                                xenvif_idx_release(queue, pending_idx,
                                                   XEN_NETIF_RSP_OKAY);
                } else {
                        err = newerr;
                        if (net_ratelimit())
                                netdev_dbg(queue->vif->dev,
                                           "Grant copy of header failed! status: %d pending_idx: %u ref: %u\n",
                                           (*gopp_copy)->status,
                                           pending_idx,
                                           (*gopp_copy)->source.u.ref);
                        /* The first frag might still have this slot mapped */
                        if (i < copy_count(skb) - 1 || !sharedslot)
                                xenvif_idx_release(queue, pending_idx,
                                                   XEN_NETIF_RSP_ERROR);
                }
                (*gopp_copy)++;
        }

check_frags:
        for (i = 0; i < nr_frags; i++, gop_map++) {
                int j, newerr;

                pending_idx = frag_get_pending_idx(&shinfo->frags[i]);

                /* Check error status: if okay then remember grant handle. */
                newerr = gop_map->status;

                if (likely(!newerr)) {
                        xenvif_grant_handle_set(queue,
                                                pending_idx,
                                                gop_map->handle);
                        /* Had a previous error? Invalidate this fragment. */
                        if (unlikely(err)) {
                                xenvif_idx_unmap(queue, pending_idx);
                                /* If the mapping of the first frag was OK, but
                                 * the header's copy failed, and they are
                                 * sharing a slot, send an error
                                 */
                                if (i == 0 && !first_shinfo && sharedslot)
                                        xenvif_idx_release(queue, pending_idx,
                                                           XEN_NETIF_RSP_ERROR);
                                else
                                        xenvif_idx_release(queue, pending_idx,
                                                           XEN_NETIF_RSP_OKAY);
                        }
                        continue;
                }

                /* Error on this fragment: respond to client with an error. */
                if (net_ratelimit())
                        netdev_dbg(queue->vif->dev,
                                   "Grant map of %d. frag failed! status: %d pending_idx: %u ref: %u\n",
                                   i,
                                   gop_map->status,
                                   pending_idx,
                                   gop_map->ref);

                xenvif_idx_release(queue, pending_idx, XEN_NETIF_RSP_ERROR);

                /* Not the first error? Preceding frags already invalidated. */
                if (err)
                        continue;

                /* Invalidate preceding fragments of this skb. */
                for (j = 0; j < i; j++) {
                        pending_idx = frag_get_pending_idx(&shinfo->frags[j]);
                        xenvif_idx_unmap(queue, pending_idx);
                        xenvif_idx_release(queue, pending_idx,
                                           XEN_NETIF_RSP_OKAY);
                }

                /* And if we found the error while checking the frag_list, unmap
                 * the first skb's frags
                 */
                if (first_shinfo) {
                        for (j = 0; j < first_shinfo->nr_frags; j++) {
                                pending_idx = frag_get_pending_idx(&first_shinfo->frags[j]);
                                xenvif_idx_unmap(queue, pending_idx);
                                xenvif_idx_release(queue, pending_idx,
                                                   XEN_NETIF_RSP_OKAY);
                        }
                }

                /* Remember the error: invalidate all subsequent fragments. */
                err = newerr;
        }

        if (skb_has_frag_list(skb) && !first_shinfo) {
                first_shinfo = shinfo;
                shinfo = skb_shinfo(shinfo->frag_list);
                nr_frags = shinfo->nr_frags;

                goto check_frags;
        }

        *gopp_map = gop_map;
        return err;
}

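/* Walk the frags of a fully checked skb and replace each pending index with
 * the now-mapped local page, accumulating len/data_len/truesize and chaining
 * the zerocopy callback_structs so the whole chain is released together.
 */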
static void xenvif_fill_frags(struct xenvif_queue *queue, struct sk_buff *skb)
{
        struct skb_shared_info *shinfo = skb_shinfo(skb);
        int nr_frags = shinfo->nr_frags;
        int i;
        u16 prev_pending_idx = INVALID_PENDING_IDX;

        for (i = 0; i < nr_frags; i++) {
                skb_frag_t *frag = shinfo->frags + i;
                struct xen_netif_tx_request *txp;
                struct page *page;
                u16 pending_idx;

                pending_idx = frag_get_pending_idx(frag);

                /* If this is not the first frag, chain it to the previous */
                if (prev_pending_idx == INVALID_PENDING_IDX)
                        skb_shinfo(skb)->destructor_arg =
                                &callback_param(queue, pending_idx);
                else
                        callback_param(queue, prev_pending_idx).ctx =
                                &callback_param(queue, pending_idx);

                callback_param(queue, pending_idx).ctx = NULL;
                prev_pending_idx = pending_idx;

                txp = &queue->pending_tx_info[pending_idx].req;
                page = virt_to_page((void *)idx_to_kaddr(queue, pending_idx));
                __skb_fill_page_desc(skb, i, page, txp->offset, txp->size);
                skb->len += txp->size;
                skb->data_len += txp->size;
                skb->truesize += txp->size;

                /* Take an extra reference to offset network stack's put_page */
                get_page(queue->mmap_pages[pending_idx]);
        }
}

static int xenvif_get_extras(struct xenvif_queue *queue,
                             struct xen_netif_extra_info *extras,
                             unsigned int *extra_count,
                             int work_to_do)
{
        struct xen_netif_extra_info extra;
        RING_IDX cons = queue->tx.req_cons;

        do {
                if (unlikely(work_to_do-- <= 0)) {
                        netdev_err(queue->vif->dev, "Missing extra info\n");
                        xenvif_fatal_tx_err(queue->vif);
                        return -EBADR;
                }

                RING_COPY_REQUEST(&queue->tx, cons, &extra);

                queue->tx.req_cons = ++cons;
                (*extra_count)++;

                if (unlikely(!extra.type ||
                             extra.type >= XEN_NETIF_EXTRA_TYPE_MAX)) {
                        netdev_err(queue->vif->dev,
                                   "Invalid extra type: %d\n", extra.type);
                        xenvif_fatal_tx_err(queue->vif);
                        return -EINVAL;
                }

                memcpy(&extras[extra.type - 1], &extra, sizeof(extra));
        } while (extra.flags & XEN_NETIF_EXTRA_FLAG_MORE);

        return work_to_do;
}

static int xenvif_set_skb_gso(struct xenvif *vif,
                              struct sk_buff *skb,
                              struct xen_netif_extra_info *gso)
{
        if (!gso->u.gso.size) {
                netdev_err(vif->dev, "GSO size must not be zero.\n");
                xenvif_fatal_tx_err(vif);
                return -EINVAL;
        }

        switch (gso->u.gso.type) {
        case XEN_NETIF_GSO_TYPE_TCPV4:
                skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
                break;
        case XEN_NETIF_GSO_TYPE_TCPV6:
                skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
                break;
        default:
                netdev_err(vif->dev, "Bad GSO type %d.\n", gso->u.gso.type);
                xenvif_fatal_tx_err(vif);
                return -EINVAL;
        }

        skb_shinfo(skb)->gso_size = gso->u.gso.size;
        /* gso_segs will be calculated later */

        return 0;
}

static int checksum_setup(struct xenvif_queue *queue, struct sk_buff *skb)
{
        bool recalculate_partial_csum = false;

        /* A GSO SKB must be CHECKSUM_PARTIAL. However some buggy
         * peers can fail to set NETRXF_csum_blank when sending a GSO
         * frame. In this case force the SKB to CHECKSUM_PARTIAL and
         * recalculate the partial checksum.
         */
        if (skb->ip_summed != CHECKSUM_PARTIAL && skb_is_gso(skb)) {
                queue->stats.rx_gso_checksum_fixup++;
                skb->ip_summed = CHECKSUM_PARTIAL;
                recalculate_partial_csum = true;
        }

        /* A non-CHECKSUM_PARTIAL SKB does not require setup. */
        if (skb->ip_summed != CHECKSUM_PARTIAL)
                return 0;

        return skb_checksum_setup(skb, recalculate_partial_csum);
}

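/* Illustrative example of the credit window (values assumed): with
 * credit_bytes == 1000000 and credit_usec == 100000, up to roughly 1 MB may
 * be sent per 100 ms window. Once remaining_credit is exhausted, a timer is
 * armed for the start of the next window and the queue is marked
 * rate_limited.
 */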
static bool tx_credit_exceeded(struct xenvif_queue *queue, unsigned size)
{
        u64 now = get_jiffies_64();
        u64 next_credit = queue->credit_window_start +
                msecs_to_jiffies(queue->credit_usec / 1000);

        /* Timer could already be pending in rare cases. */
        if (timer_pending(&queue->credit_timeout)) {
                queue->rate_limited = true;
                return true;
        }

        /* Passed the point where we can replenish credit? */
        if (time_after_eq64(now, next_credit)) {
                queue->credit_window_start = now;
                tx_add_credit(queue);
        }

        /* Still too big to send right now? Set a callback. */
        if (size > queue->remaining_credit) {
                mod_timer(&queue->credit_timeout,
                          next_credit);
                queue->credit_window_start = next_credit;
                queue->rate_limited = true;

                return true;
        }

        return false;
}

/* No locking is required in xenvif_mcast_add/del() as they are
 * only ever invoked from NAPI poll. An RCU list is used because
 * xenvif_mcast_match() is called asynchronously, during start_xmit.
 */

static int xenvif_mcast_add(struct xenvif *vif, const u8 *addr)
{
        struct xenvif_mcast_addr *mcast;

        if (vif->fe_mcast_count == XEN_NETBK_MCAST_MAX) {
                if (net_ratelimit())
                        netdev_err(vif->dev,
                                   "Too many multicast addresses\n");
                return -ENOSPC;
        }

        mcast = kzalloc(sizeof(*mcast), GFP_ATOMIC);
        if (!mcast)
                return -ENOMEM;

        ether_addr_copy(mcast->addr, addr);
        list_add_tail_rcu(&mcast->entry, &vif->fe_mcast_addr);
        vif->fe_mcast_count++;

        return 0;
}

static void xenvif_mcast_del(struct xenvif *vif, const u8 *addr)
{
        struct xenvif_mcast_addr *mcast;

        list_for_each_entry_rcu(mcast, &vif->fe_mcast_addr, entry) {
                if (ether_addr_equal(addr, mcast->addr)) {
                        --vif->fe_mcast_count;
                        list_del_rcu(&mcast->entry);
                        kfree_rcu(mcast, rcu);
                        break;
                }
        }
}

bool xenvif_mcast_match(struct xenvif *vif, const u8 *addr)
{
        struct xenvif_mcast_addr *mcast;

        rcu_read_lock();
        list_for_each_entry_rcu(mcast, &vif->fe_mcast_addr, entry) {
                if (ether_addr_equal(addr, mcast->addr)) {
                        rcu_read_unlock();
                        return true;
                }
        }
        rcu_read_unlock();

        return false;
}

void xenvif_mcast_addr_list_free(struct xenvif *vif)
{
        /* No need for locking or RCU here. NAPI poll and TX queue
         * are stopped.
         */
        while (!list_empty(&vif->fe_mcast_addr)) {
                struct xenvif_mcast_addr *mcast;

                mcast = list_first_entry(&vif->fe_mcast_addr,
                                         struct xenvif_mcast_addr,
                                         entry);
                --vif->fe_mcast_count;
                list_del(&mcast->entry);
                kfree(mcast);
        }
}

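/* Main Tx slot consumer, run from NAPI poll. For each packet it reads the
 * first request, applies credit-based scheduling, gathers any extras
 * (multicast add/del are handled entirely here), validates the slot count,
 * allocates the skb (plus an nskb for frag_list overflow when more than
 * MAX_SKB_FRAGS slots are in use) and queues the grant copy/map operations
 * via xenvif_get_requests(); *copy_ops and *map_ops return how many of each
 * were built.
 */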
xenvif_tx_build_gops(struct xenvif_queue * queue,int budget,unsigned * copy_ops,unsigned * map_ops)914e9ce7cb6SWei Liu static void xenvif_tx_build_gops(struct xenvif_queue *queue,
915bdab8275SZoltan Kiss int budget,
916bdab8275SZoltan Kiss unsigned *copy_ops,
917bdab8275SZoltan Kiss unsigned *map_ops)
918f942dc25SIan Campbell {
9192475b225SRoss Lagerwall struct sk_buff *skb, *nskb;
920f942dc25SIan Campbell int ret;
9212475b225SRoss Lagerwall unsigned int frag_overflow;
922f942dc25SIan Campbell
923e9ce7cb6SWei Liu while (skb_queue_len(&queue->tx_queue) < budget) {
924f942dc25SIan Campbell struct xen_netif_tx_request txreq;
92537641494SWei Liu struct xen_netif_tx_request txfrags[XEN_NETBK_LEGACY_SLOTS_MAX];
926f942dc25SIan Campbell struct xen_netif_extra_info extras[XEN_NETIF_EXTRA_TYPE_MAX-1];
927562abd39SPaul Durrant unsigned int extra_count;
928f942dc25SIan Campbell RING_IDX idx;
929f942dc25SIan Campbell int work_to_do;
930f942dc25SIan Campbell unsigned int data_len;
931f942dc25SIan Campbell
932e9ce7cb6SWei Liu if (queue->tx.sring->req_prod - queue->tx.req_cons >
93348856286SIan Campbell XEN_NETIF_TX_RING_SIZE) {
934e9ce7cb6SWei Liu netdev_err(queue->vif->dev,
93548856286SIan Campbell "Impossible number of requests. "
93648856286SIan Campbell "req_prod %d, req_cons %d, size %ld\n",
937e9ce7cb6SWei Liu queue->tx.sring->req_prod, queue->tx.req_cons,
93848856286SIan Campbell XEN_NETIF_TX_RING_SIZE);
939e9ce7cb6SWei Liu xenvif_fatal_tx_err(queue->vif);
940e9d8b2c2SWei Liu break;
94148856286SIan Campbell }
94248856286SIan Campbell
94309e545f7SJuergen Gross work_to_do = XEN_RING_NR_UNCONSUMED_REQUESTS(&queue->tx);
944b3f980bdSWei Liu if (!work_to_do)
945b3f980bdSWei Liu break;
946f942dc25SIan Campbell
947e9ce7cb6SWei Liu idx = queue->tx.req_cons;
948f942dc25SIan Campbell rmb(); /* Ensure that we see the request before we copy it. */
94968a33bfdSDavid Vrabel RING_COPY_REQUEST(&queue->tx, idx, &txreq);
950f942dc25SIan Campbell
951f942dc25SIan Campbell /* Credit-based scheduling. */
952e9ce7cb6SWei Liu if (txreq.size > queue->remaining_credit &&
953e9ce7cb6SWei Liu tx_credit_exceeded(queue, txreq.size))
954b3f980bdSWei Liu break;
955f942dc25SIan Campbell
956e9ce7cb6SWei Liu queue->remaining_credit -= txreq.size;
957f942dc25SIan Campbell
958f942dc25SIan Campbell work_to_do--;
959e9ce7cb6SWei Liu queue->tx.req_cons = ++idx;
960f942dc25SIan Campbell
961f942dc25SIan Campbell memset(extras, 0, sizeof(extras));
962562abd39SPaul Durrant extra_count = 0;
963f942dc25SIan Campbell if (txreq.flags & XEN_NETTXF_extra_info) {
964e9ce7cb6SWei Liu work_to_do = xenvif_get_extras(queue, extras,
965562abd39SPaul Durrant &extra_count,
966f942dc25SIan Campbell work_to_do);
967e9ce7cb6SWei Liu idx = queue->tx.req_cons;
96848856286SIan Campbell if (unlikely(work_to_do < 0))
969b3f980bdSWei Liu break;
970f942dc25SIan Campbell }
971f942dc25SIan Campbell
972210c34dcSPaul Durrant if (extras[XEN_NETIF_EXTRA_TYPE_MCAST_ADD - 1].type) {
973210c34dcSPaul Durrant struct xen_netif_extra_info *extra;
974210c34dcSPaul Durrant
975210c34dcSPaul Durrant extra = &extras[XEN_NETIF_EXTRA_TYPE_MCAST_ADD - 1];
976210c34dcSPaul Durrant ret = xenvif_mcast_add(queue->vif, extra->u.mcast.addr);
977210c34dcSPaul Durrant
978562abd39SPaul Durrant make_tx_response(queue, &txreq, extra_count,
979210c34dcSPaul Durrant (ret == 0) ?
980210c34dcSPaul Durrant XEN_NETIF_RSP_OKAY :
981210c34dcSPaul Durrant XEN_NETIF_RSP_ERROR);
982210c34dcSPaul Durrant continue;
983210c34dcSPaul Durrant }
984210c34dcSPaul Durrant
985210c34dcSPaul Durrant if (extras[XEN_NETIF_EXTRA_TYPE_MCAST_DEL - 1].type) {
986210c34dcSPaul Durrant struct xen_netif_extra_info *extra;
987210c34dcSPaul Durrant
988210c34dcSPaul Durrant extra = &extras[XEN_NETIF_EXTRA_TYPE_MCAST_DEL - 1];
989210c34dcSPaul Durrant xenvif_mcast_del(queue->vif, extra->u.mcast.addr);
990210c34dcSPaul Durrant
991562abd39SPaul Durrant make_tx_response(queue, &txreq, extra_count,
992562abd39SPaul Durrant XEN_NETIF_RSP_OKAY);
993210c34dcSPaul Durrant continue;
994210c34dcSPaul Durrant }
995210c34dcSPaul Durrant
996ad7f402aSRoss Lagerwall data_len = (txreq.size > XEN_NETBACK_TX_COPY_LEN) ?
997ad7f402aSRoss Lagerwall XEN_NETBACK_TX_COPY_LEN : txreq.size;
998ad7f402aSRoss Lagerwall
999562abd39SPaul Durrant ret = xenvif_count_requests(queue, &txreq, extra_count,
1000562abd39SPaul Durrant txfrags, work_to_do);
1001ad7f402aSRoss Lagerwall
100248856286SIan Campbell if (unlikely(ret < 0))
1003b3f980bdSWei Liu break;
100448856286SIan Campbell
1005f942dc25SIan Campbell idx += ret;
1006f942dc25SIan Campbell
1007f942dc25SIan Campbell if (unlikely(txreq.size < ETH_HLEN)) {
1008e9ce7cb6SWei Liu netdev_dbg(queue->vif->dev,
1009f942dc25SIan Campbell "Bad packet size: %d\n", txreq.size);
1010562abd39SPaul Durrant xenvif_tx_err(queue, &txreq, extra_count, idx);
1011b3f980bdSWei Liu break;
1012f942dc25SIan Campbell }
1013f942dc25SIan Campbell
1014f942dc25SIan Campbell /* No crossing a page as the payload mustn't fragment. */
1015d0089e8aSJulien Grall if (unlikely((txreq.offset + txreq.size) > XEN_PAGE_SIZE)) {
10162eca98e5SJuergen Gross netdev_err(queue->vif->dev, "Cross page boundary, txreq.offset: %u, size: %u\n",
10172eca98e5SJuergen Gross txreq.offset, txreq.size);
1018e9ce7cb6SWei Liu xenvif_fatal_tx_err(queue->vif);
1019b3f980bdSWei Liu break;
1020f942dc25SIan Campbell }
1021f942dc25SIan Campbell
1022ad7f402aSRoss Lagerwall if (ret >= XEN_NETBK_LEGACY_SLOTS_MAX - 1 && data_len < txreq.size)
1023ad7f402aSRoss Lagerwall data_len = txreq.size;
1024f942dc25SIan Campbell
1025e3377f36SZoltan Kiss skb = xenvif_alloc_skb(data_len);
1026f942dc25SIan Campbell if (unlikely(skb == NULL)) {
1027e9ce7cb6SWei Liu netdev_dbg(queue->vif->dev,
1028f942dc25SIan Campbell "Can't allocate a skb in start_xmit.\n");
1029562abd39SPaul Durrant xenvif_tx_err(queue, &txreq, extra_count, idx);
1030f942dc25SIan Campbell break;
1031f942dc25SIan Campbell }
1032f942dc25SIan Campbell
10332475b225SRoss Lagerwall skb_shinfo(skb)->nr_frags = ret;
10342475b225SRoss Lagerwall /* At this point shinfo->nr_frags is in fact the number of
10352475b225SRoss Lagerwall * slots, which can be as large as XEN_NETBK_LEGACY_SLOTS_MAX.
10362475b225SRoss Lagerwall */
10372475b225SRoss Lagerwall frag_overflow = 0;
10382475b225SRoss Lagerwall nskb = NULL;
10392475b225SRoss Lagerwall if (skb_shinfo(skb)->nr_frags > MAX_SKB_FRAGS) {
10402475b225SRoss Lagerwall frag_overflow = skb_shinfo(skb)->nr_frags - MAX_SKB_FRAGS;
10412475b225SRoss Lagerwall BUG_ON(frag_overflow > MAX_SKB_FRAGS);
10422475b225SRoss Lagerwall skb_shinfo(skb)->nr_frags = MAX_SKB_FRAGS;
10432475b225SRoss Lagerwall nskb = xenvif_alloc_skb(0);
10442475b225SRoss Lagerwall if (unlikely(nskb == NULL)) {
10453a0233ddSRoss Lagerwall skb_shinfo(skb)->nr_frags = 0;
10462475b225SRoss Lagerwall kfree_skb(skb);
1047562abd39SPaul Durrant xenvif_tx_err(queue, &txreq, extra_count, idx);
10482475b225SRoss Lagerwall if (net_ratelimit())
10492475b225SRoss Lagerwall netdev_err(queue->vif->dev,
10502475b225SRoss Lagerwall "Can't allocate the frag_list skb.\n");
10512475b225SRoss Lagerwall break;
10522475b225SRoss Lagerwall }
10532475b225SRoss Lagerwall }
10542475b225SRoss Lagerwall
1055f942dc25SIan Campbell if (extras[XEN_NETIF_EXTRA_TYPE_GSO - 1].type) {
1056f942dc25SIan Campbell struct xen_netif_extra_info *gso;
1057f942dc25SIan Campbell gso = &extras[XEN_NETIF_EXTRA_TYPE_GSO - 1];
1058f942dc25SIan Campbell
1059e9ce7cb6SWei Liu if (xenvif_set_skb_gso(queue->vif, skb, gso)) {
10607376419aSWei Liu /* Failure in xenvif_set_skb_gso is fatal. */
10613a0233ddSRoss Lagerwall skb_shinfo(skb)->nr_frags = 0;
1062f942dc25SIan Campbell kfree_skb(skb);
10632475b225SRoss Lagerwall kfree_skb(nskb);
1064b3f980bdSWei Liu break;
1065f942dc25SIan Campbell }
1066f942dc25SIan Campbell }
1067f942dc25SIan Campbell
1068c2d09fdeSPaul Durrant if (extras[XEN_NETIF_EXTRA_TYPE_HASH - 1].type) {
1069c2d09fdeSPaul Durrant struct xen_netif_extra_info *extra;
1070c2d09fdeSPaul Durrant enum pkt_hash_types type = PKT_HASH_TYPE_NONE;
1071c2d09fdeSPaul Durrant
1072c2d09fdeSPaul Durrant extra = &extras[XEN_NETIF_EXTRA_TYPE_HASH - 1];
1073c2d09fdeSPaul Durrant
1074c2d09fdeSPaul Durrant switch (extra->u.hash.type) {
1075c2d09fdeSPaul Durrant case _XEN_NETIF_CTRL_HASH_TYPE_IPV4:
1076c2d09fdeSPaul Durrant case _XEN_NETIF_CTRL_HASH_TYPE_IPV6:
1077c2d09fdeSPaul Durrant type = PKT_HASH_TYPE_L3;
1078c2d09fdeSPaul Durrant break;
1079c2d09fdeSPaul Durrant
1080c2d09fdeSPaul Durrant case _XEN_NETIF_CTRL_HASH_TYPE_IPV4_TCP:
1081c2d09fdeSPaul Durrant case _XEN_NETIF_CTRL_HASH_TYPE_IPV6_TCP:
1082c2d09fdeSPaul Durrant type = PKT_HASH_TYPE_L4;
1083c2d09fdeSPaul Durrant break;
1084c2d09fdeSPaul Durrant
1085c2d09fdeSPaul Durrant default:
1086c2d09fdeSPaul Durrant break;
1087c2d09fdeSPaul Durrant }
1088c2d09fdeSPaul Durrant
1089c2d09fdeSPaul Durrant if (type != PKT_HASH_TYPE_NONE)
1090c2d09fdeSPaul Durrant skb_set_hash(skb,
1091c2d09fdeSPaul Durrant *(u32 *)extra->u.hash.value,
1092c2d09fdeSPaul Durrant type);
1093c2d09fdeSPaul Durrant }
1094c2d09fdeSPaul Durrant
1095ad7f402aSRoss Lagerwall xenvif_get_requests(queue, skb, &txreq, txfrags, copy_ops,
1096ad7f402aSRoss Lagerwall map_ops, frag_overflow, nskb, extra_count,
1097ad7f402aSRoss Lagerwall data_len);
1098f942dc25SIan Campbell
1099e9ce7cb6SWei Liu __skb_queue_tail(&queue->tx_queue, skb);
11001e0b6eacSAnnie Li
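		/* Commit consumption of this packet's request(s) on the
		 * shared ring.
		 */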
1101e9ce7cb6SWei Liu queue->tx.req_cons = idx;
1102f942dc25SIan Campbell }
1103f942dc25SIan Campbell
1104bdab8275SZoltan Kiss return;
1105f942dc25SIan Campbell }
1106f942dc25SIan Campbell
1107e3377f36SZoltan Kiss /* Consolidate skb with a frag_list into a brand new one with local pages on
1108e3377f36SZoltan Kiss  * frags. Returns 0 on success, or -ENOMEM if new pages can't be allocated.
1109e3377f36SZoltan Kiss */
1110e9ce7cb6SWei Liu static int xenvif_handle_frag_list(struct xenvif_queue *queue, struct sk_buff *skb)
1111e3377f36SZoltan Kiss {
1112e3377f36SZoltan Kiss unsigned int offset = skb_headlen(skb);
1113e3377f36SZoltan Kiss skb_frag_t frags[MAX_SKB_FRAGS];
111449d9991aSDavid Vrabel int i, f;
1115e3377f36SZoltan Kiss struct ubuf_info *uarg;
1116e3377f36SZoltan Kiss struct sk_buff *nskb = skb_shinfo(skb)->frag_list;
1117e3377f36SZoltan Kiss
1118e9ce7cb6SWei Liu queue->stats.tx_zerocopy_sent += 2;
1119e9ce7cb6SWei Liu queue->stats.tx_frag_overflow++;
1120e3377f36SZoltan Kiss
1121e9ce7cb6SWei Liu xenvif_fill_frags(queue, nskb);
1122e3377f36SZoltan Kiss 	/* Subtract the frags' size; we will correct it later */
1123e3377f36SZoltan Kiss skb->truesize -= skb->data_len;
1124e3377f36SZoltan Kiss skb->len += nskb->len;
1125e3377f36SZoltan Kiss skb->data_len += nskb->len;
1126e3377f36SZoltan Kiss
1127e3377f36SZoltan Kiss /* create a brand new frags array and coalesce there */
1128e3377f36SZoltan Kiss for (i = 0; offset < skb->len; i++) {
1129e3377f36SZoltan Kiss struct page *page;
1130e3377f36SZoltan Kiss unsigned int len;
1131e3377f36SZoltan Kiss
1132e3377f36SZoltan Kiss BUG_ON(i >= MAX_SKB_FRAGS);
113344cc8ed1SZoltan Kiss page = alloc_page(GFP_ATOMIC);
1134e3377f36SZoltan Kiss if (!page) {
1135e3377f36SZoltan Kiss int j;
1136e3377f36SZoltan Kiss skb->truesize += skb->data_len;
1137e3377f36SZoltan Kiss for (j = 0; j < i; j++)
1138d7840976SMatthew Wilcox (Oracle) put_page(skb_frag_page(&frags[j]));
1139e3377f36SZoltan Kiss return -ENOMEM;
1140e3377f36SZoltan Kiss }
1141e3377f36SZoltan Kiss
1142e3377f36SZoltan Kiss if (offset + PAGE_SIZE < skb->len)
1143e3377f36SZoltan Kiss len = PAGE_SIZE;
1144e3377f36SZoltan Kiss else
1145e3377f36SZoltan Kiss len = skb->len - offset;
1146e3377f36SZoltan Kiss if (skb_copy_bits(skb, offset, page_address(page), len))
1147e3377f36SZoltan Kiss BUG();
1148e3377f36SZoltan Kiss
1149e3377f36SZoltan Kiss offset += len;
1150b51f4113SYunsheng Lin skb_frag_fill_page_desc(&frags[i], page, 0, len);
1151e3377f36SZoltan Kiss }
115249d9991aSDavid Vrabel
115349d9991aSDavid Vrabel /* Release all the original (foreign) frags. */
115449d9991aSDavid Vrabel for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
115549d9991aSDavid Vrabel skb_frag_unref(skb, f);
1156e3377f36SZoltan Kiss uarg = skb_shinfo(skb)->destructor_arg;
1157a64bd934SWei Liu /* increase inflight counter to offset decrement in callback */
1158a64bd934SWei Liu atomic_inc(&queue->inflight_packets);
115936177832SJonathan Lemon uarg->callback(NULL, uarg, true);
1160e3377f36SZoltan Kiss skb_shinfo(skb)->destructor_arg = NULL;
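	/* The callback above queues every foreign grant of this skb for
	 * unmapping; the stale frag descriptors are overwritten with the
	 * local pages below.
	 */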
1161e3377f36SZoltan Kiss
1162b0c21badSDavid Vrabel /* Fill the skb with the new (local) frags. */
1163b0c21badSDavid Vrabel memcpy(skb_shinfo(skb)->frags, frags, i * sizeof(skb_frag_t));
1164b0c21badSDavid Vrabel skb_shinfo(skb)->nr_frags = i;
1165b0c21badSDavid Vrabel skb->truesize += i * PAGE_SIZE;
1166e3377f36SZoltan Kiss
1167e3377f36SZoltan Kiss return 0;
1168e3377f36SZoltan Kiss }
1169f942dc25SIan Campbell
1170e9ce7cb6SWei Liu static int xenvif_tx_submit(struct xenvif_queue *queue)
1171b3f980bdSWei Liu {
1172e9ce7cb6SWei Liu struct gnttab_map_grant_ref *gop_map = queue->tx_map_ops;
1173e9ce7cb6SWei Liu struct gnttab_copy *gop_copy = queue->tx_copy_ops;
1174b3f980bdSWei Liu struct sk_buff *skb;
1175b3f980bdSWei Liu int work_done = 0;
1176b3f980bdSWei Liu
1177e9ce7cb6SWei Liu while ((skb = __skb_dequeue(&queue->tx_queue)) != NULL) {
1178f942dc25SIan Campbell struct xen_netif_tx_request *txp;
1179f942dc25SIan Campbell u16 pending_idx;
1180f942dc25SIan Campbell
1181ad7f402aSRoss Lagerwall pending_idx = copy_pending_idx(skb, 0);
1182e9ce7cb6SWei Liu txp = &queue->pending_tx_info[pending_idx].req;
1183f942dc25SIan Campbell
1184f942dc25SIan Campbell /* Check the remap error code. */
1185e9ce7cb6SWei Liu if (unlikely(xenvif_tx_check_gop(queue, skb, &gop_map, &gop_copy))) {
1186b42cc6e4SZoltan Kiss /* If there was an error, xenvif_tx_check_gop is
1187b42cc6e4SZoltan Kiss * expected to release all the frags which were mapped,
1188b42cc6e4SZoltan Kiss * so kfree_skb shouldn't do it again
1189b42cc6e4SZoltan Kiss */
1190f942dc25SIan Campbell skb_shinfo(skb)->nr_frags = 0;
1191b42cc6e4SZoltan Kiss if (skb_has_frag_list(skb)) {
1192b42cc6e4SZoltan Kiss struct sk_buff *nskb =
1193b42cc6e4SZoltan Kiss skb_shinfo(skb)->frag_list;
1194b42cc6e4SZoltan Kiss skb_shinfo(nskb)->nr_frags = 0;
1195b42cc6e4SZoltan Kiss }
1196f942dc25SIan Campbell kfree_skb(skb);
1197f942dc25SIan Campbell continue;
1198f942dc25SIan Campbell }
1199f942dc25SIan Campbell
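		/* csum_blank: the frontend left the checksum to be filled in
		 * (partial); data_validated: the frontend vouches for the
		 * payload, so no verification is needed.
		 */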
1200f942dc25SIan Campbell if (txp->flags & XEN_NETTXF_csum_blank)
1201f942dc25SIan Campbell skb->ip_summed = CHECKSUM_PARTIAL;
1202f942dc25SIan Campbell else if (txp->flags & XEN_NETTXF_data_validated)
1203f942dc25SIan Campbell skb->ip_summed = CHECKSUM_UNNECESSARY;
1204f942dc25SIan Campbell
1205e9ce7cb6SWei Liu xenvif_fill_frags(queue, skb);
1206f942dc25SIan Campbell
1207e3377f36SZoltan Kiss if (unlikely(skb_has_frag_list(skb))) {
120899e87f56SIgor Druzhinin struct sk_buff *nskb = skb_shinfo(skb)->frag_list;
120999e87f56SIgor Druzhinin xenvif_skb_zerocopy_prepare(queue, nskb);
1210e9ce7cb6SWei Liu if (xenvif_handle_frag_list(queue, skb)) {
1211e3377f36SZoltan Kiss if (net_ratelimit())
1212e9ce7cb6SWei Liu netdev_err(queue->vif->dev,
1213e3377f36SZoltan Kiss "Not enough memory to consolidate frag_list!\n");
1214a64bd934SWei Liu xenvif_skb_zerocopy_prepare(queue, skb);
1215e3377f36SZoltan Kiss kfree_skb(skb);
1216e3377f36SZoltan Kiss continue;
1217e3377f36SZoltan Kiss }
121899e87f56SIgor Druzhinin /* Copied all the bits from the frag list -- free it. */
121999e87f56SIgor Druzhinin skb_frag_list_init(skb);
122099e87f56SIgor Druzhinin kfree_skb(nskb);
1221e3377f36SZoltan Kiss }
1222e3377f36SZoltan Kiss
1223e9ce7cb6SWei Liu skb->dev = queue->vif->dev;
1224f942dc25SIan Campbell skb->protocol = eth_type_trans(skb, skb->dev);
1225f9ca8f74SJason Wang skb_reset_network_header(skb);
1226f942dc25SIan Campbell
1227e9ce7cb6SWei Liu if (checksum_setup(queue, skb)) {
1228e9ce7cb6SWei Liu netdev_dbg(queue->vif->dev,
1229f942dc25SIan Campbell "Can't setup checksum in net_tx_action\n");
1230f53c3fe8SZoltan Kiss /* We have to set this flag to trigger the callback */
1231f53c3fe8SZoltan Kiss if (skb_shinfo(skb)->destructor_arg)
1232a64bd934SWei Liu xenvif_skb_zerocopy_prepare(queue, skb);
1233f942dc25SIan Campbell kfree_skb(skb);
1234f942dc25SIan Campbell continue;
1235f942dc25SIan Campbell }
1236f942dc25SIan Campbell
1237d2aa125dSMaxim Mikityanskiy skb_probe_transport_header(skb);
1238f9ca8f74SJason Wang
1239b89587a7SPaul Durrant /* If the packet is GSO then we will have just set up the
1240b89587a7SPaul Durrant * transport header offset in checksum_setup so it's now
1241b89587a7SPaul Durrant * straightforward to calculate gso_segs.
1242b89587a7SPaul Durrant */
1243b89587a7SPaul Durrant if (skb_is_gso(skb)) {
1244d2aa125dSMaxim Mikityanskiy int mss, hdrlen;
1245d2aa125dSMaxim Mikityanskiy
1246d2aa125dSMaxim Mikityanskiy /* GSO implies having the L4 header. */
1247d2aa125dSMaxim Mikityanskiy WARN_ON_ONCE(!skb_transport_header_was_set(skb));
1248d2aa125dSMaxim Mikityanskiy if (unlikely(!skb_transport_header_was_set(skb))) {
1249d2aa125dSMaxim Mikityanskiy kfree_skb(skb);
1250d2aa125dSMaxim Mikityanskiy continue;
1251d2aa125dSMaxim Mikityanskiy }
1252d2aa125dSMaxim Mikityanskiy
1253d2aa125dSMaxim Mikityanskiy mss = skb_shinfo(skb)->gso_size;
1254504148feSEric Dumazet hdrlen = skb_tcp_all_headers(skb);
1255b89587a7SPaul Durrant
1256b89587a7SPaul Durrant skb_shinfo(skb)->gso_segs =
1257b89587a7SPaul Durrant DIV_ROUND_UP(skb->len - hdrlen, mss);
1258b89587a7SPaul Durrant }
1259b89587a7SPaul Durrant
1260e9ce7cb6SWei Liu queue->stats.rx_bytes += skb->len;
1261e9ce7cb6SWei Liu queue->stats.rx_packets++;
1262f942dc25SIan Campbell
1263b3f980bdSWei Liu work_done++;
1264b3f980bdSWei Liu
1265f53c3fe8SZoltan Kiss /* Set this flag right before netif_receive_skb, otherwise
1266f53c3fe8SZoltan Kiss * someone might think this packet already left netback, and
1267f53c3fe8SZoltan Kiss * do a skb_copy_ubufs while we are still in control of the
1268f53c3fe8SZoltan Kiss 	 * skb. E.g. the __pskb_pull_tail earlier can do such a thing.
1269f53c3fe8SZoltan Kiss */
12701bb332afSZoltan Kiss if (skb_shinfo(skb)->destructor_arg) {
1271a64bd934SWei Liu xenvif_skb_zerocopy_prepare(queue, skb);
1272e9ce7cb6SWei Liu queue->stats.tx_zerocopy_sent++;
12731bb332afSZoltan Kiss }
1274f53c3fe8SZoltan Kiss
1275b3f980bdSWei Liu netif_receive_skb(skb);
1276f942dc25SIan Campbell }
1277b3f980bdSWei Liu
1278b3f980bdSWei Liu return work_done;
1279f942dc25SIan Campbell }
1280f942dc25SIan Campbell
1281b63ca3e8SPavel Begunkov void xenvif_zerocopy_callback(struct sk_buff *skb, struct ubuf_info *ubuf_base,
128236177832SJonathan Lemon bool zerocopy_success)
12833e2234b3SZoltan Kiss {
1284f53c3fe8SZoltan Kiss unsigned long flags;
1285f53c3fe8SZoltan Kiss pending_ring_idx_t index;
1286b63ca3e8SPavel Begunkov struct ubuf_info_msgzc *ubuf = uarg_to_msgzc(ubuf_base);
1287e9ce7cb6SWei Liu struct xenvif_queue *queue = ubuf_to_queue(ubuf);
1288f53c3fe8SZoltan Kiss
1289f53c3fe8SZoltan Kiss /* This is the only place where we grab this lock, to protect callbacks
1290f53c3fe8SZoltan Kiss * from each other.
1291f53c3fe8SZoltan Kiss */
1292e9ce7cb6SWei Liu spin_lock_irqsave(&queue->callback_lock, flags);
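	/* The ubuf->ctx pointers chain together all pending slots that
	 * belong to this skb, so a single callback queues each of them
	 * for deallocation.
	 */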
1293f53c3fe8SZoltan Kiss do {
1294f53c3fe8SZoltan Kiss u16 pending_idx = ubuf->desc;
1295b63ca3e8SPavel Begunkov ubuf = (struct ubuf_info_msgzc *) ubuf->ctx;
1296e9ce7cb6SWei Liu BUG_ON(queue->dealloc_prod - queue->dealloc_cons >=
1297f53c3fe8SZoltan Kiss MAX_PENDING_REQS);
1298e9ce7cb6SWei Liu index = pending_index(queue->dealloc_prod);
1299e9ce7cb6SWei Liu queue->dealloc_ring[index] = pending_idx;
1300f53c3fe8SZoltan Kiss /* Sync with xenvif_tx_dealloc_action:
1301f53c3fe8SZoltan Kiss * insert idx then incr producer.
1302f53c3fe8SZoltan Kiss */
1303f53c3fe8SZoltan Kiss smp_wmb();
1304e9ce7cb6SWei Liu queue->dealloc_prod++;
1305f53c3fe8SZoltan Kiss } while (ubuf);
1306e9ce7cb6SWei Liu spin_unlock_irqrestore(&queue->callback_lock, flags);
1307f53c3fe8SZoltan Kiss
13081bb332afSZoltan Kiss if (likely(zerocopy_success))
1309e9ce7cb6SWei Liu queue->stats.tx_zerocopy_success++;
13101bb332afSZoltan Kiss else
1311e9ce7cb6SWei Liu queue->stats.tx_zerocopy_fail++;
1312a64bd934SWei Liu xenvif_skb_zerocopy_complete(queue);
1313f53c3fe8SZoltan Kiss }
1314f53c3fe8SZoltan Kiss
1315e9ce7cb6SWei Liu static inline void xenvif_tx_dealloc_action(struct xenvif_queue *queue)
1316f53c3fe8SZoltan Kiss {
1317f53c3fe8SZoltan Kiss struct gnttab_unmap_grant_ref *gop;
1318f53c3fe8SZoltan Kiss pending_ring_idx_t dc, dp;
1319f53c3fe8SZoltan Kiss u16 pending_idx, pending_idx_release[MAX_PENDING_REQS];
1320f53c3fe8SZoltan Kiss unsigned int i = 0;
1321f53c3fe8SZoltan Kiss
1322e9ce7cb6SWei Liu dc = queue->dealloc_cons;
1323e9ce7cb6SWei Liu gop = queue->tx_unmap_ops;
1324f53c3fe8SZoltan Kiss
1325f53c3fe8SZoltan Kiss /* Free up any grants we have finished using */
1326f53c3fe8SZoltan Kiss do {
1327e9ce7cb6SWei Liu dp = queue->dealloc_prod;
1328f53c3fe8SZoltan Kiss
1329f53c3fe8SZoltan Kiss /* Ensure we see all indices enqueued by all
1330f53c3fe8SZoltan Kiss * xenvif_zerocopy_callback().
1331f53c3fe8SZoltan Kiss */
1332f53c3fe8SZoltan Kiss smp_rmb();
1333f53c3fe8SZoltan Kiss
1334f53c3fe8SZoltan Kiss while (dc != dp) {
133550c2e4ddSDan Carpenter BUG_ON(gop - queue->tx_unmap_ops >= MAX_PENDING_REQS);
1336f53c3fe8SZoltan Kiss pending_idx =
1337e9ce7cb6SWei Liu queue->dealloc_ring[pending_index(dc++)];
1338f53c3fe8SZoltan Kiss
1339e9ce7cb6SWei Liu pending_idx_release[gop - queue->tx_unmap_ops] =
1340f53c3fe8SZoltan Kiss pending_idx;
1341e9ce7cb6SWei Liu queue->pages_to_unmap[gop - queue->tx_unmap_ops] =
1342e9ce7cb6SWei Liu queue->mmap_pages[pending_idx];
1343f53c3fe8SZoltan Kiss gnttab_set_unmap_op(gop,
1344e9ce7cb6SWei Liu idx_to_kaddr(queue, pending_idx),
1345f53c3fe8SZoltan Kiss GNTMAP_host_map,
1346e9ce7cb6SWei Liu queue->grant_tx_handle[pending_idx]);
1347e9ce7cb6SWei Liu xenvif_grant_handle_reset(queue, pending_idx);
1348f53c3fe8SZoltan Kiss ++gop;
1349f53c3fe8SZoltan Kiss }
1350f53c3fe8SZoltan Kiss
1351e9ce7cb6SWei Liu } while (dp != queue->dealloc_prod);
1352f53c3fe8SZoltan Kiss
1353e9ce7cb6SWei Liu queue->dealloc_cons = dc;
1354f53c3fe8SZoltan Kiss
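	/* Issue all of the gathered unmap operations in one batch. */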
1355e9ce7cb6SWei Liu if (gop - queue->tx_unmap_ops > 0) {
1356f53c3fe8SZoltan Kiss int ret;
1357e9ce7cb6SWei Liu ret = gnttab_unmap_refs(queue->tx_unmap_ops,
1358f53c3fe8SZoltan Kiss NULL,
1359e9ce7cb6SWei Liu queue->pages_to_unmap,
1360e9ce7cb6SWei Liu gop - queue->tx_unmap_ops);
1361f53c3fe8SZoltan Kiss if (ret) {
136268946159SJulien Grall netdev_err(queue->vif->dev, "Unmap fail: nr_ops %tu ret %d\n",
1363e9ce7cb6SWei Liu gop - queue->tx_unmap_ops, ret);
1364e9ce7cb6SWei Liu for (i = 0; i < gop - queue->tx_unmap_ops; ++i) {
1365f53c3fe8SZoltan Kiss if (gop[i].status != GNTST_okay)
1366e9ce7cb6SWei Liu netdev_err(queue->vif->dev,
136768946159SJulien Grall " host_addr: 0x%llx handle: 0x%x status: %d\n",
1368f53c3fe8SZoltan Kiss gop[i].host_addr,
1369f53c3fe8SZoltan Kiss gop[i].handle,
1370f53c3fe8SZoltan Kiss gop[i].status);
1371f53c3fe8SZoltan Kiss }
1372f53c3fe8SZoltan Kiss BUG();
1373f53c3fe8SZoltan Kiss }
1374f53c3fe8SZoltan Kiss }
1375f53c3fe8SZoltan Kiss
1376e9ce7cb6SWei Liu for (i = 0; i < gop - queue->tx_unmap_ops; ++i)
1377e9ce7cb6SWei Liu xenvif_idx_release(queue, pending_idx_release[i],
1378f53c3fe8SZoltan Kiss XEN_NETIF_RSP_OKAY);
1379f53c3fe8SZoltan Kiss }
1380f53c3fe8SZoltan Kiss
13813e2234b3SZoltan Kiss
1382f942dc25SIan Campbell /* Called after netfront has transmitted */
1383e9ce7cb6SWei Liu int xenvif_tx_action(struct xenvif_queue *queue, int budget)
1384f942dc25SIan Campbell {
1385ad7f402aSRoss Lagerwall unsigned nr_mops = 0, nr_cops = 0;
1386f53c3fe8SZoltan Kiss int work_done, ret;
1387f942dc25SIan Campbell
1388e9ce7cb6SWei Liu if (unlikely(!tx_work_todo(queue)))
1389b3f980bdSWei Liu return 0;
1390b3f980bdSWei Liu
1391e9ce7cb6SWei Liu xenvif_tx_build_gops(queue, budget, &nr_cops, &nr_mops);
1392f942dc25SIan Campbell
1393bdab8275SZoltan Kiss if (nr_cops == 0)
1394b3f980bdSWei Liu return 0;
1395c571898fSAndres Lagar-Cavilla
1396e9ce7cb6SWei Liu gnttab_batch_copy(queue->tx_copy_ops, nr_cops);
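	/* The copies cover the linear areas and complete here; the map
	 * operations cover the frags, and both are checked per skb via
	 * xenvif_tx_check_gop() in xenvif_tx_submit().
	 */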
13972991397dSJan Beulich if (nr_mops != 0) {
1398e9ce7cb6SWei Liu ret = gnttab_map_refs(queue->tx_map_ops,
1399f53c3fe8SZoltan Kiss NULL,
1400e9ce7cb6SWei Liu queue->pages_to_map,
14019074ce24SZoltan Kiss nr_mops);
14022991397dSJan Beulich if (ret) {
14032991397dSJan Beulich unsigned int i;
14042991397dSJan Beulich
14052991397dSJan Beulich netdev_err(queue->vif->dev, "Map fail: nr %u ret %d\n",
14062991397dSJan Beulich nr_mops, ret);
14072991397dSJan Beulich for (i = 0; i < nr_mops; ++i)
14082991397dSJan Beulich WARN_ON_ONCE(queue->tx_map_ops[i].status ==
14092991397dSJan Beulich GNTST_okay);
14102991397dSJan Beulich }
14112991397dSJan Beulich }
1412f942dc25SIan Campbell
1413e9ce7cb6SWei Liu work_done = xenvif_tx_submit(queue);
1414b3f980bdSWei Liu
1415b3f980bdSWei Liu return work_done;
1416f942dc25SIan Campbell }
1417f942dc25SIan Campbell
1418*2dc2b0a4SJan Beulich static void _make_tx_response(struct xenvif_queue *queue,
1419*2dc2b0a4SJan Beulich const struct xen_netif_tx_request *txp,
1420562abd39SPaul Durrant unsigned int extra_count,
1421*2dc2b0a4SJan Beulich s8 status)
1422f942dc25SIan Campbell {
1423e9ce7cb6SWei Liu RING_IDX i = queue->tx.rsp_prod_pvt;
1424f942dc25SIan Campbell struct xen_netif_tx_response *resp;
1425f942dc25SIan Campbell
1426e9ce7cb6SWei Liu resp = RING_GET_RESPONSE(&queue->tx, i);
1427f942dc25SIan Campbell resp->id = txp->id;
1428*2dc2b0a4SJan Beulich resp->status = status;
1429f942dc25SIan Campbell
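	/* Each extra info slot consumed from the ring gets a NULL response
	 * so that the response and request indices stay in step.
	 */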
1430562abd39SPaul Durrant while (extra_count-- != 0)
1431e9ce7cb6SWei Liu RING_GET_RESPONSE(&queue->tx, ++i)->status = XEN_NETIF_RSP_NULL;
1432f942dc25SIan Campbell
1433e9ce7cb6SWei Liu queue->tx.rsp_prod_pvt = ++i;
1434f942dc25SIan Campbell }
1435f942dc25SIan Campbell
1436c8a4d299SDavid Vrabel static void push_tx_responses(struct xenvif_queue *queue)
1437c8a4d299SDavid Vrabel {
1438c8a4d299SDavid Vrabel int notify;
1439c8a4d299SDavid Vrabel
1440c8a4d299SDavid Vrabel RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&queue->tx, notify);
1441c8a4d299SDavid Vrabel if (notify)
1442c8a4d299SDavid Vrabel notify_remote_via_irq(queue->tx_irq);
1443c8a4d299SDavid Vrabel }
1444c8a4d299SDavid Vrabel
1445*2dc2b0a4SJan Beulich static void xenvif_idx_release(struct xenvif_queue *queue, u16 pending_idx,
1446*2dc2b0a4SJan Beulich s8 status)
1447*2dc2b0a4SJan Beulich {
1448*2dc2b0a4SJan Beulich struct pending_tx_info *pending_tx_info;
1449*2dc2b0a4SJan Beulich pending_ring_idx_t index;
1450*2dc2b0a4SJan Beulich unsigned long flags;
1451*2dc2b0a4SJan Beulich
1452*2dc2b0a4SJan Beulich pending_tx_info = &queue->pending_tx_info[pending_idx];
1453*2dc2b0a4SJan Beulich
1454*2dc2b0a4SJan Beulich spin_lock_irqsave(&queue->response_lock, flags);
1455*2dc2b0a4SJan Beulich
1456*2dc2b0a4SJan Beulich _make_tx_response(queue, &pending_tx_info->req,
1457*2dc2b0a4SJan Beulich pending_tx_info->extra_count, status);
1458*2dc2b0a4SJan Beulich
1459*2dc2b0a4SJan Beulich 	/* Release the pending index before pushing the Tx response so
1460*2dc2b0a4SJan Beulich 	 * it's available before a new Tx request is pushed by the
1461*2dc2b0a4SJan Beulich 	 * frontend.
1462*2dc2b0a4SJan Beulich */
1463*2dc2b0a4SJan Beulich index = pending_index(queue->pending_prod++);
1464*2dc2b0a4SJan Beulich queue->pending_ring[index] = pending_idx;
1465*2dc2b0a4SJan Beulich
1466*2dc2b0a4SJan Beulich push_tx_responses(queue);
1467*2dc2b0a4SJan Beulich
1468*2dc2b0a4SJan Beulich spin_unlock_irqrestore(&queue->response_lock, flags);
1469*2dc2b0a4SJan Beulich }
1470*2dc2b0a4SJan Beulich
1471*2dc2b0a4SJan Beulich static void make_tx_response(struct xenvif_queue *queue,
1472*2dc2b0a4SJan Beulich const struct xen_netif_tx_request *txp,
1473*2dc2b0a4SJan Beulich unsigned int extra_count,
1474*2dc2b0a4SJan Beulich s8 status)
1475*2dc2b0a4SJan Beulich {
1476*2dc2b0a4SJan Beulich unsigned long flags;
1477*2dc2b0a4SJan Beulich
1478*2dc2b0a4SJan Beulich spin_lock_irqsave(&queue->response_lock, flags);
1479*2dc2b0a4SJan Beulich
1480*2dc2b0a4SJan Beulich _make_tx_response(queue, txp, extra_count, status);
1481*2dc2b0a4SJan Beulich push_tx_responses(queue);
1482*2dc2b0a4SJan Beulich
1483*2dc2b0a4SJan Beulich spin_unlock_irqrestore(&queue->response_lock, flags);
1484*2dc2b0a4SJan Beulich }
1485*2dc2b0a4SJan Beulich
14865834e72eSJuergen Gross static void xenvif_idx_unmap(struct xenvif_queue *queue, u16 pending_idx)
1487f53c3fe8SZoltan Kiss {
1488f53c3fe8SZoltan Kiss int ret;
1489f53c3fe8SZoltan Kiss struct gnttab_unmap_grant_ref tx_unmap_op;
1490f53c3fe8SZoltan Kiss
1491f53c3fe8SZoltan Kiss gnttab_set_unmap_op(&tx_unmap_op,
1492e9ce7cb6SWei Liu idx_to_kaddr(queue, pending_idx),
1493f53c3fe8SZoltan Kiss GNTMAP_host_map,
1494e9ce7cb6SWei Liu queue->grant_tx_handle[pending_idx]);
1495e9ce7cb6SWei Liu xenvif_grant_handle_reset(queue, pending_idx);
1496f53c3fe8SZoltan Kiss
1497f53c3fe8SZoltan Kiss ret = gnttab_unmap_refs(&tx_unmap_op, NULL,
1498e9ce7cb6SWei Liu &queue->mmap_pages[pending_idx], 1);
14997aceb47aSZoltan Kiss if (ret) {
1500e9ce7cb6SWei Liu netdev_err(queue->vif->dev,
150168946159SJulien Grall "Unmap fail: ret: %d pending_idx: %d host_addr: %llx handle: 0x%x status: %d\n",
15027aceb47aSZoltan Kiss ret,
15037aceb47aSZoltan Kiss pending_idx,
15047aceb47aSZoltan Kiss tx_unmap_op.host_addr,
15057aceb47aSZoltan Kiss tx_unmap_op.handle,
15067aceb47aSZoltan Kiss tx_unmap_op.status);
15077aceb47aSZoltan Kiss BUG();
15087aceb47aSZoltan Kiss }
1509f53c3fe8SZoltan Kiss }
1510f53c3fe8SZoltan Kiss
1511e9ce7cb6SWei Liu static inline int tx_work_todo(struct xenvif_queue *queue)
1512f942dc25SIan Campbell {
1513e9ce7cb6SWei Liu if (likely(RING_HAS_UNCONSUMED_REQUESTS(&queue->tx)))
1514f942dc25SIan Campbell return 1;
1515f942dc25SIan Campbell
1516f942dc25SIan Campbell return 0;
1517f942dc25SIan Campbell }
1518f942dc25SIan Campbell
1519e9ce7cb6SWei Liu static inline bool tx_dealloc_work_todo(struct xenvif_queue *queue)
1520f53c3fe8SZoltan Kiss {
1521e9ce7cb6SWei Liu return queue->dealloc_cons != queue->dealloc_prod;
1522f53c3fe8SZoltan Kiss }
1523f53c3fe8SZoltan Kiss
15244e15ee2cSPaul Durrant void xenvif_unmap_frontend_data_rings(struct xenvif_queue *queue)
1525f942dc25SIan Campbell {
1526e9ce7cb6SWei Liu if (queue->tx.sring)
1527e9ce7cb6SWei Liu xenbus_unmap_ring_vfree(xenvif_to_xenbus_device(queue->vif),
1528e9ce7cb6SWei Liu queue->tx.sring);
1529e9ce7cb6SWei Liu if (queue->rx.sring)
1530e9ce7cb6SWei Liu xenbus_unmap_ring_vfree(xenvif_to_xenbus_device(queue->vif),
1531e9ce7cb6SWei Liu queue->rx.sring);
1532f942dc25SIan Campbell }
1533f942dc25SIan Campbell
15344e15ee2cSPaul Durrant int xenvif_map_frontend_data_rings(struct xenvif_queue *queue,
1535f942dc25SIan Campbell grant_ref_t tx_ring_ref,
1536f942dc25SIan Campbell grant_ref_t rx_ring_ref)
1537f942dc25SIan Campbell {
1538c9d63699SDavid Vrabel void *addr;
1539f942dc25SIan Campbell struct xen_netif_tx_sring *txs;
1540f942dc25SIan Campbell struct xen_netif_rx_sring *rxs;
15419476654bSPaul Durrant RING_IDX rsp_prod, req_prod;
1542bacc8dafSColin Ian King int err;
1543f942dc25SIan Campbell
1544e9ce7cb6SWei Liu err = xenbus_map_ring_valloc(xenvif_to_xenbus_device(queue->vif),
1545ccc9d90aSWei Liu &tx_ring_ref, 1, &addr);
1546c9d63699SDavid Vrabel if (err)
1547f942dc25SIan Campbell goto err;
1548f942dc25SIan Campbell
1549c9d63699SDavid Vrabel txs = (struct xen_netif_tx_sring *)addr;
15509476654bSPaul Durrant rsp_prod = READ_ONCE(txs->rsp_prod);
15519476654bSPaul Durrant req_prod = READ_ONCE(txs->req_prod);
15529476654bSPaul Durrant
15539476654bSPaul Durrant BACK_RING_ATTACH(&queue->tx, txs, rsp_prod, XEN_PAGE_SIZE);
15549476654bSPaul Durrant
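	/* Sanity check, presumably against a misbehaving or stale frontend:
	 * req_prod must not be more than a full ring ahead of rsp_prod.
	 */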
15559476654bSPaul Durrant err = -EIO;
15569476654bSPaul Durrant if (req_prod - rsp_prod > RING_SIZE(&queue->tx))
15579476654bSPaul Durrant goto err;
1558f942dc25SIan Campbell
1559e9ce7cb6SWei Liu err = xenbus_map_ring_valloc(xenvif_to_xenbus_device(queue->vif),
1560ccc9d90aSWei Liu &rx_ring_ref, 1, &addr);
1561c9d63699SDavid Vrabel if (err)
1562f942dc25SIan Campbell goto err;
1563f942dc25SIan Campbell
1564c9d63699SDavid Vrabel rxs = (struct xen_netif_rx_sring *)addr;
15659476654bSPaul Durrant rsp_prod = READ_ONCE(rxs->rsp_prod);
15669476654bSPaul Durrant req_prod = READ_ONCE(rxs->req_prod);
15679476654bSPaul Durrant
15689476654bSPaul Durrant BACK_RING_ATTACH(&queue->rx, rxs, rsp_prod, XEN_PAGE_SIZE);
15699476654bSPaul Durrant
15709476654bSPaul Durrant err = -EIO;
15719476654bSPaul Durrant if (req_prod - rsp_prod > RING_SIZE(&queue->rx))
15729476654bSPaul Durrant goto err;
1573f942dc25SIan Campbell
1574f942dc25SIan Campbell return 0;
1575f942dc25SIan Campbell
1576f942dc25SIan Campbell err:
15774e15ee2cSPaul Durrant xenvif_unmap_frontend_data_rings(queue);
1578f942dc25SIan Campbell return err;
1579f942dc25SIan Campbell }
1580f942dc25SIan Campbell
1581a64bd934SWei Liu static bool xenvif_dealloc_kthread_should_stop(struct xenvif_queue *queue)
1582a64bd934SWei Liu {
1583a64bd934SWei Liu /* Dealloc thread must remain running until all inflight
1584a64bd934SWei Liu * packets complete.
1585a64bd934SWei Liu */
1586a64bd934SWei Liu return kthread_should_stop() &&
1587a64bd934SWei Liu !atomic_read(&queue->inflight_packets);
1588a64bd934SWei Liu }
1589a64bd934SWei Liu
1590f53c3fe8SZoltan Kiss int xenvif_dealloc_kthread(void *data)
1591f53c3fe8SZoltan Kiss {
1592e9ce7cb6SWei Liu struct xenvif_queue *queue = data;
1593f53c3fe8SZoltan Kiss
1594a64bd934SWei Liu for (;;) {
1595e9ce7cb6SWei Liu wait_event_interruptible(queue->dealloc_wq,
1596e9ce7cb6SWei Liu tx_dealloc_work_todo(queue) ||
1597a64bd934SWei Liu xenvif_dealloc_kthread_should_stop(queue));
1598a64bd934SWei Liu if (xenvif_dealloc_kthread_should_stop(queue))
1599f53c3fe8SZoltan Kiss break;
1600f53c3fe8SZoltan Kiss
1601e9ce7cb6SWei Liu xenvif_tx_dealloc_action(queue);
1602f53c3fe8SZoltan Kiss cond_resched();
1603f53c3fe8SZoltan Kiss }
1604f53c3fe8SZoltan Kiss
1605f53c3fe8SZoltan Kiss 	/* Unmap anything remaining */
1606e9ce7cb6SWei Liu if (tx_dealloc_work_todo(queue))
1607e9ce7cb6SWei Liu xenvif_tx_dealloc_action(queue);
1608f53c3fe8SZoltan Kiss
1609f53c3fe8SZoltan Kiss return 0;
1610f53c3fe8SZoltan Kiss }
1611f53c3fe8SZoltan Kiss
16124e15ee2cSPaul Durrant static void make_ctrl_response(struct xenvif *vif,
16134e15ee2cSPaul Durrant const struct xen_netif_ctrl_request *req,
16144e15ee2cSPaul Durrant u32 status, u32 data)
16154e15ee2cSPaul Durrant {
16164e15ee2cSPaul Durrant RING_IDX idx = vif->ctrl.rsp_prod_pvt;
16174e15ee2cSPaul Durrant struct xen_netif_ctrl_response rsp = {
16184e15ee2cSPaul Durrant .id = req->id,
16194e15ee2cSPaul Durrant .type = req->type,
16204e15ee2cSPaul Durrant .status = status,
16214e15ee2cSPaul Durrant .data = data,
16224e15ee2cSPaul Durrant };
16234e15ee2cSPaul Durrant
16244e15ee2cSPaul Durrant *RING_GET_RESPONSE(&vif->ctrl, idx) = rsp;
16254e15ee2cSPaul Durrant vif->ctrl.rsp_prod_pvt = ++idx;
16264e15ee2cSPaul Durrant }
16274e15ee2cSPaul Durrant
16284e15ee2cSPaul Durrant static void push_ctrl_response(struct xenvif *vif)
16294e15ee2cSPaul Durrant {
16304e15ee2cSPaul Durrant int notify;
16314e15ee2cSPaul Durrant
16324e15ee2cSPaul Durrant RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&vif->ctrl, notify);
16334e15ee2cSPaul Durrant if (notify)
16344e15ee2cSPaul Durrant notify_remote_via_irq(vif->ctrl_irq);
16354e15ee2cSPaul Durrant }
16364e15ee2cSPaul Durrant
16374e15ee2cSPaul Durrant static void process_ctrl_request(struct xenvif *vif,
16384e15ee2cSPaul Durrant const struct xen_netif_ctrl_request *req)
16394e15ee2cSPaul Durrant {
164040d8abdeSPaul Durrant u32 status = XEN_NETIF_CTRL_STATUS_NOT_SUPPORTED;
164140d8abdeSPaul Durrant u32 data = 0;
164240d8abdeSPaul Durrant
164340d8abdeSPaul Durrant switch (req->type) {
164440d8abdeSPaul Durrant case XEN_NETIF_CTRL_TYPE_SET_HASH_ALGORITHM:
164540d8abdeSPaul Durrant status = xenvif_set_hash_alg(vif, req->data[0]);
164640d8abdeSPaul Durrant break;
164740d8abdeSPaul Durrant
164840d8abdeSPaul Durrant case XEN_NETIF_CTRL_TYPE_GET_HASH_FLAGS:
164940d8abdeSPaul Durrant status = xenvif_get_hash_flags(vif, &data);
165040d8abdeSPaul Durrant break;
165140d8abdeSPaul Durrant
165240d8abdeSPaul Durrant case XEN_NETIF_CTRL_TYPE_SET_HASH_FLAGS:
165340d8abdeSPaul Durrant status = xenvif_set_hash_flags(vif, req->data[0]);
165440d8abdeSPaul Durrant break;
165540d8abdeSPaul Durrant
165640d8abdeSPaul Durrant case XEN_NETIF_CTRL_TYPE_SET_HASH_KEY:
165740d8abdeSPaul Durrant status = xenvif_set_hash_key(vif, req->data[0],
165840d8abdeSPaul Durrant req->data[1]);
165940d8abdeSPaul Durrant break;
166040d8abdeSPaul Durrant
166140d8abdeSPaul Durrant case XEN_NETIF_CTRL_TYPE_GET_HASH_MAPPING_SIZE:
166240d8abdeSPaul Durrant status = XEN_NETIF_CTRL_STATUS_SUCCESS;
166340d8abdeSPaul Durrant data = XEN_NETBK_MAX_HASH_MAPPING_SIZE;
166440d8abdeSPaul Durrant break;
166540d8abdeSPaul Durrant
166640d8abdeSPaul Durrant case XEN_NETIF_CTRL_TYPE_SET_HASH_MAPPING_SIZE:
166740d8abdeSPaul Durrant status = xenvif_set_hash_mapping_size(vif,
166840d8abdeSPaul Durrant req->data[0]);
166940d8abdeSPaul Durrant break;
167040d8abdeSPaul Durrant
167140d8abdeSPaul Durrant case XEN_NETIF_CTRL_TYPE_SET_HASH_MAPPING:
167240d8abdeSPaul Durrant status = xenvif_set_hash_mapping(vif, req->data[0],
167340d8abdeSPaul Durrant req->data[1],
167440d8abdeSPaul Durrant req->data[2]);
167540d8abdeSPaul Durrant break;
167640d8abdeSPaul Durrant
167740d8abdeSPaul Durrant default:
167840d8abdeSPaul Durrant break;
167940d8abdeSPaul Durrant }
168040d8abdeSPaul Durrant
168140d8abdeSPaul Durrant make_ctrl_response(vif, req, status, data);
16824e15ee2cSPaul Durrant push_ctrl_response(vif);
16834e15ee2cSPaul Durrant }
16844e15ee2cSPaul Durrant
16854e15ee2cSPaul Durrant static void xenvif_ctrl_action(struct xenvif *vif)
16864e15ee2cSPaul Durrant {
16874e15ee2cSPaul Durrant for (;;) {
16884e15ee2cSPaul Durrant RING_IDX req_prod, req_cons;
16894e15ee2cSPaul Durrant
16904e15ee2cSPaul Durrant req_prod = vif->ctrl.sring->req_prod;
16914e15ee2cSPaul Durrant req_cons = vif->ctrl.req_cons;
16924e15ee2cSPaul Durrant
16934e15ee2cSPaul Durrant /* Make sure we can see requests before we process them. */
16944e15ee2cSPaul Durrant rmb();
16954e15ee2cSPaul Durrant
16964e15ee2cSPaul Durrant if (req_cons == req_prod)
16974e15ee2cSPaul Durrant break;
16984e15ee2cSPaul Durrant
16994e15ee2cSPaul Durrant while (req_cons != req_prod) {
17004e15ee2cSPaul Durrant struct xen_netif_ctrl_request req;
17014e15ee2cSPaul Durrant
17024e15ee2cSPaul Durrant RING_COPY_REQUEST(&vif->ctrl, req_cons, &req);
17034e15ee2cSPaul Durrant req_cons++;
17044e15ee2cSPaul Durrant
17054e15ee2cSPaul Durrant process_ctrl_request(vif, &req);
17064e15ee2cSPaul Durrant }
17074e15ee2cSPaul Durrant
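		/* Publish the consumer index and re-arm the event, then loop
		 * again in case requests raced in before req_event was
		 * updated.
		 */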
17084e15ee2cSPaul Durrant vif->ctrl.req_cons = req_cons;
17094e15ee2cSPaul Durrant vif->ctrl.sring->req_event = req_cons + 1;
17104e15ee2cSPaul Durrant }
17114e15ee2cSPaul Durrant }
17124e15ee2cSPaul Durrant
17134e15ee2cSPaul Durrant static bool xenvif_ctrl_work_todo(struct xenvif *vif)
17144e15ee2cSPaul Durrant {
17154e15ee2cSPaul Durrant if (likely(RING_HAS_UNCONSUMED_REQUESTS(&vif->ctrl)))
1716d3e2a25bSGustavo A. R. Silva return true;
17174e15ee2cSPaul Durrant
1718d3e2a25bSGustavo A. R. Silva return false;
17194e15ee2cSPaul Durrant }
17204e15ee2cSPaul Durrant
17210364a882SJuergen Gross irqreturn_t xenvif_ctrl_irq_fn(int irq, void *data)
17224e15ee2cSPaul Durrant {
17234e15ee2cSPaul Durrant struct xenvif *vif = data;
172423025393SJuergen Gross unsigned int eoi_flag = XEN_EOI_FLAG_SPURIOUS;
17254e15ee2cSPaul Durrant
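	/* With the lateeoi irq model the EOI is sent explicitly below; if
	 * no work was found the event is flagged as spurious, which
	 * presumably lets Xen throttle a storm of pointless interrupts.
	 */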
172623025393SJuergen Gross while (xenvif_ctrl_work_todo(vif)) {
17274e15ee2cSPaul Durrant xenvif_ctrl_action(vif);
172823025393SJuergen Gross eoi_flag = 0;
172923025393SJuergen Gross }
173023025393SJuergen Gross
173123025393SJuergen Gross xen_irq_lateeoi(irq, eoi_flag);
17324e15ee2cSPaul Durrant
17330364a882SJuergen Gross return IRQ_HANDLED;
17344e15ee2cSPaul Durrant }
17354e15ee2cSPaul Durrant
1736f942dc25SIan Campbell static int __init netback_init(void)
1737f942dc25SIan Campbell {
1738f942dc25SIan Campbell int rc = 0;
1739f942dc25SIan Campbell
17402a14b244SDaniel De Graaf if (!xen_domain())
1741f942dc25SIan Campbell return -ENODEV;
1742f942dc25SIan Campbell
174356dd5af9SJuergen Gross 	/* Allow as many queues as there are CPUs, but at most 8, if the
17444c82ac3cSWei Liu 	 * user has not specified a value.
17454c82ac3cSWei Liu */
17464c82ac3cSWei Liu if (xenvif_max_queues == 0)
174756dd5af9SJuergen Gross xenvif_max_queues = min_t(unsigned int, MAX_QUEUES_DEFAULT,
174856dd5af9SJuergen Gross num_online_cpus());
17498d3d53b3SAndrew J. Bennieston
175037641494SWei Liu if (fatal_skb_slots < XEN_NETBK_LEGACY_SLOTS_MAX) {
1751383eda32SJoe Perches pr_info("fatal_skb_slots too small (%d), bump it to XEN_NETBK_LEGACY_SLOTS_MAX (%d)\n",
175237641494SWei Liu fatal_skb_slots, XEN_NETBK_LEGACY_SLOTS_MAX);
175337641494SWei Liu fatal_skb_slots = XEN_NETBK_LEGACY_SLOTS_MAX;
17542810e5b9SWei Liu }
17552810e5b9SWei Liu
1756f942dc25SIan Campbell rc = xenvif_xenbus_init();
1757f942dc25SIan Campbell if (rc)
1758f942dc25SIan Campbell goto failed_init;
1759f942dc25SIan Campbell
1760f51de243SZoltan Kiss #ifdef CONFIG_DEBUG_FS
1761f51de243SZoltan Kiss xen_netback_dbg_root = debugfs_create_dir("xen-netback", NULL);
1762f51de243SZoltan Kiss #endif /* CONFIG_DEBUG_FS */
1763f51de243SZoltan Kiss
1764f942dc25SIan Campbell return 0;
1765f942dc25SIan Campbell
1766f942dc25SIan Campbell failed_init:
1767f942dc25SIan Campbell return rc;
1768f942dc25SIan Campbell }
1769f942dc25SIan Campbell
1770f942dc25SIan Campbell module_init(netback_init);
1771f942dc25SIan Campbell
1772b103f358SWei Liu static void __exit netback_fini(void)
1773b103f358SWei Liu {
1774f51de243SZoltan Kiss #ifdef CONFIG_DEBUG_FS
1775f51de243SZoltan Kiss debugfs_remove_recursive(xen_netback_dbg_root);
1776f51de243SZoltan Kiss #endif /* CONFIG_DEBUG_FS */
1777b103f358SWei Liu xenvif_xenbus_fini();
1778b103f358SWei Liu }
1779b103f358SWei Liu module_exit(netback_fini);
1780b103f358SWei Liu
1781f942dc25SIan Campbell MODULE_LICENSE("Dual BSD/GPL");
1782f984cec6SBastian Blank MODULE_ALIAS("xen-backend:vif");
1783