xref: /openbmc/linux/net/xdp/xsk.c (revision 2ae2e7cf)
1 // SPDX-License-Identifier: GPL-2.0
2 /* XDP sockets
3  *
4  * AF_XDP sockets allow a channel between XDP programs and userspace
5  * applications.
6  * Copyright(c) 2018 Intel Corporation.
7  *
8  * Author(s): Björn Töpel <bjorn.topel@intel.com>
9  *	      Magnus Karlsson <magnus.karlsson@intel.com>
10  */
11 
12 #define pr_fmt(fmt) "AF_XDP: %s: " fmt, __func__
13 
14 #include <linux/if_xdp.h>
15 #include <linux/init.h>
16 #include <linux/sched/mm.h>
17 #include <linux/sched/signal.h>
18 #include <linux/sched/task.h>
19 #include <linux/socket.h>
20 #include <linux/file.h>
21 #include <linux/uaccess.h>
22 #include <linux/net.h>
23 #include <linux/netdevice.h>
24 #include <linux/rculist.h>
25 #include <linux/vmalloc.h>
26 #include <net/xdp_sock_drv.h>
27 #include <net/busy_poll.h>
28 #include <net/netdev_rx_queue.h>
29 #include <net/xdp.h>
30 
31 #include "xsk_queue.h"
32 #include "xdp_umem.h"
33 #include "xsk.h"
34 
35 #define TX_BATCH_SIZE 32
36 
37 static DEFINE_PER_CPU(struct list_head, xskmap_flush_list);
38 
39 void xsk_set_rx_need_wakeup(struct xsk_buff_pool *pool)
40 {
41 	if (pool->cached_need_wakeup & XDP_WAKEUP_RX)
42 		return;
43 
44 	pool->fq->ring->flags |= XDP_RING_NEED_WAKEUP;
45 	pool->cached_need_wakeup |= XDP_WAKEUP_RX;
46 }
47 EXPORT_SYMBOL(xsk_set_rx_need_wakeup);
48 
49 void xsk_set_tx_need_wakeup(struct xsk_buff_pool *pool)
50 {
51 	struct xdp_sock *xs;
52 
53 	if (pool->cached_need_wakeup & XDP_WAKEUP_TX)
54 		return;
55 
56 	rcu_read_lock();
57 	list_for_each_entry_rcu(xs, &pool->xsk_tx_list, tx_list) {
58 		xs->tx->ring->flags |= XDP_RING_NEED_WAKEUP;
59 	}
60 	rcu_read_unlock();
61 
62 	pool->cached_need_wakeup |= XDP_WAKEUP_TX;
63 }
64 EXPORT_SYMBOL(xsk_set_tx_need_wakeup);
65 
66 void xsk_clear_rx_need_wakeup(struct xsk_buff_pool *pool)
67 {
68 	if (!(pool->cached_need_wakeup & XDP_WAKEUP_RX))
69 		return;
70 
71 	pool->fq->ring->flags &= ~XDP_RING_NEED_WAKEUP;
72 	pool->cached_need_wakeup &= ~XDP_WAKEUP_RX;
73 }
74 EXPORT_SYMBOL(xsk_clear_rx_need_wakeup);
75 
76 void xsk_clear_tx_need_wakeup(struct xsk_buff_pool *pool)
77 {
78 	struct xdp_sock *xs;
79 
80 	if (!(pool->cached_need_wakeup & XDP_WAKEUP_TX))
81 		return;
82 
83 	rcu_read_lock();
84 	list_for_each_entry_rcu(xs, &pool->xsk_tx_list, tx_list) {
85 		xs->tx->ring->flags &= ~XDP_RING_NEED_WAKEUP;
86 	}
87 	rcu_read_unlock();
88 
89 	pool->cached_need_wakeup &= ~XDP_WAKEUP_TX;
90 }
91 EXPORT_SYMBOL(xsk_clear_tx_need_wakeup);
92 
93 bool xsk_uses_need_wakeup(struct xsk_buff_pool *pool)
94 {
95 	return pool->uses_need_wakeup;
96 }
97 EXPORT_SYMBOL(xsk_uses_need_wakeup);
98 
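/* Illustrative sketch (not part of this file): how a zero-copy driver's Rx
 * path might use the need_wakeup accessors above. Only the
 * xsk_*_need_wakeup() and xsk_uses_need_wakeup() calls are the real API
 * exported here; the function name and the fq_starved flag are hypothetical.
 */
static void example_drv_rx_wakeup_hint(struct xsk_buff_pool *pool, bool fq_starved)
{
	if (!xsk_uses_need_wakeup(pool))
		return;

	if (fq_starved)
		/* Fill ring ran dry: tell user space to refill and kick us. */
		xsk_set_rx_need_wakeup(pool);
	else
		/* Buffers are available again: no syscall needed from user space. */
		xsk_clear_rx_need_wakeup(pool);
}
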
99 struct xsk_buff_pool *xsk_get_pool_from_qid(struct net_device *dev,
100 					    u16 queue_id)
101 {
102 	if (queue_id < dev->real_num_rx_queues)
103 		return dev->_rx[queue_id].pool;
104 	if (queue_id < dev->real_num_tx_queues)
105 		return dev->_tx[queue_id].pool;
106 
107 	return NULL;
108 }
109 EXPORT_SYMBOL(xsk_get_pool_from_qid);
110 
111 void xsk_clear_pool_at_qid(struct net_device *dev, u16 queue_id)
112 {
113 	if (queue_id < dev->num_rx_queues)
114 		dev->_rx[queue_id].pool = NULL;
115 	if (queue_id < dev->num_tx_queues)
116 		dev->_tx[queue_id].pool = NULL;
117 }
118 
119 /* The buffer pool is stored both in the _rx struct and the _tx struct as we do
120  * not know if the device has more tx queues than rx, or the opposite.
121  * This might also change during run time.
122  */
123 int xsk_reg_pool_at_qid(struct net_device *dev, struct xsk_buff_pool *pool,
124 			u16 queue_id)
125 {
126 	if (queue_id >= max_t(unsigned int,
127 			      dev->real_num_rx_queues,
128 			      dev->real_num_tx_queues))
129 		return -EINVAL;
130 
131 	if (queue_id < dev->real_num_rx_queues)
132 		dev->_rx[queue_id].pool = pool;
133 	if (queue_id < dev->real_num_tx_queues)
134 		dev->_tx[queue_id].pool = pool;
135 
136 	return 0;
137 }
138 
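/* Illustrative sketch (not part of this file): a driver's queue
 * (re)configuration path can use xsk_get_pool_from_qid() to check whether
 * xsk_reg_pool_at_qid() has registered a zero-copy pool for a queue id.
 * The helper below is hypothetical; the lookup call is the real API above.
 */
static bool example_drv_queue_is_zc(struct net_device *dev, u16 qid)
{
	/* Non-NULL only while an AF_XDP socket is bound to this queue
	 * in zero-copy mode.
	 */
	return xsk_get_pool_from_qid(dev, qid) != NULL;
}
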
139 static int __xsk_rcv_zc(struct xdp_sock *xs, struct xdp_buff_xsk *xskb, u32 len,
140 			u32 flags)
141 {
142 	u64 addr;
143 	int err;
144 
145 	addr = xp_get_handle(xskb);
146 	err = xskq_prod_reserve_desc(xs->rx, addr, len, flags);
147 	if (err) {
148 		xs->rx_queue_full++;
149 		return err;
150 	}
151 
152 	xp_release(xskb);
153 	return 0;
154 }
155 
156 static int xsk_rcv_zc(struct xdp_sock *xs, struct xdp_buff *xdp, u32 len)
157 {
158 	struct xdp_buff_xsk *xskb = container_of(xdp, struct xdp_buff_xsk, xdp);
159 	u32 frags = xdp_buff_has_frags(xdp);
160 	struct xdp_buff_xsk *pos, *tmp;
161 	struct list_head *xskb_list;
162 	u32 contd = 0;
163 	int err;
164 
165 	if (frags)
166 		contd = XDP_PKT_CONTD;
167 
168 	err = __xsk_rcv_zc(xs, xskb, len, contd);
169 	if (err || likely(!frags))
170 		goto out;
171 
172 	xskb_list = &xskb->pool->xskb_list;
173 	list_for_each_entry_safe(pos, tmp, xskb_list, xskb_list_node) {
174 		if (list_is_singular(xskb_list))
175 			contd = 0;
176 		len = pos->xdp.data_end - pos->xdp.data;
177 		err = __xsk_rcv_zc(xs, pos, len, contd);
178 		if (err)
179 			return err;
180 		list_del(&pos->xskb_list_node);
181 	}
182 
183 out:
184 	return err;
185 }
186 
187 static void *xsk_copy_xdp_start(struct xdp_buff *from)
188 {
189 	if (unlikely(xdp_data_meta_unsupported(from)))
190 		return from->data;
191 	else
192 		return from->data_meta;
193 }
194 
195 static u32 xsk_copy_xdp(void *to, void **from, u32 to_len,
196 			u32 *from_len, skb_frag_t **frag, u32 rem)
197 {
198 	u32 copied = 0;
199 
200 	while (1) {
201 		u32 copy_len = min_t(u32, *from_len, to_len);
202 
203 		memcpy(to, *from, copy_len);
204 		copied += copy_len;
205 		if (rem == copied)
206 			return copied;
207 
208 		if (*from_len == copy_len) {
209 			*from = skb_frag_address(*frag);
210 			*from_len = skb_frag_size((*frag)++);
211 		} else {
212 			*from += copy_len;
213 			*from_len -= copy_len;
214 		}
215 		if (to_len == copy_len)
216 			return copied;
217 
218 		to_len -= copy_len;
219 		to += copy_len;
220 	}
221 }
222 
223 static int __xsk_rcv(struct xdp_sock *xs, struct xdp_buff *xdp, u32 len)
224 {
225 	u32 frame_size = xsk_pool_get_rx_frame_size(xs->pool);
226 	void *copy_from = xsk_copy_xdp_start(xdp), *copy_to;
227 	u32 from_len, meta_len, rem, num_desc;
228 	struct xdp_buff_xsk *xskb;
229 	struct xdp_buff *xsk_xdp;
230 	skb_frag_t *frag;
231 
232 	from_len = xdp->data_end - copy_from;
233 	meta_len = xdp->data - copy_from;
234 	rem = len + meta_len;
235 
236 	if (len <= frame_size && !xdp_buff_has_frags(xdp)) {
237 		int err;
238 
239 		xsk_xdp = xsk_buff_alloc(xs->pool);
240 		if (!xsk_xdp) {
241 			xs->rx_dropped++;
242 			return -ENOMEM;
243 		}
244 		memcpy(xsk_xdp->data - meta_len, copy_from, rem);
245 		xskb = container_of(xsk_xdp, struct xdp_buff_xsk, xdp);
246 		err = __xsk_rcv_zc(xs, xskb, len, 0);
247 		if (err) {
248 			xsk_buff_free(xsk_xdp);
249 			return err;
250 		}
251 
252 		return 0;
253 	}
254 
255 	num_desc = (len - 1) / frame_size + 1;
256 
257 	if (!xsk_buff_can_alloc(xs->pool, num_desc)) {
258 		xs->rx_dropped++;
259 		return -ENOMEM;
260 	}
261 	if (xskq_prod_nb_free(xs->rx, num_desc) < num_desc) {
262 		xs->rx_queue_full++;
263 		return -ENOBUFS;
264 	}
265 
266 	if (xdp_buff_has_frags(xdp)) {
267 		struct skb_shared_info *sinfo;
268 
269 		sinfo = xdp_get_shared_info_from_buff(xdp);
270 		frag = &sinfo->frags[0];
271 	}
272 
273 	do {
274 		u32 to_len = frame_size + meta_len;
275 		u32 copied;
276 
277 		xsk_xdp = xsk_buff_alloc(xs->pool);
278 		copy_to = xsk_xdp->data - meta_len;
279 
280 		copied = xsk_copy_xdp(copy_to, &copy_from, to_len, &from_len, &frag, rem);
281 		rem -= copied;
282 
283 		xskb = container_of(xsk_xdp, struct xdp_buff_xsk, xdp);
284 		__xsk_rcv_zc(xs, xskb, copied - meta_len, rem ? XDP_PKT_CONTD : 0);
285 		meta_len = 0;
286 	} while (rem);
287 
288 	return 0;
289 }
290 
291 static bool xsk_tx_writeable(struct xdp_sock *xs)
292 {
293 	if (xskq_cons_present_entries(xs->tx) > xs->tx->nentries / 2)
294 		return false;
295 
296 	return true;
297 }
298 
299 static bool xsk_is_bound(struct xdp_sock *xs)
300 {
301 	if (READ_ONCE(xs->state) == XSK_BOUND) {
302 		/* Matches smp_wmb() in bind(). */
303 		smp_rmb();
304 		return true;
305 	}
306 	return false;
307 }
308 
309 static int xsk_rcv_check(struct xdp_sock *xs, struct xdp_buff *xdp, u32 len)
310 {
311 	if (!xsk_is_bound(xs))
312 		return -ENXIO;
313 
314 	if (xs->dev != xdp->rxq->dev || xs->queue_id != xdp->rxq->queue_index)
315 		return -EINVAL;
316 
317 	if (len > xsk_pool_get_rx_frame_size(xs->pool) && !xs->sg) {
318 		xs->rx_dropped++;
319 		return -ENOSPC;
320 	}
321 
322 	sk_mark_napi_id_once_xdp(&xs->sk, xdp);
323 	return 0;
324 }
325 
326 static void xsk_flush(struct xdp_sock *xs)
327 {
328 	xskq_prod_submit(xs->rx);
329 	__xskq_cons_release(xs->pool->fq);
330 	sock_def_readable(&xs->sk);
331 }
332 
333 int xsk_generic_rcv(struct xdp_sock *xs, struct xdp_buff *xdp)
334 {
335 	u32 len = xdp_get_buff_len(xdp);
336 	int err;
337 
338 	spin_lock_bh(&xs->rx_lock);
339 	err = xsk_rcv_check(xs, xdp, len);
340 	if (!err) {
341 		err = __xsk_rcv(xs, xdp, len);
342 		xsk_flush(xs);
343 	}
344 	spin_unlock_bh(&xs->rx_lock);
345 	return err;
346 }
347 
348 static int xsk_rcv(struct xdp_sock *xs, struct xdp_buff *xdp)
349 {
350 	u32 len = xdp_get_buff_len(xdp);
351 	int err;
352 
353 	err = xsk_rcv_check(xs, xdp, len);
354 	if (err)
355 		return err;
356 
357 	if (xdp->rxq->mem.type == MEM_TYPE_XSK_BUFF_POOL) {
358 		len = xdp->data_end - xdp->data;
359 		return xsk_rcv_zc(xs, xdp, len);
360 	}
361 
362 	err = __xsk_rcv(xs, xdp, len);
363 	if (!err)
364 		xdp_return_buff(xdp);
365 	return err;
366 }
367 
368 int __xsk_map_redirect(struct xdp_sock *xs, struct xdp_buff *xdp)
369 {
370 	struct list_head *flush_list = this_cpu_ptr(&xskmap_flush_list);
371 	int err;
372 
373 	err = xsk_rcv(xs, xdp);
374 	if (err)
375 		return err;
376 
377 	if (!xs->flush_node.prev)
378 		list_add(&xs->flush_node, flush_list);
379 
380 	return 0;
381 }
382 
383 void __xsk_map_flush(void)
384 {
385 	struct list_head *flush_list = this_cpu_ptr(&xskmap_flush_list);
386 	struct xdp_sock *xs, *tmp;
387 
388 	list_for_each_entry_safe(xs, tmp, flush_list, flush_node) {
389 		xsk_flush(xs);
390 		__list_del_clearprev(&xs->flush_node);
391 	}
392 }
393 
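/* Illustrative sketch (not part of this file): __xsk_map_redirect() and
 * __xsk_map_flush() above are driven by an XDP program that redirects
 * packets into a BPF_MAP_TYPE_XSKMAP. A minimal such program, built as a
 * separate BPF object with libbpf, might look like the sketch below; the
 * map and program names are hypothetical.
 */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct {
	__uint(type, BPF_MAP_TYPE_XSKMAP);
	__uint(max_entries, 64);
	__type(key, __u32);
	__type(value, __u32);
} xsks_map SEC(".maps");

SEC("xdp")
int example_xdp_sock_prog(struct xdp_md *ctx)
{
	/* Redirect to the AF_XDP socket bound to this Rx queue, or fall
	 * back to XDP_PASS when no socket is attached to the slot.
	 */
	return bpf_redirect_map(&xsks_map, ctx->rx_queue_index, XDP_PASS);
}

char _license[] SEC("license") = "GPL";
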
394 void xsk_tx_completed(struct xsk_buff_pool *pool, u32 nb_entries)
395 {
396 	xskq_prod_submit_n(pool->cq, nb_entries);
397 }
398 EXPORT_SYMBOL(xsk_tx_completed);
399 
400 void xsk_tx_release(struct xsk_buff_pool *pool)
401 {
402 	struct xdp_sock *xs;
403 
404 	rcu_read_lock();
405 	list_for_each_entry_rcu(xs, &pool->xsk_tx_list, tx_list) {
406 		__xskq_cons_release(xs->tx);
407 		if (xsk_tx_writeable(xs))
408 			xs->sk.sk_write_space(&xs->sk);
409 	}
410 	rcu_read_unlock();
411 }
412 EXPORT_SYMBOL(xsk_tx_release);
413 
414 bool xsk_tx_peek_desc(struct xsk_buff_pool *pool, struct xdp_desc *desc)
415 {
416 	struct xdp_sock *xs;
417 
418 	rcu_read_lock();
419 	list_for_each_entry_rcu(xs, &pool->xsk_tx_list, tx_list) {
420 		if (!xskq_cons_peek_desc(xs->tx, desc, pool)) {
421 			if (xskq_has_descs(xs->tx))
422 				xskq_cons_release(xs->tx);
423 			continue;
424 		}
425 
426 		/* This is the backpressure mechanism for the Tx path.
427 		 * Reserve space in the completion queue and only proceed
428 		 * if there is space in it. This avoids having to implement
429 		 * any buffering in the Tx path.
430 		 */
431 		if (xskq_prod_reserve_addr(pool->cq, desc->addr))
432 			goto out;
433 
434 		xskq_cons_release(xs->tx);
435 		rcu_read_unlock();
436 		return true;
437 	}
438 
439 out:
440 	rcu_read_unlock();
441 	return false;
442 }
443 EXPORT_SYMBOL(xsk_tx_peek_desc);
444 
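/* Illustrative sketch (not part of this file): a zero-copy driver's Tx napi
 * poll typically pairs xsk_tx_peek_desc()/xsk_tx_release() with
 * xsk_tx_completed() once the hardware has sent the frames. The function
 * name and the hardware-posting step are hypothetical; the xsk_* calls are
 * the real API exported here.
 */
static u32 example_drv_xsk_xmit(struct xsk_buff_pool *pool, u32 budget)
{
	struct xdp_desc desc;
	u32 sent = 0;

	while (sent < budget && xsk_tx_peek_desc(pool, &desc)) {
		/* Hypothetical HW posting step: map the buffer with
		 * xsk_buff_raw_get_dma(pool, desc.addr) and queue desc.len
		 * bytes on the hardware Tx ring.
		 */
		sent++;
	}
	xsk_tx_release(pool);

	/* Later, from the Tx completion handler, return the slots with
	 * xsk_tx_completed(pool, completed_frames).
	 */
	return sent;
}
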
445 static u32 xsk_tx_peek_release_fallback(struct xsk_buff_pool *pool, u32 max_entries)
446 {
447 	struct xdp_desc *descs = pool->tx_descs;
448 	u32 nb_pkts = 0;
449 
450 	while (nb_pkts < max_entries && xsk_tx_peek_desc(pool, &descs[nb_pkts]))
451 		nb_pkts++;
452 
453 	xsk_tx_release(pool);
454 	return nb_pkts;
455 }
456 
457 u32 xsk_tx_peek_release_desc_batch(struct xsk_buff_pool *pool, u32 nb_pkts)
458 {
459 	struct xdp_sock *xs;
460 
461 	rcu_read_lock();
462 	if (!list_is_singular(&pool->xsk_tx_list)) {
463 		/* Fall back to the non-batched version */
464 		rcu_read_unlock();
465 		return xsk_tx_peek_release_fallback(pool, nb_pkts);
466 	}
467 
468 	xs = list_first_or_null_rcu(&pool->xsk_tx_list, struct xdp_sock, tx_list);
469 	if (!xs) {
470 		nb_pkts = 0;
471 		goto out;
472 	}
473 
474 	nb_pkts = xskq_cons_nb_entries(xs->tx, nb_pkts);
475 
476 	/* This is the backpressure mechanism for the Tx path. Try to
477 	 * reserve space in the completion queue for all packets, but
478 	 * if there are fewer slots available, just process that many
479 	 * packets. This avoids having to implement any buffering in
480 	 * the Tx path.
481 	 */
482 	nb_pkts = xskq_prod_nb_free(pool->cq, nb_pkts);
483 	if (!nb_pkts)
484 		goto out;
485 
486 	nb_pkts = xskq_cons_read_desc_batch(xs->tx, pool, nb_pkts);
487 	if (!nb_pkts) {
488 		xs->tx->queue_empty_descs++;
489 		goto out;
490 	}
491 
492 	__xskq_cons_release(xs->tx);
493 	xskq_prod_write_addr_batch(pool->cq, pool->tx_descs, nb_pkts);
494 	xs->sk.sk_write_space(&xs->sk);
495 
496 out:
497 	rcu_read_unlock();
498 	return nb_pkts;
499 }
500 EXPORT_SYMBOL(xsk_tx_peek_release_desc_batch);
501 
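/* Illustrative sketch (not part of this file): drivers that can post many
 * descriptors at once use the batched variant above; the peeked descriptors
 * land in pool->tx_descs. The loop body and completion handling are
 * hypothetical placeholders.
 */
static void example_drv_xsk_xmit_batch(struct xsk_buff_pool *pool, u32 budget)
{
	u32 i, nb_pkts = xsk_tx_peek_release_desc_batch(pool, budget);

	for (i = 0; i < nb_pkts; i++) {
		/* Hypothetical: post pool->tx_descs[i].addr / .len to the HW ring. */
	}

	/* Call xsk_tx_completed(pool, nb_pkts) once the HW reports completion. */
}
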
502 static int xsk_wakeup(struct xdp_sock *xs, u8 flags)
503 {
504 	struct net_device *dev = xs->dev;
505 
506 	return dev->netdev_ops->ndo_xsk_wakeup(dev, xs->queue_id, flags);
507 }
508 
509 static int xsk_cq_reserve_addr_locked(struct xdp_sock *xs, u64 addr)
510 {
511 	unsigned long flags;
512 	int ret;
513 
514 	spin_lock_irqsave(&xs->pool->cq_lock, flags);
515 	ret = xskq_prod_reserve_addr(xs->pool->cq, addr);
516 	spin_unlock_irqrestore(&xs->pool->cq_lock, flags);
517 
518 	return ret;
519 }
520 
521 static void xsk_cq_submit_locked(struct xdp_sock *xs, u32 n)
522 {
523 	unsigned long flags;
524 
525 	spin_lock_irqsave(&xs->pool->cq_lock, flags);
526 	xskq_prod_submit_n(xs->pool->cq, n);
527 	spin_unlock_irqrestore(&xs->pool->cq_lock, flags);
528 }
529 
530 static void xsk_cq_cancel_locked(struct xdp_sock *xs, u32 n)
531 {
532 	unsigned long flags;
533 
534 	spin_lock_irqsave(&xs->pool->cq_lock, flags);
535 	xskq_prod_cancel_n(xs->pool->cq, n);
536 	spin_unlock_irqrestore(&xs->pool->cq_lock, flags);
537 }
538 
539 static u32 xsk_get_num_desc(struct sk_buff *skb)
540 {
541 	return skb ? (long)skb_shinfo(skb)->destructor_arg : 0;
542 }
543 
544 static void xsk_destruct_skb(struct sk_buff *skb)
545 {
546 	xsk_cq_submit_locked(xdp_sk(skb->sk), xsk_get_num_desc(skb));
547 	sock_wfree(skb);
548 }
549 
550 static void xsk_set_destructor_arg(struct sk_buff *skb)
551 {
552 	long num = xsk_get_num_desc(xdp_sk(skb->sk)->skb) + 1;
553 
554 	skb_shinfo(skb)->destructor_arg = (void *)num;
555 }
556 
557 static void xsk_consume_skb(struct sk_buff *skb)
558 {
559 	struct xdp_sock *xs = xdp_sk(skb->sk);
560 
561 	skb->destructor = sock_wfree;
562 	xsk_cq_cancel_locked(xs, xsk_get_num_desc(skb));
563 	/* Free skb without triggering the perf drop trace */
564 	consume_skb(skb);
565 	xs->skb = NULL;
566 }
567 
568 static void xsk_drop_skb(struct sk_buff *skb)
569 {
570 	xdp_sk(skb->sk)->tx->invalid_descs += xsk_get_num_desc(skb);
571 	xsk_consume_skb(skb);
572 }
573 
574 static struct sk_buff *xsk_build_skb_zerocopy(struct xdp_sock *xs,
575 					      struct xdp_desc *desc)
576 {
577 	struct xsk_buff_pool *pool = xs->pool;
578 	u32 hr, len, ts, offset, copy, copied;
579 	struct sk_buff *skb = xs->skb;
580 	struct page *page;
581 	void *buffer;
582 	int err, i;
583 	u64 addr;
584 
585 	if (!skb) {
586 		hr = max(NET_SKB_PAD, L1_CACHE_ALIGN(xs->dev->needed_headroom));
587 
588 		skb = sock_alloc_send_skb(&xs->sk, hr, 1, &err);
589 		if (unlikely(!skb))
590 			return ERR_PTR(err);
591 
592 		skb_reserve(skb, hr);
593 	}
594 
595 	addr = desc->addr;
596 	len = desc->len;
597 	ts = pool->unaligned ? len : pool->chunk_size;
598 
599 	buffer = xsk_buff_raw_get_data(pool, addr);
600 	offset = offset_in_page(buffer);
601 	addr = buffer - pool->addrs;
602 
603 	for (copied = 0, i = skb_shinfo(skb)->nr_frags; copied < len; i++) {
604 		if (unlikely(i >= MAX_SKB_FRAGS))
605 			return ERR_PTR(-EOVERFLOW);
606 
607 		page = pool->umem->pgs[addr >> PAGE_SHIFT];
608 		get_page(page);
609 
610 		copy = min_t(u32, PAGE_SIZE - offset, len - copied);
611 		skb_fill_page_desc(skb, i, page, offset, copy);
612 
613 		copied += copy;
614 		addr += copy;
615 		offset = 0;
616 	}
617 
618 	skb->len += len;
619 	skb->data_len += len;
620 	skb->truesize += ts;
621 
622 	refcount_add(ts, &xs->sk.sk_wmem_alloc);
623 
624 	return skb;
625 }
626 
627 static struct sk_buff *xsk_build_skb(struct xdp_sock *xs,
628 				     struct xdp_desc *desc)
629 {
630 	struct net_device *dev = xs->dev;
631 	struct sk_buff *skb = xs->skb;
632 	int err;
633 
634 	if (dev->priv_flags & IFF_TX_SKB_NO_LINEAR) {
635 		skb = xsk_build_skb_zerocopy(xs, desc);
636 		if (IS_ERR(skb)) {
637 			err = PTR_ERR(skb);
638 			goto free_err;
639 		}
640 	} else {
641 		u32 hr, tr, len;
642 		void *buffer;
643 
644 		buffer = xsk_buff_raw_get_data(xs->pool, desc->addr);
645 		len = desc->len;
646 
647 		if (!skb) {
648 			hr = max(NET_SKB_PAD, L1_CACHE_ALIGN(dev->needed_headroom));
649 			tr = dev->needed_tailroom;
650 			skb = sock_alloc_send_skb(&xs->sk, hr + len + tr, 1, &err);
651 			if (unlikely(!skb))
652 				goto free_err;
653 
654 			skb_reserve(skb, hr);
655 			skb_put(skb, len);
656 
657 			err = skb_store_bits(skb, 0, buffer, len);
658 			if (unlikely(err)) {
659 				kfree_skb(skb);
660 				goto free_err;
661 			}
662 		} else {
663 			int nr_frags = skb_shinfo(skb)->nr_frags;
664 			struct page *page;
665 			u8 *vaddr;
666 
667 			if (unlikely(nr_frags == (MAX_SKB_FRAGS - 1) && xp_mb_desc(desc))) {
668 				err = -EOVERFLOW;
669 				goto free_err;
670 			}
671 
672 			page = alloc_page(xs->sk.sk_allocation);
673 			if (unlikely(!page)) {
674 				err = -EAGAIN;
675 				goto free_err;
676 			}
677 
678 			vaddr = kmap_local_page(page);
679 			memcpy(vaddr, buffer, len);
680 			kunmap_local(vaddr);
681 
682 			skb_add_rx_frag(skb, nr_frags, page, 0, len, 0);
683 		}
684 	}
685 
686 	skb->dev = dev;
687 	skb->priority = xs->sk.sk_priority;
688 	skb->mark = READ_ONCE(xs->sk.sk_mark);
689 	skb->destructor = xsk_destruct_skb;
690 	xsk_set_destructor_arg(skb);
691 
692 	return skb;
693 
694 free_err:
695 	if (err == -EOVERFLOW) {
696 		/* Drop the packet */
697 		xsk_set_destructor_arg(xs->skb);
698 		xsk_drop_skb(xs->skb);
699 		xskq_cons_release(xs->tx);
700 	} else {
701 		/* Let application retry */
702 		xsk_cq_cancel_locked(xs, 1);
703 	}
704 
705 	return ERR_PTR(err);
706 }
707 
708 static int __xsk_generic_xmit(struct sock *sk)
709 {
710 	struct xdp_sock *xs = xdp_sk(sk);
711 	u32 max_batch = TX_BATCH_SIZE;
712 	bool sent_frame = false;
713 	struct xdp_desc desc;
714 	struct sk_buff *skb;
715 	int err = 0;
716 
717 	mutex_lock(&xs->mutex);
718 
719 	/* Since we dropped the RCU read lock, the socket state might have changed. */
720 	if (unlikely(!xsk_is_bound(xs))) {
721 		err = -ENXIO;
722 		goto out;
723 	}
724 
725 	if (xs->queue_id >= xs->dev->real_num_tx_queues)
726 		goto out;
727 
728 	while (xskq_cons_peek_desc(xs->tx, &desc, xs->pool)) {
729 		if (max_batch-- == 0) {
730 			err = -EAGAIN;
731 			goto out;
732 		}
733 
734 		/* This is the backpressure mechanism for the Tx path.
735 		 * Reserve space in the completion queue and only proceed
736 		 * if there is space in it. This avoids having to implement
737 		 * any buffering in the Tx path.
738 		 */
739 		if (xsk_cq_reserve_addr_locked(xs, desc.addr))
740 			goto out;
741 
742 		skb = xsk_build_skb(xs, &desc);
743 		if (IS_ERR(skb)) {
744 			err = PTR_ERR(skb);
745 			if (err != -EOVERFLOW)
746 				goto out;
747 			err = 0;
748 			continue;
749 		}
750 
751 		xskq_cons_release(xs->tx);
752 
753 		if (xp_mb_desc(&desc)) {
754 			xs->skb = skb;
755 			continue;
756 		}
757 
758 		err = __dev_direct_xmit(skb, xs->queue_id);
759 		if (err == NETDEV_TX_BUSY) {
760 			/* Tell user-space to retry the send */
761 			xskq_cons_cancel_n(xs->tx, xsk_get_num_desc(skb));
762 			xsk_consume_skb(skb);
763 			err = -EAGAIN;
764 			goto out;
765 		}
766 
767 		/* Ignore NET_XMIT_CN as packet might have been sent */
768 		if (err == NET_XMIT_DROP) {
769 			/* SKB completed but not sent */
770 			err = -EBUSY;
771 			xs->skb = NULL;
772 			goto out;
773 		}
774 
775 		sent_frame = true;
776 		xs->skb = NULL;
777 	}
778 
779 	if (xskq_has_descs(xs->tx)) {
780 		if (xs->skb)
781 			xsk_drop_skb(xs->skb);
782 		xskq_cons_release(xs->tx);
783 	}
784 
785 out:
786 	if (sent_frame)
787 		if (xsk_tx_writeable(xs))
788 			sk->sk_write_space(sk);
789 
790 	mutex_unlock(&xs->mutex);
791 	return err;
792 }
793 
794 static int xsk_generic_xmit(struct sock *sk)
795 {
796 	int ret;
797 
798 	/* Drop the RCU lock since the SKB path might sleep. */
799 	rcu_read_unlock();
800 	ret = __xsk_generic_xmit(sk);
801 	/* Reacquire RCU lock before going into common code. */
802 	rcu_read_lock();
803 
804 	return ret;
805 }
806 
807 static bool xsk_no_wakeup(struct sock *sk)
808 {
809 #ifdef CONFIG_NET_RX_BUSY_POLL
810 	/* Prefer busy-polling, skip the wakeup. */
811 	return READ_ONCE(sk->sk_prefer_busy_poll) && READ_ONCE(sk->sk_ll_usec) &&
812 		READ_ONCE(sk->sk_napi_id) >= MIN_NAPI_ID;
813 #else
814 	return false;
815 #endif
816 }
817 
818 static int xsk_check_common(struct xdp_sock *xs)
819 {
820 	if (unlikely(!xsk_is_bound(xs)))
821 		return -ENXIO;
822 	if (unlikely(!(xs->dev->flags & IFF_UP)))
823 		return -ENETDOWN;
824 
825 	return 0;
826 }
827 
828 static int __xsk_sendmsg(struct socket *sock, struct msghdr *m, size_t total_len)
829 {
830 	bool need_wait = !(m->msg_flags & MSG_DONTWAIT);
831 	struct sock *sk = sock->sk;
832 	struct xdp_sock *xs = xdp_sk(sk);
833 	struct xsk_buff_pool *pool;
834 	int err;
835 
836 	err = xsk_check_common(xs);
837 	if (err)
838 		return err;
839 	if (unlikely(need_wait))
840 		return -EOPNOTSUPP;
841 	if (unlikely(!xs->tx))
842 		return -ENOBUFS;
843 
844 	if (sk_can_busy_loop(sk)) {
845 		if (xs->zc)
846 			__sk_mark_napi_id_once(sk, xsk_pool_get_napi_id(xs->pool));
847 		sk_busy_loop(sk, 1); /* only support non-blocking sockets */
848 	}
849 
850 	if (xs->zc && xsk_no_wakeup(sk))
851 		return 0;
852 
853 	pool = xs->pool;
854 	if (pool->cached_need_wakeup & XDP_WAKEUP_TX) {
855 		if (xs->zc)
856 			return xsk_wakeup(xs, XDP_WAKEUP_TX);
857 		return xsk_generic_xmit(sk);
858 	}
859 	return 0;
860 }
861 
862 static int xsk_sendmsg(struct socket *sock, struct msghdr *m, size_t total_len)
863 {
864 	int ret;
865 
866 	rcu_read_lock();
867 	ret = __xsk_sendmsg(sock, m, total_len);
868 	rcu_read_unlock();
869 
870 	return ret;
871 }
872 
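/* Illustrative sketch (not part of this file): from user space, the Tx path
 * above is driven by an empty sendto() "kick". With XDP_USE_NEED_WAKEUP the
 * kick is only needed while the Tx ring's flags word has
 * XDP_RING_NEED_WAKEUP set. The fd and the flags pointer are assumed to come
 * from the application's own socket setup.
 */
#include <errno.h>
#include <sys/socket.h>
#include <linux/if_xdp.h>

static int example_kick_tx(int xsk_fd, const __u32 *tx_ring_flags)
{
	if (tx_ring_flags && !(*tx_ring_flags & XDP_RING_NEED_WAKEUP))
		return 0;	/* driver is already processing the Tx ring */

	if (sendto(xsk_fd, NULL, 0, MSG_DONTWAIT, NULL, 0) < 0 &&
	    errno != EAGAIN && errno != EBUSY && errno != ENETDOWN)
		return -errno;
	return 0;
}
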
873 static int __xsk_recvmsg(struct socket *sock, struct msghdr *m, size_t len, int flags)
874 {
875 	bool need_wait = !(flags & MSG_DONTWAIT);
876 	struct sock *sk = sock->sk;
877 	struct xdp_sock *xs = xdp_sk(sk);
878 	int err;
879 
880 	err = xsk_check_common(xs);
881 	if (err)
882 		return err;
883 	if (unlikely(!xs->rx))
884 		return -ENOBUFS;
885 	if (unlikely(need_wait))
886 		return -EOPNOTSUPP;
887 
888 	if (sk_can_busy_loop(sk))
889 		sk_busy_loop(sk, 1); /* only support non-blocking sockets */
890 
891 	if (xsk_no_wakeup(sk))
892 		return 0;
893 
894 	if (xs->pool->cached_need_wakeup & XDP_WAKEUP_RX && xs->zc)
895 		return xsk_wakeup(xs, XDP_WAKEUP_RX);
896 	return 0;
897 }
898 
899 static int xsk_recvmsg(struct socket *sock, struct msghdr *m, size_t len, int flags)
900 {
901 	int ret;
902 
903 	rcu_read_lock();
904 	ret = __xsk_recvmsg(sock, m, len, flags);
905 	rcu_read_unlock();
906 
907 	return ret;
908 }
909 
910 static __poll_t xsk_poll(struct file *file, struct socket *sock,
911 			     struct poll_table_struct *wait)
912 {
913 	__poll_t mask = 0;
914 	struct sock *sk = sock->sk;
915 	struct xdp_sock *xs = xdp_sk(sk);
916 	struct xsk_buff_pool *pool;
917 
918 	sock_poll_wait(file, sock, wait);
919 
920 	rcu_read_lock();
921 	if (xsk_check_common(xs))
922 		goto out;
923 
924 	pool = xs->pool;
925 
926 	if (pool->cached_need_wakeup) {
927 		if (xs->zc)
928 			xsk_wakeup(xs, pool->cached_need_wakeup);
929 		else if (xs->tx)
930 			/* Poll also needs to drive Tx in copy mode */
931 			xsk_generic_xmit(sk);
932 	}
933 
934 	if (xs->rx && !xskq_prod_is_empty(xs->rx))
935 		mask |= EPOLLIN | EPOLLRDNORM;
936 	if (xs->tx && xsk_tx_writeable(xs))
937 		mask |= EPOLLOUT | EPOLLWRNORM;
938 out:
939 	rcu_read_unlock();
940 	return mask;
941 }
942 
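/* Illustrative sketch (not part of this file): a copy-mode application can
 * let poll() both wait for Rx and drive Tx, as xsk_poll() above does. The
 * file descriptor is assumed to be a bound AF_XDP socket.
 */
#include <poll.h>

static int example_wait_on_xsk(int xsk_fd)
{
	struct pollfd pfd = {
		.fd = xsk_fd,
		.events = POLLIN | POLLOUT,
	};

	/* Returns >0 when the Rx ring has entries (POLLIN) and/or the Tx
	 * ring has room (POLLOUT); xsk_poll() also kicks Tx in copy mode.
	 */
	return poll(&pfd, 1, -1);
}
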
943 static int xsk_init_queue(u32 entries, struct xsk_queue **queue,
944 			  bool umem_queue)
945 {
946 	struct xsk_queue *q;
947 
948 	if (entries == 0 || *queue || !is_power_of_2(entries))
949 		return -EINVAL;
950 
951 	q = xskq_create(entries, umem_queue);
952 	if (!q)
953 		return -ENOMEM;
954 
955 	/* Make sure queue is ready before it can be seen by others */
956 	smp_wmb();
957 	WRITE_ONCE(*queue, q);
958 	return 0;
959 }
960 
961 static void xsk_unbind_dev(struct xdp_sock *xs)
962 {
963 	struct net_device *dev = xs->dev;
964 
965 	if (xs->state != XSK_BOUND)
966 		return;
967 	WRITE_ONCE(xs->state, XSK_UNBOUND);
968 
969 	/* Wait for driver to stop using the xdp socket. */
970 	xp_del_xsk(xs->pool, xs);
971 	synchronize_net();
972 	dev_put(dev);
973 }
974 
975 static struct xsk_map *xsk_get_map_list_entry(struct xdp_sock *xs,
976 					      struct xdp_sock __rcu ***map_entry)
977 {
978 	struct xsk_map *map = NULL;
979 	struct xsk_map_node *node;
980 
981 	*map_entry = NULL;
982 
983 	spin_lock_bh(&xs->map_list_lock);
984 	node = list_first_entry_or_null(&xs->map_list, struct xsk_map_node,
985 					node);
986 	if (node) {
987 		bpf_map_inc(&node->map->map);
988 		map = node->map;
989 		*map_entry = node->map_entry;
990 	}
991 	spin_unlock_bh(&xs->map_list_lock);
992 	return map;
993 }
994 
995 static void xsk_delete_from_maps(struct xdp_sock *xs)
996 {
997 	/* This function removes the current XDP socket from all the
998 	 * maps it resides in. We need to take extra care here, due to
999 	 * the two locks involved. Each map has a lock synchronizing
1000 	 * updates to the entries, and each socket has a lock that
1001 	 * synchronizes access to the list of maps (map_list). For
1002 	 * deadlock avoidance the locks need to be taken in the order
1003 	 * "map lock"->"socket map list lock". We start off by
1004 	 * accessing the socket map list, and take a reference to the
1005 	 * map to guarantee existence between the
1006 	 * xsk_get_map_list_entry() and xsk_map_try_sock_delete()
1007 	 * calls. Then we ask the map to remove the socket, which
1008 	 * tries to remove the socket from the map. Note that there
1009 	 * might be updates to the map between
1010 	 * xsk_get_map_list_entry() and xsk_map_try_sock_delete().
1011 	 */
1012 	struct xdp_sock __rcu **map_entry = NULL;
1013 	struct xsk_map *map;
1014 
1015 	while ((map = xsk_get_map_list_entry(xs, &map_entry))) {
1016 		xsk_map_try_sock_delete(map, xs, map_entry);
1017 		bpf_map_put(&map->map);
1018 	}
1019 }
1020 
1021 static int xsk_release(struct socket *sock)
1022 {
1023 	struct sock *sk = sock->sk;
1024 	struct xdp_sock *xs = xdp_sk(sk);
1025 	struct net *net;
1026 
1027 	if (!sk)
1028 		return 0;
1029 
1030 	net = sock_net(sk);
1031 
1032 	if (xs->skb)
1033 		xsk_drop_skb(xs->skb);
1034 
1035 	mutex_lock(&net->xdp.lock);
1036 	sk_del_node_init_rcu(sk);
1037 	mutex_unlock(&net->xdp.lock);
1038 
1039 	sock_prot_inuse_add(net, sk->sk_prot, -1);
1040 
1041 	xsk_delete_from_maps(xs);
1042 	mutex_lock(&xs->mutex);
1043 	xsk_unbind_dev(xs);
1044 	mutex_unlock(&xs->mutex);
1045 
1046 	xskq_destroy(xs->rx);
1047 	xskq_destroy(xs->tx);
1048 	xskq_destroy(xs->fq_tmp);
1049 	xskq_destroy(xs->cq_tmp);
1050 
1051 	sock_orphan(sk);
1052 	sock->sk = NULL;
1053 
1054 	sock_put(sk);
1055 
1056 	return 0;
1057 }
1058 
1059 static struct socket *xsk_lookup_xsk_from_fd(int fd)
1060 {
1061 	struct socket *sock;
1062 	int err;
1063 
1064 	sock = sockfd_lookup(fd, &err);
1065 	if (!sock)
1066 		return ERR_PTR(-ENOTSOCK);
1067 
1068 	if (sock->sk->sk_family != PF_XDP) {
1069 		sockfd_put(sock);
1070 		return ERR_PTR(-ENOPROTOOPT);
1071 	}
1072 
1073 	return sock;
1074 }
1075 
1076 static bool xsk_validate_queues(struct xdp_sock *xs)
1077 {
1078 	return xs->fq_tmp && xs->cq_tmp;
1079 }
1080 
1081 static int xsk_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
1082 {
1083 	struct sockaddr_xdp *sxdp = (struct sockaddr_xdp *)addr;
1084 	struct sock *sk = sock->sk;
1085 	struct xdp_sock *xs = xdp_sk(sk);
1086 	struct net_device *dev;
1087 	int bound_dev_if;
1088 	u32 flags, qid;
1089 	int err = 0;
1090 
1091 	if (addr_len < sizeof(struct sockaddr_xdp))
1092 		return -EINVAL;
1093 	if (sxdp->sxdp_family != AF_XDP)
1094 		return -EINVAL;
1095 
1096 	flags = sxdp->sxdp_flags;
1097 	if (flags & ~(XDP_SHARED_UMEM | XDP_COPY | XDP_ZEROCOPY |
1098 		      XDP_USE_NEED_WAKEUP | XDP_USE_SG))
1099 		return -EINVAL;
1100 
1101 	bound_dev_if = READ_ONCE(sk->sk_bound_dev_if);
1102 	if (bound_dev_if && bound_dev_if != sxdp->sxdp_ifindex)
1103 		return -EINVAL;
1104 
1105 	rtnl_lock();
1106 	mutex_lock(&xs->mutex);
1107 	if (xs->state != XSK_READY) {
1108 		err = -EBUSY;
1109 		goto out_release;
1110 	}
1111 
1112 	dev = dev_get_by_index(sock_net(sk), sxdp->sxdp_ifindex);
1113 	if (!dev) {
1114 		err = -ENODEV;
1115 		goto out_release;
1116 	}
1117 
1118 	if (!xs->rx && !xs->tx) {
1119 		err = -EINVAL;
1120 		goto out_unlock;
1121 	}
1122 
1123 	qid = sxdp->sxdp_queue_id;
1124 
1125 	if (flags & XDP_SHARED_UMEM) {
1126 		struct xdp_sock *umem_xs;
1127 		struct socket *sock;
1128 
1129 		if ((flags & XDP_COPY) || (flags & XDP_ZEROCOPY) ||
1130 		    (flags & XDP_USE_NEED_WAKEUP) || (flags & XDP_USE_SG)) {
1131 			/* Cannot specify flags for shared sockets. */
1132 			err = -EINVAL;
1133 			goto out_unlock;
1134 		}
1135 
1136 		if (xs->umem) {
1137 			/* We already have our own. */
1138 			err = -EINVAL;
1139 			goto out_unlock;
1140 		}
1141 
1142 		sock = xsk_lookup_xsk_from_fd(sxdp->sxdp_shared_umem_fd);
1143 		if (IS_ERR(sock)) {
1144 			err = PTR_ERR(sock);
1145 			goto out_unlock;
1146 		}
1147 
1148 		umem_xs = xdp_sk(sock->sk);
1149 		if (!xsk_is_bound(umem_xs)) {
1150 			err = -EBADF;
1151 			sockfd_put(sock);
1152 			goto out_unlock;
1153 		}
1154 
1155 		if (umem_xs->queue_id != qid || umem_xs->dev != dev) {
1156 			/* Share the umem with another socket on another qid
1157 			 * and/or device.
1158 			 */
1159 			xs->pool = xp_create_and_assign_umem(xs,
1160 							     umem_xs->umem);
1161 			if (!xs->pool) {
1162 				err = -ENOMEM;
1163 				sockfd_put(sock);
1164 				goto out_unlock;
1165 			}
1166 
1167 			err = xp_assign_dev_shared(xs->pool, umem_xs, dev,
1168 						   qid);
1169 			if (err) {
1170 				xp_destroy(xs->pool);
1171 				xs->pool = NULL;
1172 				sockfd_put(sock);
1173 				goto out_unlock;
1174 			}
1175 		} else {
1176 			/* Share the buffer pool with the other socket. */
1177 			if (xs->fq_tmp || xs->cq_tmp) {
1178 				/* Do not allow setting your own fq or cq. */
1179 				err = -EINVAL;
1180 				sockfd_put(sock);
1181 				goto out_unlock;
1182 			}
1183 
1184 			xp_get_pool(umem_xs->pool);
1185 			xs->pool = umem_xs->pool;
1186 
1187 			/* If the underlying shared umem was created without a Tx
1188 			 * ring, allocate the Tx descriptor array that the Tx
1189 			 * batching API utilizes.
1190 			 */
1191 			if (xs->tx && !xs->pool->tx_descs) {
1192 				err = xp_alloc_tx_descs(xs->pool, xs);
1193 				if (err) {
1194 					xp_put_pool(xs->pool);
1195 					xs->pool = NULL;
1196 					sockfd_put(sock);
1197 					goto out_unlock;
1198 				}
1199 			}
1200 		}
1201 
1202 		xdp_get_umem(umem_xs->umem);
1203 		WRITE_ONCE(xs->umem, umem_xs->umem);
1204 		sockfd_put(sock);
1205 	} else if (!xs->umem || !xsk_validate_queues(xs)) {
1206 		err = -EINVAL;
1207 		goto out_unlock;
1208 	} else {
1209 		/* This xsk has its own umem. */
1210 		xs->pool = xp_create_and_assign_umem(xs, xs->umem);
1211 		if (!xs->pool) {
1212 			err = -ENOMEM;
1213 			goto out_unlock;
1214 		}
1215 
1216 		err = xp_assign_dev(xs->pool, dev, qid, flags);
1217 		if (err) {
1218 			xp_destroy(xs->pool);
1219 			xs->pool = NULL;
1220 			goto out_unlock;
1221 		}
1222 	}
1223 
1224 	/* FQ and CQ are now owned by the buffer pool and cleaned up with it. */
1225 	xs->fq_tmp = NULL;
1226 	xs->cq_tmp = NULL;
1227 
1228 	xs->dev = dev;
1229 	xs->zc = xs->umem->zc;
1230 	xs->sg = !!(xs->umem->flags & XDP_UMEM_SG_FLAG);
1231 	xs->queue_id = qid;
1232 	xp_add_xsk(xs->pool, xs);
1233 
1234 out_unlock:
1235 	if (err) {
1236 		dev_put(dev);
1237 	} else {
1238 		/* Matches smp_rmb() in bind() for shared umem
1239 		 * sockets, and xsk_is_bound().
1240 		 */
1241 		smp_wmb();
1242 		WRITE_ONCE(xs->state, XSK_BOUND);
1243 	}
1244 out_release:
1245 	mutex_unlock(&xs->mutex);
1246 	rtnl_unlock();
1247 	return err;
1248 }
1249 
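/* Illustrative sketch (not part of this file): user space binds the socket
 * to a device/queue pair with a struct sockaddr_xdp, which is what
 * xsk_bind() above parses. The interface name, queue id and flag choice are
 * assumptions of the example; the umem and rings must already have been
 * configured via the setsockopt() calls handled further down in this file.
 */
#include <net/if.h>
#include <sys/socket.h>
#include <linux/if_xdp.h>

static int example_bind_xsk(int xsk_fd, const char *ifname, __u32 queue_id)
{
	struct sockaddr_xdp sxdp = {
		.sxdp_family = AF_XDP,
		.sxdp_ifindex = if_nametoindex(ifname),
		.sxdp_queue_id = queue_id,
		/* Try zero-copy with the need_wakeup optimization; a real
		 * application may want to fall back to XDP_COPY if this fails.
		 */
		.sxdp_flags = XDP_ZEROCOPY | XDP_USE_NEED_WAKEUP,
	};

	return bind(xsk_fd, (struct sockaddr *)&sxdp, sizeof(sxdp));
}
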
1250 struct xdp_umem_reg_v1 {
1251 	__u64 addr; /* Start of packet data area */
1252 	__u64 len; /* Length of packet data area */
1253 	__u32 chunk_size;
1254 	__u32 headroom;
1255 };
1256 
1257 static int xsk_setsockopt(struct socket *sock, int level, int optname,
1258 			  sockptr_t optval, unsigned int optlen)
1259 {
1260 	struct sock *sk = sock->sk;
1261 	struct xdp_sock *xs = xdp_sk(sk);
1262 	int err;
1263 
1264 	if (level != SOL_XDP)
1265 		return -ENOPROTOOPT;
1266 
1267 	switch (optname) {
1268 	case XDP_RX_RING:
1269 	case XDP_TX_RING:
1270 	{
1271 		struct xsk_queue **q;
1272 		int entries;
1273 
1274 		if (optlen < sizeof(entries))
1275 			return -EINVAL;
1276 		if (copy_from_sockptr(&entries, optval, sizeof(entries)))
1277 			return -EFAULT;
1278 
1279 		mutex_lock(&xs->mutex);
1280 		if (xs->state != XSK_READY) {
1281 			mutex_unlock(&xs->mutex);
1282 			return -EBUSY;
1283 		}
1284 		q = (optname == XDP_TX_RING) ? &xs->tx : &xs->rx;
1285 		err = xsk_init_queue(entries, q, false);
1286 		if (!err && optname == XDP_TX_RING)
1287 			/* Tx needs to be explicitly woken up the first time */
1288 			xs->tx->ring->flags |= XDP_RING_NEED_WAKEUP;
1289 		mutex_unlock(&xs->mutex);
1290 		return err;
1291 	}
1292 	case XDP_UMEM_REG:
1293 	{
1294 		size_t mr_size = sizeof(struct xdp_umem_reg);
1295 		struct xdp_umem_reg mr = {};
1296 		struct xdp_umem *umem;
1297 
1298 		if (optlen < sizeof(struct xdp_umem_reg_v1))
1299 			return -EINVAL;
1300 		else if (optlen < sizeof(mr))
1301 			mr_size = sizeof(struct xdp_umem_reg_v1);
1302 
1303 		if (copy_from_sockptr(&mr, optval, mr_size))
1304 			return -EFAULT;
1305 
1306 		mutex_lock(&xs->mutex);
1307 		if (xs->state != XSK_READY || xs->umem) {
1308 			mutex_unlock(&xs->mutex);
1309 			return -EBUSY;
1310 		}
1311 
1312 		umem = xdp_umem_create(&mr);
1313 		if (IS_ERR(umem)) {
1314 			mutex_unlock(&xs->mutex);
1315 			return PTR_ERR(umem);
1316 		}
1317 
1318 		/* Make sure umem is ready before it can be seen by others */
1319 		smp_wmb();
1320 		WRITE_ONCE(xs->umem, umem);
1321 		mutex_unlock(&xs->mutex);
1322 		return 0;
1323 	}
1324 	case XDP_UMEM_FILL_RING:
1325 	case XDP_UMEM_COMPLETION_RING:
1326 	{
1327 		struct xsk_queue **q;
1328 		int entries;
1329 
1330 		if (copy_from_sockptr(&entries, optval, sizeof(entries)))
1331 			return -EFAULT;
1332 
1333 		mutex_lock(&xs->mutex);
1334 		if (xs->state != XSK_READY) {
1335 			mutex_unlock(&xs->mutex);
1336 			return -EBUSY;
1337 		}
1338 
1339 		q = (optname == XDP_UMEM_FILL_RING) ? &xs->fq_tmp :
1340 			&xs->cq_tmp;
1341 		err = xsk_init_queue(entries, q, true);
1342 		mutex_unlock(&xs->mutex);
1343 		return err;
1344 	}
1345 	default:
1346 		break;
1347 	}
1348 
1349 	return -ENOPROTOOPT;
1350 }
1351 
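/* Illustrative sketch (not part of this file): the minimal user-space setup
 * sequence handled by xsk_setsockopt() above, on a socket created with
 * socket(AF_XDP, SOCK_RAW, 0): register the umem, then size the four rings.
 * The chunk size, ring size and umem layout are assumptions of the example;
 * ring sizes must be powers of two (see xsk_init_queue()).
 */
#include <sys/socket.h>
#include <linux/if_xdp.h>

static int example_setup_xsk(int xsk_fd, void *umem_area, __u64 umem_len)
{
	struct xdp_umem_reg mr = {
		.addr = (__u64)(unsigned long)umem_area,
		.len = umem_len,
		.chunk_size = 2048,	/* frame size */
		.headroom = 0,
	};
	int ring_sz = 2048;		/* must be a power of two */

	if (setsockopt(xsk_fd, SOL_XDP, XDP_UMEM_REG, &mr, sizeof(mr)) ||
	    setsockopt(xsk_fd, SOL_XDP, XDP_UMEM_FILL_RING, &ring_sz, sizeof(ring_sz)) ||
	    setsockopt(xsk_fd, SOL_XDP, XDP_UMEM_COMPLETION_RING, &ring_sz, sizeof(ring_sz)) ||
	    setsockopt(xsk_fd, SOL_XDP, XDP_RX_RING, &ring_sz, sizeof(ring_sz)) ||
	    setsockopt(xsk_fd, SOL_XDP, XDP_TX_RING, &ring_sz, sizeof(ring_sz)))
		return -1;

	return 0;
}
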
1352 static void xsk_enter_rxtx_offsets(struct xdp_ring_offset_v1 *ring)
1353 {
1354 	ring->producer = offsetof(struct xdp_rxtx_ring, ptrs.producer);
1355 	ring->consumer = offsetof(struct xdp_rxtx_ring, ptrs.consumer);
1356 	ring->desc = offsetof(struct xdp_rxtx_ring, desc);
1357 }
1358 
1359 static void xsk_enter_umem_offsets(struct xdp_ring_offset_v1 *ring)
1360 {
1361 	ring->producer = offsetof(struct xdp_umem_ring, ptrs.producer);
1362 	ring->consumer = offsetof(struct xdp_umem_ring, ptrs.consumer);
1363 	ring->desc = offsetof(struct xdp_umem_ring, desc);
1364 }
1365 
1366 struct xdp_statistics_v1 {
1367 	__u64 rx_dropped;
1368 	__u64 rx_invalid_descs;
1369 	__u64 tx_invalid_descs;
1370 };
1371 
1372 static int xsk_getsockopt(struct socket *sock, int level, int optname,
1373 			  char __user *optval, int __user *optlen)
1374 {
1375 	struct sock *sk = sock->sk;
1376 	struct xdp_sock *xs = xdp_sk(sk);
1377 	int len;
1378 
1379 	if (level != SOL_XDP)
1380 		return -ENOPROTOOPT;
1381 
1382 	if (get_user(len, optlen))
1383 		return -EFAULT;
1384 	if (len < 0)
1385 		return -EINVAL;
1386 
1387 	switch (optname) {
1388 	case XDP_STATISTICS:
1389 	{
1390 		struct xdp_statistics stats = {};
1391 		bool extra_stats = true;
1392 		size_t stats_size;
1393 
1394 		if (len < sizeof(struct xdp_statistics_v1)) {
1395 			return -EINVAL;
1396 		} else if (len < sizeof(stats)) {
1397 			extra_stats = false;
1398 			stats_size = sizeof(struct xdp_statistics_v1);
1399 		} else {
1400 			stats_size = sizeof(stats);
1401 		}
1402 
1403 		mutex_lock(&xs->mutex);
1404 		stats.rx_dropped = xs->rx_dropped;
1405 		if (extra_stats) {
1406 			stats.rx_ring_full = xs->rx_queue_full;
1407 			stats.rx_fill_ring_empty_descs =
1408 				xs->pool ? xskq_nb_queue_empty_descs(xs->pool->fq) : 0;
1409 			stats.tx_ring_empty_descs = xskq_nb_queue_empty_descs(xs->tx);
1410 		} else {
1411 			stats.rx_dropped += xs->rx_queue_full;
1412 		}
1413 		stats.rx_invalid_descs = xskq_nb_invalid_descs(xs->rx);
1414 		stats.tx_invalid_descs = xskq_nb_invalid_descs(xs->tx);
1415 		mutex_unlock(&xs->mutex);
1416 
1417 		if (copy_to_user(optval, &stats, stats_size))
1418 			return -EFAULT;
1419 		if (put_user(stats_size, optlen))
1420 			return -EFAULT;
1421 
1422 		return 0;
1423 	}
1424 	case XDP_MMAP_OFFSETS:
1425 	{
1426 		struct xdp_mmap_offsets off;
1427 		struct xdp_mmap_offsets_v1 off_v1;
1428 		bool flags_supported = true;
1429 		void *to_copy;
1430 
1431 		if (len < sizeof(off_v1))
1432 			return -EINVAL;
1433 		else if (len < sizeof(off))
1434 			flags_supported = false;
1435 
1436 		if (flags_supported) {
1437 			/* xdp_ring_offset is identical to xdp_ring_offset_v1
1438 			 * except for the flags field added to the end.
1439 			 */
1440 			xsk_enter_rxtx_offsets((struct xdp_ring_offset_v1 *)
1441 					       &off.rx);
1442 			xsk_enter_rxtx_offsets((struct xdp_ring_offset_v1 *)
1443 					       &off.tx);
1444 			xsk_enter_umem_offsets((struct xdp_ring_offset_v1 *)
1445 					       &off.fr);
1446 			xsk_enter_umem_offsets((struct xdp_ring_offset_v1 *)
1447 					       &off.cr);
1448 			off.rx.flags = offsetof(struct xdp_rxtx_ring,
1449 						ptrs.flags);
1450 			off.tx.flags = offsetof(struct xdp_rxtx_ring,
1451 						ptrs.flags);
1452 			off.fr.flags = offsetof(struct xdp_umem_ring,
1453 						ptrs.flags);
1454 			off.cr.flags = offsetof(struct xdp_umem_ring,
1455 						ptrs.flags);
1456 
1457 			len = sizeof(off);
1458 			to_copy = &off;
1459 		} else {
1460 			xsk_enter_rxtx_offsets(&off_v1.rx);
1461 			xsk_enter_rxtx_offsets(&off_v1.tx);
1462 			xsk_enter_umem_offsets(&off_v1.fr);
1463 			xsk_enter_umem_offsets(&off_v1.cr);
1464 
1465 			len = sizeof(off_v1);
1466 			to_copy = &off_v1;
1467 		}
1468 
1469 		if (copy_to_user(optval, to_copy, len))
1470 			return -EFAULT;
1471 		if (put_user(len, optlen))
1472 			return -EFAULT;
1473 
1474 		return 0;
1475 	}
1476 	case XDP_OPTIONS:
1477 	{
1478 		struct xdp_options opts = {};
1479 
1480 		if (len < sizeof(opts))
1481 			return -EINVAL;
1482 
1483 		mutex_lock(&xs->mutex);
1484 		if (xs->zc)
1485 			opts.flags |= XDP_OPTIONS_ZEROCOPY;
1486 		mutex_unlock(&xs->mutex);
1487 
1488 		len = sizeof(opts);
1489 		if (copy_to_user(optval, &opts, len))
1490 			return -EFAULT;
1491 		if (put_user(len, optlen))
1492 			return -EFAULT;
1493 
1494 		return 0;
1495 	}
1496 	default:
1497 		break;
1498 	}
1499 
1500 	return -EOPNOTSUPP;
1501 }
1502 
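/* Illustrative sketch (not part of this file): reading the per-socket
 * counters that xsk_getsockopt() exposes via XDP_STATISTICS. The fd is
 * assumed to be a bound AF_XDP socket.
 */
#include <sys/socket.h>
#include <linux/if_xdp.h>

static int example_read_xsk_stats(int xsk_fd, struct xdp_statistics *stats)
{
	socklen_t optlen = sizeof(*stats);

	/* Older kernels may fill only the shorter v1 layout; the kernel
	 * writes back the size it actually copied via optlen.
	 */
	return getsockopt(xsk_fd, SOL_XDP, XDP_STATISTICS, stats, &optlen);
}
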
1503 static int xsk_mmap(struct file *file, struct socket *sock,
1504 		    struct vm_area_struct *vma)
1505 {
1506 	loff_t offset = (loff_t)vma->vm_pgoff << PAGE_SHIFT;
1507 	unsigned long size = vma->vm_end - vma->vm_start;
1508 	struct xdp_sock *xs = xdp_sk(sock->sk);
1509 	int state = READ_ONCE(xs->state);
1510 	struct xsk_queue *q = NULL;
1511 
1512 	if (state != XSK_READY && state != XSK_BOUND)
1513 		return -EBUSY;
1514 
1515 	if (offset == XDP_PGOFF_RX_RING) {
1516 		q = READ_ONCE(xs->rx);
1517 	} else if (offset == XDP_PGOFF_TX_RING) {
1518 		q = READ_ONCE(xs->tx);
1519 	} else {
1520 		/* Matches the smp_wmb() in XDP_UMEM_REG */
1521 		smp_rmb();
1522 		if (offset == XDP_UMEM_PGOFF_FILL_RING)
1523 			q = state == XSK_READY ? READ_ONCE(xs->fq_tmp) :
1524 						 READ_ONCE(xs->pool->fq);
1525 		else if (offset == XDP_UMEM_PGOFF_COMPLETION_RING)
1526 			q = state == XSK_READY ? READ_ONCE(xs->cq_tmp) :
1527 						 READ_ONCE(xs->pool->cq);
1528 	}
1529 
1530 	if (!q)
1531 		return -EINVAL;
1532 
1533 	/* Matches the smp_wmb() in xsk_init_queue */
1534 	smp_rmb();
1535 	if (size > q->ring_vmalloc_size)
1536 		return -EINVAL;
1537 
1538 	return remap_vmalloc_range(vma, q->ring, 0);
1539 }
1540 
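/* Illustrative sketch (not part of this file): mapping the Rx descriptor
 * ring that xsk_mmap() above exposes. The ring length is assumed to match
 * the earlier XDP_RX_RING setsockopt(); the layout offsets come from
 * XDP_MMAP_OFFSETS.
 */
#include <sys/mman.h>
#include <sys/socket.h>
#include <linux/if_xdp.h>

static void *example_map_rx_ring(int xsk_fd, unsigned int ndescs)
{
	struct xdp_mmap_offsets off;
	socklen_t optlen = sizeof(off);
	size_t len;

	if (getsockopt(xsk_fd, SOL_XDP, XDP_MMAP_OFFSETS, &off, &optlen))
		return MAP_FAILED;

	/* Producer/consumer words plus ndescs Rx descriptors. */
	len = off.rx.desc + ndescs * sizeof(struct xdp_desc);
	return mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED | MAP_POPULATE,
		    xsk_fd, XDP_PGOFF_RX_RING);
}
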
1541 static int xsk_notifier(struct notifier_block *this,
1542 			unsigned long msg, void *ptr)
1543 {
1544 	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
1545 	struct net *net = dev_net(dev);
1546 	struct sock *sk;
1547 
1548 	switch (msg) {
1549 	case NETDEV_UNREGISTER:
1550 		mutex_lock(&net->xdp.lock);
1551 		sk_for_each(sk, &net->xdp.list) {
1552 			struct xdp_sock *xs = xdp_sk(sk);
1553 
1554 			mutex_lock(&xs->mutex);
1555 			if (xs->dev == dev) {
1556 				sk->sk_err = ENETDOWN;
1557 				if (!sock_flag(sk, SOCK_DEAD))
1558 					sk_error_report(sk);
1559 
1560 				xsk_unbind_dev(xs);
1561 
1562 				/* Clear device references. */
1563 				xp_clear_dev(xs->pool);
1564 			}
1565 			mutex_unlock(&xs->mutex);
1566 		}
1567 		mutex_unlock(&net->xdp.lock);
1568 		break;
1569 	}
1570 	return NOTIFY_DONE;
1571 }
1572 
1573 static struct proto xsk_proto = {
1574 	.name =		"XDP",
1575 	.owner =	THIS_MODULE,
1576 	.obj_size =	sizeof(struct xdp_sock),
1577 };
1578 
1579 static const struct proto_ops xsk_proto_ops = {
1580 	.family		= PF_XDP,
1581 	.owner		= THIS_MODULE,
1582 	.release	= xsk_release,
1583 	.bind		= xsk_bind,
1584 	.connect	= sock_no_connect,
1585 	.socketpair	= sock_no_socketpair,
1586 	.accept		= sock_no_accept,
1587 	.getname	= sock_no_getname,
1588 	.poll		= xsk_poll,
1589 	.ioctl		= sock_no_ioctl,
1590 	.listen		= sock_no_listen,
1591 	.shutdown	= sock_no_shutdown,
1592 	.setsockopt	= xsk_setsockopt,
1593 	.getsockopt	= xsk_getsockopt,
1594 	.sendmsg	= xsk_sendmsg,
1595 	.recvmsg	= xsk_recvmsg,
1596 	.mmap		= xsk_mmap,
1597 };
1598 
1599 static void xsk_destruct(struct sock *sk)
1600 {
1601 	struct xdp_sock *xs = xdp_sk(sk);
1602 
1603 	if (!sock_flag(sk, SOCK_DEAD))
1604 		return;
1605 
1606 	if (!xp_put_pool(xs->pool))
1607 		xdp_put_umem(xs->umem, !xs->pool);
1608 }
1609 
1610 static int xsk_create(struct net *net, struct socket *sock, int protocol,
1611 		      int kern)
1612 {
1613 	struct xdp_sock *xs;
1614 	struct sock *sk;
1615 
1616 	if (!ns_capable(net->user_ns, CAP_NET_RAW))
1617 		return -EPERM;
1618 	if (sock->type != SOCK_RAW)
1619 		return -ESOCKTNOSUPPORT;
1620 
1621 	if (protocol)
1622 		return -EPROTONOSUPPORT;
1623 
1624 	sock->state = SS_UNCONNECTED;
1625 
1626 	sk = sk_alloc(net, PF_XDP, GFP_KERNEL, &xsk_proto, kern);
1627 	if (!sk)
1628 		return -ENOBUFS;
1629 
1630 	sock->ops = &xsk_proto_ops;
1631 
1632 	sock_init_data(sock, sk);
1633 
1634 	sk->sk_family = PF_XDP;
1635 
1636 	sk->sk_destruct = xsk_destruct;
1637 
1638 	sock_set_flag(sk, SOCK_RCU_FREE);
1639 
1640 	xs = xdp_sk(sk);
1641 	xs->state = XSK_READY;
1642 	mutex_init(&xs->mutex);
1643 	spin_lock_init(&xs->rx_lock);
1644 
1645 	INIT_LIST_HEAD(&xs->map_list);
1646 	spin_lock_init(&xs->map_list_lock);
1647 
1648 	mutex_lock(&net->xdp.lock);
1649 	sk_add_node_rcu(sk, &net->xdp.list);
1650 	mutex_unlock(&net->xdp.lock);
1651 
1652 	sock_prot_inuse_add(net, &xsk_proto, 1);
1653 
1654 	return 0;
1655 }
1656 
1657 static const struct net_proto_family xsk_family_ops = {
1658 	.family = PF_XDP,
1659 	.create = xsk_create,
1660 	.owner	= THIS_MODULE,
1661 };
1662 
1663 static struct notifier_block xsk_netdev_notifier = {
1664 	.notifier_call	= xsk_notifier,
1665 };
1666 
1667 static int __net_init xsk_net_init(struct net *net)
1668 {
1669 	mutex_init(&net->xdp.lock);
1670 	INIT_HLIST_HEAD(&net->xdp.list);
1671 	return 0;
1672 }
1673 
1674 static void __net_exit xsk_net_exit(struct net *net)
1675 {
1676 	WARN_ON_ONCE(!hlist_empty(&net->xdp.list));
1677 }
1678 
1679 static struct pernet_operations xsk_net_ops = {
1680 	.init = xsk_net_init,
1681 	.exit = xsk_net_exit,
1682 };
1683 
1684 static int __init xsk_init(void)
1685 {
1686 	int err, cpu;
1687 
1688 	err = proto_register(&xsk_proto, 0 /* no slab */);
1689 	if (err)
1690 		goto out;
1691 
1692 	err = sock_register(&xsk_family_ops);
1693 	if (err)
1694 		goto out_proto;
1695 
1696 	err = register_pernet_subsys(&xsk_net_ops);
1697 	if (err)
1698 		goto out_sk;
1699 
1700 	err = register_netdevice_notifier(&xsk_netdev_notifier);
1701 	if (err)
1702 		goto out_pernet;
1703 
1704 	for_each_possible_cpu(cpu)
1705 		INIT_LIST_HEAD(&per_cpu(xskmap_flush_list, cpu));
1706 	return 0;
1707 
1708 out_pernet:
1709 	unregister_pernet_subsys(&xsk_net_ops);
1710 out_sk:
1711 	sock_unregister(PF_XDP);
1712 out_proto:
1713 	proto_unregister(&xsk_proto);
1714 out:
1715 	return err;
1716 }
1717 
1718 fs_initcall(xsk_init);
1719