// SPDX-License-Identifier: GPL-2.0
/* XDP sockets
 *
 * AF_XDP sockets allow a channel between XDP programs and userspace
 * applications.
 * Copyright(c) 2018 Intel Corporation.
 *
 * Author(s): Björn Töpel <bjorn.topel@intel.com>
 *	      Magnus Karlsson <magnus.karlsson@intel.com>
 */

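/* Illustrative userspace sketch (not kernel code): the rough sequence an
 * application follows against the socket APIs implemented in this file.
 * Error handling and the ring mmap details are omitted; see
 * Documentation/networking/af_xdp.rst for the complete flow.
 *
 *	int fd = socket(AF_XDP, SOCK_RAW, 0);
 *	setsockopt(fd, SOL_XDP, XDP_UMEM_REG, &mr, sizeof(mr));
 *	setsockopt(fd, SOL_XDP, XDP_UMEM_FILL_RING, &nentries, sizeof(nentries));
 *	setsockopt(fd, SOL_XDP, XDP_RX_RING, &nentries, sizeof(nentries));
 *	// mmap() the rings using getsockopt(XDP_MMAP_OFFSETS), then:
 *	bind(fd, (struct sockaddr *)&sxdp, sizeof(sxdp));
 */
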
#define pr_fmt(fmt) "AF_XDP: %s: " fmt, __func__

#include <linux/if_xdp.h>
#include <linux/init.h>
#include <linux/sched/mm.h>
#include <linux/sched/signal.h>
#include <linux/sched/task.h>
#include <linux/socket.h>
#include <linux/file.h>
#include <linux/uaccess.h>
#include <linux/net.h>
#include <linux/netdevice.h>
#include <linux/rculist.h>
#include <linux/vmalloc.h>
#include <net/xdp_sock_drv.h>
#include <net/busy_poll.h>
#include <net/xdp.h>

#include "xsk_queue.h"
#include "xdp_umem.h"
#include "xsk.h"

#define TX_BATCH_SIZE 32

static DEFINE_PER_CPU(struct list_head, xskmap_flush_list);

void xsk_set_rx_need_wakeup(struct xsk_buff_pool *pool)
{
	if (pool->cached_need_wakeup & XDP_WAKEUP_RX)
		return;

	pool->fq->ring->flags |= XDP_RING_NEED_WAKEUP;
	pool->cached_need_wakeup |= XDP_WAKEUP_RX;
}
EXPORT_SYMBOL(xsk_set_rx_need_wakeup);

void xsk_set_tx_need_wakeup(struct xsk_buff_pool *pool)
{
	struct xdp_sock *xs;

	if (pool->cached_need_wakeup & XDP_WAKEUP_TX)
		return;

	rcu_read_lock();
	list_for_each_entry_rcu(xs, &pool->xsk_tx_list, tx_list) {
		xs->tx->ring->flags |= XDP_RING_NEED_WAKEUP;
	}
	rcu_read_unlock();

	pool->cached_need_wakeup |= XDP_WAKEUP_TX;
}
EXPORT_SYMBOL(xsk_set_tx_need_wakeup);

void xsk_clear_rx_need_wakeup(struct xsk_buff_pool *pool)
{
	if (!(pool->cached_need_wakeup & XDP_WAKEUP_RX))
		return;

	pool->fq->ring->flags &= ~XDP_RING_NEED_WAKEUP;
	pool->cached_need_wakeup &= ~XDP_WAKEUP_RX;
}
EXPORT_SYMBOL(xsk_clear_rx_need_wakeup);

void xsk_clear_tx_need_wakeup(struct xsk_buff_pool *pool)
{
	struct xdp_sock *xs;

	if (!(pool->cached_need_wakeup & XDP_WAKEUP_TX))
		return;

	rcu_read_lock();
	list_for_each_entry_rcu(xs, &pool->xsk_tx_list, tx_list) {
		xs->tx->ring->flags &= ~XDP_RING_NEED_WAKEUP;
	}
	rcu_read_unlock();

	pool->cached_need_wakeup &= ~XDP_WAKEUP_TX;
}
EXPORT_SYMBOL(xsk_clear_tx_need_wakeup);

bool xsk_uses_need_wakeup(struct xsk_buff_pool *pool)
{
	return pool->uses_need_wakeup;
}
EXPORT_SYMBOL(xsk_uses_need_wakeup);

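/* The need_wakeup flags set and cleared above are consumed by userspace:
 * before issuing a syscall, an application checks the flag in the mmap'ed
 * ring so that wakeups are skipped while the driver is already processing
 * the rings. A minimal sketch of the canonical Tx-side check, assuming a
 * socket bound with XDP_USE_NEED_WAKEUP:
 *
 *	if (ring->flags & XDP_RING_NEED_WAKEUP)
 *		sendto(fd, NULL, 0, MSG_DONTWAIT, NULL, 0);
 */
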
struct xsk_buff_pool *xsk_get_pool_from_qid(struct net_device *dev,
					    u16 queue_id)
{
	if (queue_id < dev->real_num_rx_queues)
		return dev->_rx[queue_id].pool;
	if (queue_id < dev->real_num_tx_queues)
		return dev->_tx[queue_id].pool;

	return NULL;
}
EXPORT_SYMBOL(xsk_get_pool_from_qid);

void xsk_clear_pool_at_qid(struct net_device *dev, u16 queue_id)
{
	if (queue_id < dev->num_rx_queues)
		dev->_rx[queue_id].pool = NULL;
	if (queue_id < dev->num_tx_queues)
		dev->_tx[queue_id].pool = NULL;
}

/* The buffer pool is stored both in the _rx struct and the _tx struct as we do
 * not know if the device has more tx queues than rx, or the opposite.
 * This might also change during run time.
 */
int xsk_reg_pool_at_qid(struct net_device *dev, struct xsk_buff_pool *pool,
			u16 queue_id)
{
	if (queue_id >= max_t(unsigned int,
			      dev->real_num_rx_queues,
			      dev->real_num_tx_queues))
		return -EINVAL;

	if (queue_id < dev->real_num_rx_queues)
		dev->_rx[queue_id].pool = pool;
	if (queue_id < dev->real_num_tx_queues)
		dev->_tx[queue_id].pool = pool;

	return 0;
}

static int __xsk_rcv_zc(struct xdp_sock *xs, struct xdp_buff *xdp, u32 len)
{
	struct xdp_buff_xsk *xskb = container_of(xdp, struct xdp_buff_xsk, xdp);
	u64 addr;
	int err;

	addr = xp_get_handle(xskb);
	err = xskq_prod_reserve_desc(xs->rx, addr, len);
	if (err) {
		xs->rx_queue_full++;
		return err;
	}

	xp_release(xskb);
	return 0;
}

static void xsk_copy_xdp(struct xdp_buff *to, struct xdp_buff *from, u32 len)
{
	void *from_buf, *to_buf;
	u32 metalen;

	if (unlikely(xdp_data_meta_unsupported(from))) {
		from_buf = from->data;
		to_buf = to->data;
		metalen = 0;
	} else {
		from_buf = from->data_meta;
		metalen = from->data - from->data_meta;
		to_buf = to->data - metalen;
	}

	memcpy(to_buf, from_buf, len + metalen);
}

static int __xsk_rcv(struct xdp_sock *xs, struct xdp_buff *xdp)
{
	struct xdp_buff *xsk_xdp;
	int err;
	u32 len;

	len = xdp->data_end - xdp->data;
	if (len > xsk_pool_get_rx_frame_size(xs->pool)) {
		xs->rx_dropped++;
		return -ENOSPC;
	}

	xsk_xdp = xsk_buff_alloc(xs->pool);
	if (!xsk_xdp) {
		xs->rx_dropped++;
		return -ENOMEM;
	}

	xsk_copy_xdp(xsk_xdp, xdp, len);
	err = __xsk_rcv_zc(xs, xsk_xdp, len);
	if (err) {
		xsk_buff_free(xsk_xdp);
		return err;
	}
	return 0;
}

static bool xsk_tx_writeable(struct xdp_sock *xs)
{
	if (xskq_cons_present_entries(xs->tx) > xs->tx->nentries / 2)
		return false;

	return true;
}

static bool xsk_is_bound(struct xdp_sock *xs)
{
	if (READ_ONCE(xs->state) == XSK_BOUND) {
		/* Matches smp_wmb() in bind(). */
		smp_rmb();
		return true;
	}
	return false;
}

static int xsk_rcv_check(struct xdp_sock *xs, struct xdp_buff *xdp)
{
	if (!xsk_is_bound(xs))
		return -ENXIO;

	if (xs->dev != xdp->rxq->dev || xs->queue_id != xdp->rxq->queue_index)
		return -EINVAL;

	sk_mark_napi_id_once_xdp(&xs->sk, xdp);
	return 0;
}

static void xsk_flush(struct xdp_sock *xs)
{
	xskq_prod_submit(xs->rx);
	__xskq_cons_release(xs->pool->fq);
	sock_def_readable(&xs->sk);
}

int xsk_generic_rcv(struct xdp_sock *xs, struct xdp_buff *xdp)
{
	int err;

	spin_lock_bh(&xs->rx_lock);
	err = xsk_rcv_check(xs, xdp);
	if (!err) {
		err = __xsk_rcv(xs, xdp);
		xsk_flush(xs);
	}
	spin_unlock_bh(&xs->rx_lock);
	return err;
}

static int xsk_rcv(struct xdp_sock *xs, struct xdp_buff *xdp)
{
	int err;
	u32 len;

	err = xsk_rcv_check(xs, xdp);
	if (err)
		return err;

	if (xdp->rxq->mem.type == MEM_TYPE_XSK_BUFF_POOL) {
		len = xdp->data_end - xdp->data;
		return __xsk_rcv_zc(xs, xdp, len);
	}

	err = __xsk_rcv(xs, xdp);
	if (!err)
		xdp_return_buff(xdp);
	return err;
}

int __xsk_map_redirect(struct xdp_sock *xs, struct xdp_buff *xdp)
{
	struct list_head *flush_list = this_cpu_ptr(&xskmap_flush_list);
	int err;

	err = xsk_rcv(xs, xdp);
	if (err)
		return err;

	if (!xs->flush_node.prev)
		list_add(&xs->flush_node, flush_list);

	return 0;
}

void __xsk_map_flush(void)
{
	struct list_head *flush_list = this_cpu_ptr(&xskmap_flush_list);
	struct xdp_sock *xs, *tmp;

	list_for_each_entry_safe(xs, tmp, flush_list, flush_node) {
		xsk_flush(xs);
		__list_del_clearprev(&xs->flush_node);
	}
}

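/* Usage note: __xsk_map_redirect() above is called once per packet from
 * the XDP_REDIRECT path and queues the socket on the per-cpu flush list;
 * the core then calls __xsk_map_flush() (via xdp_do_flush()) once at the
 * end of the driver's NAPI poll, publishing all Rx entries in one batch.
 */
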
void xsk_tx_completed(struct xsk_buff_pool *pool, u32 nb_entries)
{
	xskq_prod_submit_n(pool->cq, nb_entries);
}
EXPORT_SYMBOL(xsk_tx_completed);

void xsk_tx_release(struct xsk_buff_pool *pool)
{
	struct xdp_sock *xs;

	rcu_read_lock();
	list_for_each_entry_rcu(xs, &pool->xsk_tx_list, tx_list) {
		__xskq_cons_release(xs->tx);
		if (xsk_tx_writeable(xs))
			xs->sk.sk_write_space(&xs->sk);
	}
	rcu_read_unlock();
}
EXPORT_SYMBOL(xsk_tx_release);

bool xsk_tx_peek_desc(struct xsk_buff_pool *pool, struct xdp_desc *desc)
{
	struct xdp_sock *xs;

	rcu_read_lock();
	list_for_each_entry_rcu(xs, &pool->xsk_tx_list, tx_list) {
		if (!xskq_cons_peek_desc(xs->tx, desc, pool)) {
			xs->tx->queue_empty_descs++;
			continue;
		}

		/* This is the backpressure mechanism for the Tx path.
		 * Reserve space in the completion queue and only proceed
		 * if there is space in it. This avoids having to implement
		 * any buffering in the Tx path.
		 */
		if (xskq_prod_reserve_addr(pool->cq, desc->addr))
			goto out;

		xskq_cons_release(xs->tx);
		rcu_read_unlock();
		return true;
	}

out:
	rcu_read_unlock();
	return false;
}
EXPORT_SYMBOL(xsk_tx_peek_desc);

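/* A minimal sketch of how a zero-copy driver might consume the Tx API
 * exported above; queue_frame_for_dma() and frames_done are illustrative
 * placeholders, not real helpers:
 *
 *	struct xdp_desc desc;
 *
 *	while (budget-- && xsk_tx_peek_desc(pool, &desc))
 *		queue_frame_for_dma(pool, &desc);
 *	xsk_tx_release(pool);
 *	...
 *	xsk_tx_completed(pool, frames_done);	// from the Tx completion path
 */
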
static u32 xsk_tx_peek_release_fallback(struct xsk_buff_pool *pool, u32 max_entries)
{
	struct xdp_desc *descs = pool->tx_descs;
	u32 nb_pkts = 0;

	while (nb_pkts < max_entries && xsk_tx_peek_desc(pool, &descs[nb_pkts]))
		nb_pkts++;

	xsk_tx_release(pool);
	return nb_pkts;
}

u32 xsk_tx_peek_release_desc_batch(struct xsk_buff_pool *pool, u32 nb_pkts)
{
	struct xdp_sock *xs;

	rcu_read_lock();
	if (!list_is_singular(&pool->xsk_tx_list)) {
		/* Fall back to the non-batched version */
		rcu_read_unlock();
		return xsk_tx_peek_release_fallback(pool, nb_pkts);
	}

	xs = list_first_or_null_rcu(&pool->xsk_tx_list, struct xdp_sock, tx_list);
	if (!xs) {
		nb_pkts = 0;
		goto out;
	}

	nb_pkts = xskq_cons_nb_entries(xs->tx, nb_pkts);

	/* This is the backpressure mechanism for the Tx path. Try to
	 * reserve space in the completion queue for all packets, but
	 * if there are fewer slots available, just process that many
	 * packets. This avoids having to implement any buffering in
	 * the Tx path.
	 */
	nb_pkts = xskq_prod_nb_free(pool->cq, nb_pkts);
	if (!nb_pkts)
		goto out;

	nb_pkts = xskq_cons_read_desc_batch(xs->tx, pool, nb_pkts);
	if (!nb_pkts) {
		xs->tx->queue_empty_descs++;
		goto out;
	}

	__xskq_cons_release(xs->tx);
	xskq_prod_write_addr_batch(pool->cq, pool->tx_descs, nb_pkts);
	xs->sk.sk_write_space(&xs->sk);

out:
	rcu_read_unlock();
	return nb_pkts;
}
EXPORT_SYMBOL(xsk_tx_peek_release_desc_batch);

static int xsk_wakeup(struct xdp_sock *xs, u8 flags)
{
	struct net_device *dev = xs->dev;

	return dev->netdev_ops->ndo_xsk_wakeup(dev, xs->queue_id, flags);
}

static void xsk_destruct_skb(struct sk_buff *skb)
{
	u64 addr = (u64)(long)skb_shinfo(skb)->destructor_arg;
	struct xdp_sock *xs = xdp_sk(skb->sk);
	unsigned long flags;

	spin_lock_irqsave(&xs->pool->cq_lock, flags);
	xskq_prod_submit_addr(xs->pool->cq, addr);
	spin_unlock_irqrestore(&xs->pool->cq_lock, flags);

	sock_wfree(skb);
}

static struct sk_buff *xsk_build_skb_zerocopy(struct xdp_sock *xs,
					      struct xdp_desc *desc)
{
	struct xsk_buff_pool *pool = xs->pool;
	u32 hr, len, ts, offset, copy, copied;
	struct sk_buff *skb;
	struct page *page;
	void *buffer;
	int err, i;
	u64 addr;

	hr = max(NET_SKB_PAD, L1_CACHE_ALIGN(xs->dev->needed_headroom));

	skb = sock_alloc_send_skb(&xs->sk, hr, 1, &err);
	if (unlikely(!skb))
		return ERR_PTR(err);

	skb_reserve(skb, hr);

	addr = desc->addr;
	len = desc->len;
	ts = pool->unaligned ? len : pool->chunk_size;

	buffer = xsk_buff_raw_get_data(pool, addr);
	offset = offset_in_page(buffer);
	addr = buffer - pool->addrs;

	for (copied = 0, i = 0; copied < len; i++) {
		page = pool->umem->pgs[addr >> PAGE_SHIFT];
		get_page(page);

		copy = min_t(u32, PAGE_SIZE - offset, len - copied);
		skb_fill_page_desc(skb, i, page, offset, copy);

		copied += copy;
		addr += copy;
		offset = 0;
	}

	skb->len += len;
	skb->data_len += len;
	skb->truesize += ts;

	refcount_add(ts, &xs->sk.sk_wmem_alloc);

	return skb;
}

static struct sk_buff *xsk_build_skb(struct xdp_sock *xs,
				     struct xdp_desc *desc)
{
	struct net_device *dev = xs->dev;
	struct sk_buff *skb;

	if (dev->priv_flags & IFF_TX_SKB_NO_LINEAR) {
		skb = xsk_build_skb_zerocopy(xs, desc);
		if (IS_ERR(skb))
			return skb;
	} else {
		u32 hr, tr, len;
		void *buffer;
		int err;

		hr = max(NET_SKB_PAD, L1_CACHE_ALIGN(dev->needed_headroom));
		tr = dev->needed_tailroom;
		len = desc->len;

		skb = sock_alloc_send_skb(&xs->sk, hr + len + tr, 1, &err);
		if (unlikely(!skb))
			return ERR_PTR(err);

		skb_reserve(skb, hr);
		skb_put(skb, len);

		buffer = xsk_buff_raw_get_data(xs->pool, desc->addr);
		err = skb_store_bits(skb, 0, buffer, len);
		if (unlikely(err)) {
			kfree_skb(skb);
			return ERR_PTR(err);
		}
	}

	skb->dev = dev;
	skb->priority = xs->sk.sk_priority;
	skb->mark = READ_ONCE(xs->sk.sk_mark);
	skb_shinfo(skb)->destructor_arg = (void *)(long)desc->addr;
	skb->destructor = xsk_destruct_skb;

	return skb;
}

static int __xsk_generic_xmit(struct sock *sk)
{
	struct xdp_sock *xs = xdp_sk(sk);
	u32 max_batch = TX_BATCH_SIZE;
	bool sent_frame = false;
	struct xdp_desc desc;
	struct sk_buff *skb;
	unsigned long flags;
	int err = 0;

	mutex_lock(&xs->mutex);

	/* Since we dropped the RCU read lock, the socket state might have changed. */
	if (unlikely(!xsk_is_bound(xs))) {
		err = -ENXIO;
		goto out;
	}

	if (xs->queue_id >= xs->dev->real_num_tx_queues)
		goto out;

	while (xskq_cons_peek_desc(xs->tx, &desc, xs->pool)) {
		if (max_batch-- == 0) {
			err = -EAGAIN;
			goto out;
		}

		/* This is the backpressure mechanism for the Tx path.
		 * Reserve space in the completion queue and only proceed
		 * if there is space in it. This avoids having to implement
		 * any buffering in the Tx path.
		 */
		spin_lock_irqsave(&xs->pool->cq_lock, flags);
		if (xskq_prod_reserve(xs->pool->cq)) {
			spin_unlock_irqrestore(&xs->pool->cq_lock, flags);
			goto out;
		}
		spin_unlock_irqrestore(&xs->pool->cq_lock, flags);

		skb = xsk_build_skb(xs, &desc);
		if (IS_ERR(skb)) {
			err = PTR_ERR(skb);
			spin_lock_irqsave(&xs->pool->cq_lock, flags);
			xskq_prod_cancel(xs->pool->cq);
			spin_unlock_irqrestore(&xs->pool->cq_lock, flags);
			goto out;
		}

		err = __dev_direct_xmit(skb, xs->queue_id);
		if (err == NETDEV_TX_BUSY) {
			/* Tell user-space to retry the send */
			skb->destructor = sock_wfree;
			spin_lock_irqsave(&xs->pool->cq_lock, flags);
			xskq_prod_cancel(xs->pool->cq);
			spin_unlock_irqrestore(&xs->pool->cq_lock, flags);
			/* Free skb without triggering the perf drop trace */
			consume_skb(skb);
			err = -EAGAIN;
			goto out;
		}

		xskq_cons_release(xs->tx);
		/* Ignore NET_XMIT_CN as packet might have been sent */
		if (err == NET_XMIT_DROP) {
			/* SKB completed but not sent */
			err = -EBUSY;
			goto out;
		}

		sent_frame = true;
	}

	xs->tx->queue_empty_descs++;

out:
	if (sent_frame)
		if (xsk_tx_writeable(xs))
			sk->sk_write_space(sk);

	mutex_unlock(&xs->mutex);
	return err;
}

static int xsk_generic_xmit(struct sock *sk)
{
	int ret;

	/* Drop the RCU lock since the SKB path might sleep. */
	rcu_read_unlock();
	ret = __xsk_generic_xmit(sk);
	/* Reacquire the RCU lock before going into common code. */
	rcu_read_lock();

	return ret;
}

static bool xsk_no_wakeup(struct sock *sk)
{
#ifdef CONFIG_NET_RX_BUSY_POLL
	/* Prefer busy-polling, skip the wakeup. */
	return READ_ONCE(sk->sk_prefer_busy_poll) && READ_ONCE(sk->sk_ll_usec) &&
		READ_ONCE(sk->sk_napi_id) >= MIN_NAPI_ID;
#else
	return false;
#endif
}

static int xsk_check_common(struct xdp_sock *xs)
{
	if (unlikely(!xsk_is_bound(xs)))
		return -ENXIO;
	if (unlikely(!(xs->dev->flags & IFF_UP)))
		return -ENETDOWN;

	return 0;
}

static int __xsk_sendmsg(struct socket *sock, struct msghdr *m, size_t total_len)
{
	bool need_wait = !(m->msg_flags & MSG_DONTWAIT);
	struct sock *sk = sock->sk;
	struct xdp_sock *xs = xdp_sk(sk);
	struct xsk_buff_pool *pool;
	int err;

	err = xsk_check_common(xs);
	if (err)
		return err;
	if (unlikely(need_wait))
		return -EOPNOTSUPP;
	if (unlikely(!xs->tx))
		return -ENOBUFS;

	if (sk_can_busy_loop(sk)) {
		if (xs->zc)
			__sk_mark_napi_id_once(sk, xsk_pool_get_napi_id(xs->pool));
		sk_busy_loop(sk, 1); /* only support non-blocking sockets */
	}

	if (xs->zc && xsk_no_wakeup(sk))
		return 0;

	pool = xs->pool;
	if (pool->cached_need_wakeup & XDP_WAKEUP_TX) {
		if (xs->zc)
			return xsk_wakeup(xs, XDP_WAKEUP_TX);
		return xsk_generic_xmit(sk);
	}
	return 0;
}

static int xsk_sendmsg(struct socket *sock, struct msghdr *m, size_t total_len)
{
	int ret;

	rcu_read_lock();
	ret = __xsk_sendmsg(sock, m, total_len);
	rcu_read_unlock();

	return ret;
}

static int __xsk_recvmsg(struct socket *sock, struct msghdr *m, size_t len, int flags)
{
	bool need_wait = !(flags & MSG_DONTWAIT);
	struct sock *sk = sock->sk;
	struct xdp_sock *xs = xdp_sk(sk);
	int err;

	err = xsk_check_common(xs);
	if (err)
		return err;
	if (unlikely(!xs->rx))
		return -ENOBUFS;
	if (unlikely(need_wait))
		return -EOPNOTSUPP;

	if (sk_can_busy_loop(sk))
		sk_busy_loop(sk, 1); /* only support non-blocking sockets */

	if (xsk_no_wakeup(sk))
		return 0;

	if (xs->pool->cached_need_wakeup & XDP_WAKEUP_RX && xs->zc)
		return xsk_wakeup(xs, XDP_WAKEUP_RX);
	return 0;
}

static int xsk_recvmsg(struct socket *sock, struct msghdr *m, size_t len, int flags)
{
	int ret;

	rcu_read_lock();
	ret = __xsk_recvmsg(sock, m, len, flags);
	rcu_read_unlock();

	return ret;
}

static __poll_t xsk_poll(struct file *file, struct socket *sock,
			 struct poll_table_struct *wait)
{
	__poll_t mask = 0;
	struct sock *sk = sock->sk;
	struct xdp_sock *xs = xdp_sk(sk);
	struct xsk_buff_pool *pool;

	sock_poll_wait(file, sock, wait);

	rcu_read_lock();
	if (xsk_check_common(xs))
		goto skip_tx;

	pool = xs->pool;

	if (pool->cached_need_wakeup) {
		if (xs->zc)
			xsk_wakeup(xs, pool->cached_need_wakeup);
		else if (xs->tx)
			/* Poll needs to drive Tx also in copy mode */
			xsk_generic_xmit(sk);
	}

skip_tx:
	if (xs->rx && !xskq_prod_is_empty(xs->rx))
		mask |= EPOLLIN | EPOLLRDNORM;
	if (xs->tx && xsk_tx_writeable(xs))
		mask |= EPOLLOUT | EPOLLWRNORM;

	rcu_read_unlock();
	return mask;
}

static int xsk_init_queue(u32 entries, struct xsk_queue **queue,
			  bool umem_queue)
{
	struct xsk_queue *q;

	if (entries == 0 || *queue || !is_power_of_2(entries))
		return -EINVAL;

	q = xskq_create(entries, umem_queue);
	if (!q)
		return -ENOMEM;

	/* Make sure queue is ready before it can be seen by others */
	smp_wmb();
	WRITE_ONCE(*queue, q);
	return 0;
}

static void xsk_unbind_dev(struct xdp_sock *xs)
{
	struct net_device *dev = xs->dev;

	if (xs->state != XSK_BOUND)
		return;
	WRITE_ONCE(xs->state, XSK_UNBOUND);

	/* Wait for driver to stop using the xdp socket. */
	xp_del_xsk(xs->pool, xs);
	synchronize_net();
	dev_put(dev);
}

static struct xsk_map *xsk_get_map_list_entry(struct xdp_sock *xs,
					      struct xdp_sock __rcu ***map_entry)
{
	struct xsk_map *map = NULL;
	struct xsk_map_node *node;

	*map_entry = NULL;

	spin_lock_bh(&xs->map_list_lock);
	node = list_first_entry_or_null(&xs->map_list, struct xsk_map_node,
					node);
	if (node) {
		bpf_map_inc(&node->map->map);
		map = node->map;
		*map_entry = node->map_entry;
	}
	spin_unlock_bh(&xs->map_list_lock);
	return map;
}

static void xsk_delete_from_maps(struct xdp_sock *xs)
{
	/* This function removes the current XDP socket from all the
	 * maps it resides in. We need to take extra care here, due to
	 * the two locks involved. Each map has a lock synchronizing
	 * updates to the entries, and each socket has a lock that
	 * synchronizes access to the list of maps (map_list). For
	 * deadlock avoidance the locks need to be taken in the order
	 * "map lock"->"socket map list lock". We start off by
	 * accessing the socket map list, and take a reference to the
	 * map to guarantee existence between the
	 * xsk_get_map_list_entry() and xsk_map_try_sock_delete()
	 * calls. Then we ask the map to remove the socket, which
	 * tries to remove the socket from the map. Note that there
	 * might be updates to the map between
	 * xsk_get_map_list_entry() and xsk_map_try_sock_delete().
	 */
	struct xdp_sock __rcu **map_entry = NULL;
	struct xsk_map *map;

	while ((map = xsk_get_map_list_entry(xs, &map_entry))) {
		xsk_map_try_sock_delete(map, xs, map_entry);
		bpf_map_put(&map->map);
	}
}

static int xsk_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct xdp_sock *xs = xdp_sk(sk);
	struct net *net;

	if (!sk)
		return 0;

	net = sock_net(sk);

	mutex_lock(&net->xdp.lock);
	sk_del_node_init_rcu(sk);
	mutex_unlock(&net->xdp.lock);

	sock_prot_inuse_add(net, sk->sk_prot, -1);

	xsk_delete_from_maps(xs);
	mutex_lock(&xs->mutex);
	xsk_unbind_dev(xs);
	mutex_unlock(&xs->mutex);

	xskq_destroy(xs->rx);
	xskq_destroy(xs->tx);
	xskq_destroy(xs->fq_tmp);
	xskq_destroy(xs->cq_tmp);

	sock_orphan(sk);
	sock->sk = NULL;

	sock_put(sk);

	return 0;
}

static struct socket *xsk_lookup_xsk_from_fd(int fd)
{
	struct socket *sock;
	int err;

	sock = sockfd_lookup(fd, &err);
	if (!sock)
		return ERR_PTR(-ENOTSOCK);

	if (sock->sk->sk_family != PF_XDP) {
		sockfd_put(sock);
		return ERR_PTR(-ENOPROTOOPT);
	}

	return sock;
}

static bool xsk_validate_queues(struct xdp_sock *xs)
{
	return xs->fq_tmp && xs->cq_tmp;
}

static int xsk_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
{
	struct sockaddr_xdp *sxdp = (struct sockaddr_xdp *)addr;
	struct sock *sk = sock->sk;
	struct xdp_sock *xs = xdp_sk(sk);
	struct net_device *dev;
	int bound_dev_if;
	u32 flags, qid;
	int err = 0;

	if (addr_len < sizeof(struct sockaddr_xdp))
		return -EINVAL;
	if (sxdp->sxdp_family != AF_XDP)
		return -EINVAL;

	flags = sxdp->sxdp_flags;
	if (flags & ~(XDP_SHARED_UMEM | XDP_COPY | XDP_ZEROCOPY |
		      XDP_USE_NEED_WAKEUP))
		return -EINVAL;

	bound_dev_if = READ_ONCE(sk->sk_bound_dev_if);
	if (bound_dev_if && bound_dev_if != sxdp->sxdp_ifindex)
		return -EINVAL;

	rtnl_lock();
	mutex_lock(&xs->mutex);
	if (xs->state != XSK_READY) {
		err = -EBUSY;
		goto out_release;
	}

	dev = dev_get_by_index(sock_net(sk), sxdp->sxdp_ifindex);
	if (!dev) {
		err = -ENODEV;
		goto out_release;
	}

	if (!xs->rx && !xs->tx) {
		err = -EINVAL;
		goto out_unlock;
	}

	qid = sxdp->sxdp_queue_id;

	if (flags & XDP_SHARED_UMEM) {
		struct xdp_sock *umem_xs;
		struct socket *sock;

		if ((flags & XDP_COPY) || (flags & XDP_ZEROCOPY) ||
		    (flags & XDP_USE_NEED_WAKEUP)) {
			/* Cannot specify flags for shared sockets. */
			err = -EINVAL;
			goto out_unlock;
		}

		if (xs->umem) {
			/* We already have our own. */
			err = -EINVAL;
			goto out_unlock;
		}

		sock = xsk_lookup_xsk_from_fd(sxdp->sxdp_shared_umem_fd);
		if (IS_ERR(sock)) {
			err = PTR_ERR(sock);
			goto out_unlock;
		}

		umem_xs = xdp_sk(sock->sk);
		if (!xsk_is_bound(umem_xs)) {
			err = -EBADF;
			sockfd_put(sock);
			goto out_unlock;
		}

		if (umem_xs->queue_id != qid || umem_xs->dev != dev) {
			/* Share the umem with another socket on another qid
			 * and/or device.
			 */
			xs->pool = xp_create_and_assign_umem(xs,
							     umem_xs->umem);
			if (!xs->pool) {
				err = -ENOMEM;
				sockfd_put(sock);
				goto out_unlock;
			}

			err = xp_assign_dev_shared(xs->pool, umem_xs, dev,
						   qid);
			if (err) {
				xp_destroy(xs->pool);
				xs->pool = NULL;
				sockfd_put(sock);
				goto out_unlock;
			}
		} else {
			/* Share the buffer pool with the other socket. */
			if (xs->fq_tmp || xs->cq_tmp) {
				/* Do not allow setting your own fq or cq. */
				err = -EINVAL;
				sockfd_put(sock);
				goto out_unlock;
			}

			xp_get_pool(umem_xs->pool);
			xs->pool = umem_xs->pool;

			/* If the underlying shared umem was created without a
			 * Tx ring, allocate the Tx descs array that the Tx
			 * batching API uses.
			 */
			if (xs->tx && !xs->pool->tx_descs) {
				err = xp_alloc_tx_descs(xs->pool, xs);
				if (err) {
					xp_put_pool(xs->pool);
					sockfd_put(sock);
					goto out_unlock;
				}
			}
		}

		xdp_get_umem(umem_xs->umem);
		WRITE_ONCE(xs->umem, umem_xs->umem);
		sockfd_put(sock);
	} else if (!xs->umem || !xsk_validate_queues(xs)) {
		err = -EINVAL;
		goto out_unlock;
	} else {
		/* This xsk has its own umem. */
		xs->pool = xp_create_and_assign_umem(xs, xs->umem);
		if (!xs->pool) {
			err = -ENOMEM;
			goto out_unlock;
		}

		err = xp_assign_dev(xs->pool, dev, qid, flags);
		if (err) {
			xp_destroy(xs->pool);
			xs->pool = NULL;
			goto out_unlock;
		}
	}

	/* FQ and CQ are now owned by the buffer pool and cleaned up with it. */
	xs->fq_tmp = NULL;
	xs->cq_tmp = NULL;

	xs->dev = dev;
	xs->zc = xs->umem->zc;
	xs->queue_id = qid;
	xp_add_xsk(xs->pool, xs);

out_unlock:
	if (err) {
		dev_put(dev);
	} else {
		/* Matches smp_rmb() in bind() for shared umem
		 * sockets, and xsk_is_bound().
		 */
		smp_wmb();
		WRITE_ONCE(xs->state, XSK_BOUND);
	}
out_release:
	mutex_unlock(&xs->mutex);
	rtnl_unlock();
	return err;
}

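/* A hedged userspace sketch of the bind() call handled above; the field
 * names come from struct sockaddr_xdp in <linux/if_xdp.h>, and "eth0" is
 * just an example device:
 *
 *	struct sockaddr_xdp sxdp = {
 *		.sxdp_family = AF_XDP,
 *		.sxdp_ifindex = if_nametoindex("eth0"),
 *		.sxdp_queue_id = 0,
 *		.sxdp_flags = XDP_USE_NEED_WAKEUP,
 *	};
 *	bind(fd, (struct sockaddr *)&sxdp, sizeof(sxdp));
 */
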
struct xdp_umem_reg_v1 {
	__u64 addr; /* Start of packet data area */
	__u64 len; /* Length of packet data area */
	__u32 chunk_size;
	__u32 headroom;
};

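/* Note: this mirrors the original UMEM registration ABI before fields were
 * appended to struct xdp_umem_reg; xsk_setsockopt() below uses optlen to
 * tell the two layouts apart.
 */
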
static int xsk_setsockopt(struct socket *sock, int level, int optname,
			  sockptr_t optval, unsigned int optlen)
{
	struct sock *sk = sock->sk;
	struct xdp_sock *xs = xdp_sk(sk);
	int err;

	if (level != SOL_XDP)
		return -ENOPROTOOPT;

	switch (optname) {
	case XDP_RX_RING:
	case XDP_TX_RING:
	{
		struct xsk_queue **q;
		int entries;

		if (optlen < sizeof(entries))
			return -EINVAL;
		if (copy_from_sockptr(&entries, optval, sizeof(entries)))
			return -EFAULT;

		mutex_lock(&xs->mutex);
		if (xs->state != XSK_READY) {
			mutex_unlock(&xs->mutex);
			return -EBUSY;
		}
		q = (optname == XDP_TX_RING) ? &xs->tx : &xs->rx;
		err = xsk_init_queue(entries, q, false);
		if (!err && optname == XDP_TX_RING)
			/* Tx needs to be explicitly woken up the first time */
			xs->tx->ring->flags |= XDP_RING_NEED_WAKEUP;
		mutex_unlock(&xs->mutex);
		return err;
	}
	case XDP_UMEM_REG:
	{
		size_t mr_size = sizeof(struct xdp_umem_reg);
		struct xdp_umem_reg mr = {};
		struct xdp_umem *umem;

		if (optlen < sizeof(struct xdp_umem_reg_v1))
			return -EINVAL;
		else if (optlen < sizeof(mr))
			mr_size = sizeof(struct xdp_umem_reg_v1);

		if (copy_from_sockptr(&mr, optval, mr_size))
			return -EFAULT;

		mutex_lock(&xs->mutex);
		if (xs->state != XSK_READY || xs->umem) {
			mutex_unlock(&xs->mutex);
			return -EBUSY;
		}

		umem = xdp_umem_create(&mr);
		if (IS_ERR(umem)) {
			mutex_unlock(&xs->mutex);
			return PTR_ERR(umem);
		}

		/* Make sure umem is ready before it can be seen by others */
		smp_wmb();
		WRITE_ONCE(xs->umem, umem);
		mutex_unlock(&xs->mutex);
		return 0;
	}
	case XDP_UMEM_FILL_RING:
	case XDP_UMEM_COMPLETION_RING:
	{
		struct xsk_queue **q;
		int entries;

		if (copy_from_sockptr(&entries, optval, sizeof(entries)))
			return -EFAULT;

		mutex_lock(&xs->mutex);
		if (xs->state != XSK_READY) {
			mutex_unlock(&xs->mutex);
			return -EBUSY;
		}

		q = (optname == XDP_UMEM_FILL_RING) ? &xs->fq_tmp :
			&xs->cq_tmp;
		err = xsk_init_queue(entries, q, true);
		mutex_unlock(&xs->mutex);
		return err;
	}
	default:
		break;
	}

	return -ENOPROTOOPT;
}

static void xsk_enter_rxtx_offsets(struct xdp_ring_offset_v1 *ring)
{
	ring->producer = offsetof(struct xdp_rxtx_ring, ptrs.producer);
	ring->consumer = offsetof(struct xdp_rxtx_ring, ptrs.consumer);
	ring->desc = offsetof(struct xdp_rxtx_ring, desc);
}

static void xsk_enter_umem_offsets(struct xdp_ring_offset_v1 *ring)
{
	ring->producer = offsetof(struct xdp_umem_ring, ptrs.producer);
	ring->consumer = offsetof(struct xdp_umem_ring, ptrs.consumer);
	ring->desc = offsetof(struct xdp_umem_ring, desc);
}

struct xdp_statistics_v1 {
	__u64 rx_dropped;
	__u64 rx_invalid_descs;
	__u64 tx_invalid_descs;
};

static int xsk_getsockopt(struct socket *sock, int level, int optname,
			  char __user *optval, int __user *optlen)
{
	struct sock *sk = sock->sk;
	struct xdp_sock *xs = xdp_sk(sk);
	int len;

	if (level != SOL_XDP)
		return -ENOPROTOOPT;

	if (get_user(len, optlen))
		return -EFAULT;
	if (len < 0)
		return -EINVAL;

	switch (optname) {
	case XDP_STATISTICS:
	{
		struct xdp_statistics stats = {};
		bool extra_stats = true;
		size_t stats_size;

		if (len < sizeof(struct xdp_statistics_v1)) {
			return -EINVAL;
		} else if (len < sizeof(stats)) {
			extra_stats = false;
			stats_size = sizeof(struct xdp_statistics_v1);
		} else {
			stats_size = sizeof(stats);
		}

		mutex_lock(&xs->mutex);
		stats.rx_dropped = xs->rx_dropped;
		if (extra_stats) {
			stats.rx_ring_full = xs->rx_queue_full;
			stats.rx_fill_ring_empty_descs =
				xs->pool ? xskq_nb_queue_empty_descs(xs->pool->fq) : 0;
			stats.tx_ring_empty_descs = xskq_nb_queue_empty_descs(xs->tx);
		} else {
			stats.rx_dropped += xs->rx_queue_full;
		}
		stats.rx_invalid_descs = xskq_nb_invalid_descs(xs->rx);
		stats.tx_invalid_descs = xskq_nb_invalid_descs(xs->tx);
		mutex_unlock(&xs->mutex);

		if (copy_to_user(optval, &stats, stats_size))
			return -EFAULT;
		if (put_user(stats_size, optlen))
			return -EFAULT;

		return 0;
	}
	case XDP_MMAP_OFFSETS:
	{
		struct xdp_mmap_offsets off;
		struct xdp_mmap_offsets_v1 off_v1;
		bool flags_supported = true;
		void *to_copy;

		if (len < sizeof(off_v1))
			return -EINVAL;
		else if (len < sizeof(off))
			flags_supported = false;

		if (flags_supported) {
			/* xdp_ring_offset is identical to xdp_ring_offset_v1
			 * except for the flags field added to the end.
			 */
			xsk_enter_rxtx_offsets((struct xdp_ring_offset_v1 *)
					       &off.rx);
			xsk_enter_rxtx_offsets((struct xdp_ring_offset_v1 *)
					       &off.tx);
			xsk_enter_umem_offsets((struct xdp_ring_offset_v1 *)
					       &off.fr);
			xsk_enter_umem_offsets((struct xdp_ring_offset_v1 *)
					       &off.cr);
			off.rx.flags = offsetof(struct xdp_rxtx_ring,
						ptrs.flags);
			off.tx.flags = offsetof(struct xdp_rxtx_ring,
						ptrs.flags);
			off.fr.flags = offsetof(struct xdp_umem_ring,
						ptrs.flags);
			off.cr.flags = offsetof(struct xdp_umem_ring,
						ptrs.flags);

			len = sizeof(off);
			to_copy = &off;
		} else {
			xsk_enter_rxtx_offsets(&off_v1.rx);
			xsk_enter_rxtx_offsets(&off_v1.tx);
			xsk_enter_umem_offsets(&off_v1.fr);
			xsk_enter_umem_offsets(&off_v1.cr);

			len = sizeof(off_v1);
			to_copy = &off_v1;
		}

		if (copy_to_user(optval, to_copy, len))
			return -EFAULT;
		if (put_user(len, optlen))
			return -EFAULT;

		return 0;
	}
	case XDP_OPTIONS:
	{
		struct xdp_options opts = {};

		if (len < sizeof(opts))
			return -EINVAL;

		mutex_lock(&xs->mutex);
		if (xs->zc)
			opts.flags |= XDP_OPTIONS_ZEROCOPY;
		mutex_unlock(&xs->mutex);

		len = sizeof(opts);
		if (copy_to_user(optval, &opts, len))
			return -EFAULT;
		if (put_user(len, optlen))
			return -EFAULT;

		return 0;
	}
	default:
		break;
	}

	return -EOPNOTSUPP;
}

static int xsk_mmap(struct file *file, struct socket *sock,
		    struct vm_area_struct *vma)
{
	loff_t offset = (loff_t)vma->vm_pgoff << PAGE_SHIFT;
	unsigned long size = vma->vm_end - vma->vm_start;
	struct xdp_sock *xs = xdp_sk(sock->sk);
	int state = READ_ONCE(xs->state);
	struct xsk_queue *q = NULL;

	if (state != XSK_READY && state != XSK_BOUND)
		return -EBUSY;

	if (offset == XDP_PGOFF_RX_RING) {
		q = READ_ONCE(xs->rx);
	} else if (offset == XDP_PGOFF_TX_RING) {
		q = READ_ONCE(xs->tx);
	} else {
		/* Matches the smp_wmb() in XDP_UMEM_REG */
		smp_rmb();
		if (offset == XDP_UMEM_PGOFF_FILL_RING)
			q = state == XSK_READY ? READ_ONCE(xs->fq_tmp) :
						 READ_ONCE(xs->pool->fq);
		else if (offset == XDP_UMEM_PGOFF_COMPLETION_RING)
			q = state == XSK_READY ? READ_ONCE(xs->cq_tmp) :
						 READ_ONCE(xs->pool->cq);
	}

	if (!q)
		return -EINVAL;

	/* Matches the smp_wmb() in xsk_init_queue */
	smp_rmb();
	if (size > q->ring_vmalloc_size)
		return -EINVAL;

	return remap_vmalloc_range(vma, q->ring, 0);
}

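/* A hedged userspace sketch of mapping the Rx ring through the handler
 * above, using the offsets returned by getsockopt(XDP_MMAP_OFFSETS);
 * nentries is the value previously passed to the XDP_RX_RING setsockopt:
 *
 *	struct xdp_mmap_offsets off;
 *	socklen_t optlen = sizeof(off);
 *
 *	getsockopt(fd, SOL_XDP, XDP_MMAP_OFFSETS, &off, &optlen);
 *	rx_map = mmap(NULL, off.rx.desc + nentries * sizeof(struct xdp_desc),
 *		      PROT_READ | PROT_WRITE, MAP_SHARED | MAP_POPULATE,
 *		      fd, XDP_PGOFF_RX_RING);
 */
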
static int xsk_notifier(struct notifier_block *this,
			unsigned long msg, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct net *net = dev_net(dev);
	struct sock *sk;

	switch (msg) {
	case NETDEV_UNREGISTER:
		mutex_lock(&net->xdp.lock);
		sk_for_each(sk, &net->xdp.list) {
			struct xdp_sock *xs = xdp_sk(sk);

			mutex_lock(&xs->mutex);
			if (xs->dev == dev) {
				sk->sk_err = ENETDOWN;
				if (!sock_flag(sk, SOCK_DEAD))
					sk_error_report(sk);

				xsk_unbind_dev(xs);

				/* Clear device references. */
				xp_clear_dev(xs->pool);
			}
			mutex_unlock(&xs->mutex);
		}
		mutex_unlock(&net->xdp.lock);
		break;
	}
	return NOTIFY_DONE;
}

static struct proto xsk_proto = {
	.name =		"XDP",
	.owner =	THIS_MODULE,
	.obj_size =	sizeof(struct xdp_sock),
};

static const struct proto_ops xsk_proto_ops = {
	.family		= PF_XDP,
	.owner		= THIS_MODULE,
	.release	= xsk_release,
	.bind		= xsk_bind,
	.connect	= sock_no_connect,
	.socketpair	= sock_no_socketpair,
	.accept		= sock_no_accept,
	.getname	= sock_no_getname,
	.poll		= xsk_poll,
	.ioctl		= sock_no_ioctl,
	.listen		= sock_no_listen,
	.shutdown	= sock_no_shutdown,
	.setsockopt	= xsk_setsockopt,
	.getsockopt	= xsk_getsockopt,
	.sendmsg	= xsk_sendmsg,
	.recvmsg	= xsk_recvmsg,
	.mmap		= xsk_mmap,
};

static void xsk_destruct(struct sock *sk)
{
	struct xdp_sock *xs = xdp_sk(sk);

	if (!sock_flag(sk, SOCK_DEAD))
		return;

	if (!xp_put_pool(xs->pool))
		xdp_put_umem(xs->umem, !xs->pool);
}

static int xsk_create(struct net *net, struct socket *sock, int protocol,
		      int kern)
{
	struct xdp_sock *xs;
	struct sock *sk;

	if (!ns_capable(net->user_ns, CAP_NET_RAW))
		return -EPERM;
	if (sock->type != SOCK_RAW)
		return -ESOCKTNOSUPPORT;

	if (protocol)
		return -EPROTONOSUPPORT;

	sock->state = SS_UNCONNECTED;

	sk = sk_alloc(net, PF_XDP, GFP_KERNEL, &xsk_proto, kern);
	if (!sk)
		return -ENOBUFS;

	sock->ops = &xsk_proto_ops;

	sock_init_data(sock, sk);

	sk->sk_family = PF_XDP;

	sk->sk_destruct = xsk_destruct;

	sock_set_flag(sk, SOCK_RCU_FREE);

	xs = xdp_sk(sk);
	xs->state = XSK_READY;
	mutex_init(&xs->mutex);
	spin_lock_init(&xs->rx_lock);

	INIT_LIST_HEAD(&xs->map_list);
	spin_lock_init(&xs->map_list_lock);

	mutex_lock(&net->xdp.lock);
	sk_add_node_rcu(sk, &net->xdp.list);
	mutex_unlock(&net->xdp.lock);

	sock_prot_inuse_add(net, &xsk_proto, 1);

	return 0;
}

static const struct net_proto_family xsk_family_ops = {
	.family = PF_XDP,
	.create = xsk_create,
	.owner	= THIS_MODULE,
};

static struct notifier_block xsk_netdev_notifier = {
	.notifier_call	= xsk_notifier,
};

static int __net_init xsk_net_init(struct net *net)
{
	mutex_init(&net->xdp.lock);
	INIT_HLIST_HEAD(&net->xdp.list);
	return 0;
}

static void __net_exit xsk_net_exit(struct net *net)
{
	WARN_ON_ONCE(!hlist_empty(&net->xdp.list));
}

static struct pernet_operations xsk_net_ops = {
	.init = xsk_net_init,
	.exit = xsk_net_exit,
};

static int __init xsk_init(void)
{
	int err, cpu;

	err = proto_register(&xsk_proto, 0 /* no slab */);
	if (err)
		goto out;

	err = sock_register(&xsk_family_ops);
	if (err)
		goto out_proto;

	err = register_pernet_subsys(&xsk_net_ops);
	if (err)
		goto out_sk;

	err = register_netdevice_notifier(&xsk_netdev_notifier);
	if (err)
		goto out_pernet;

	for_each_possible_cpu(cpu)
		INIT_LIST_HEAD(&per_cpu(xskmap_flush_list, cpu));
	return 0;

out_pernet:
	unregister_pernet_subsys(&xsk_net_ops);
out_sk:
	sock_unregister(PF_XDP);
out_proto:
	proto_unregister(&xsk_proto);
out:
	return err;
}

fs_initcall(xsk_init);