1 // SPDX-License-Identifier: GPL-2.0
2 /* XDP sockets
3  *
4  * AF_XDP sockets provide a channel between XDP programs and userspace
5  * applications.
6  * Copyright(c) 2018 Intel Corporation.
7  *
8  * Author(s): Björn Töpel <bjorn.topel@intel.com>
9  *	      Magnus Karlsson <magnus.karlsson@intel.com>
10  */
11 
12 #define pr_fmt(fmt) "AF_XDP: %s: " fmt, __func__
13 
14 #include <linux/if_xdp.h>
15 #include <linux/init.h>
16 #include <linux/sched/mm.h>
17 #include <linux/sched/signal.h>
18 #include <linux/sched/task.h>
19 #include <linux/socket.h>
20 #include <linux/file.h>
21 #include <linux/uaccess.h>
22 #include <linux/net.h>
23 #include <linux/netdevice.h>
24 #include <linux/rculist.h>
25 #include <net/xdp_sock_drv.h>
26 #include <net/busy_poll.h>
27 #include <net/xdp.h>
28 
29 #include "xsk_queue.h"
30 #include "xdp_umem.h"
31 #include "xsk.h"
32 
33 #define TX_BATCH_SIZE 32
34 
35 static DEFINE_PER_CPU(struct list_head, xskmap_flush_list);
36 
37 void xsk_set_rx_need_wakeup(struct xsk_buff_pool *pool)
38 {
39 	if (pool->cached_need_wakeup & XDP_WAKEUP_RX)
40 		return;
41 
42 	pool->fq->ring->flags |= XDP_RING_NEED_WAKEUP;
43 	pool->cached_need_wakeup |= XDP_WAKEUP_RX;
44 }
45 EXPORT_SYMBOL(xsk_set_rx_need_wakeup);
46 
47 void xsk_set_tx_need_wakeup(struct xsk_buff_pool *pool)
48 {
49 	struct xdp_sock *xs;
50 
51 	if (pool->cached_need_wakeup & XDP_WAKEUP_TX)
52 		return;
53 
54 	rcu_read_lock();
55 	list_for_each_entry_rcu(xs, &pool->xsk_tx_list, tx_list) {
56 		xs->tx->ring->flags |= XDP_RING_NEED_WAKEUP;
57 	}
58 	rcu_read_unlock();
59 
60 	pool->cached_need_wakeup |= XDP_WAKEUP_TX;
61 }
62 EXPORT_SYMBOL(xsk_set_tx_need_wakeup);
63 
64 void xsk_clear_rx_need_wakeup(struct xsk_buff_pool *pool)
65 {
66 	if (!(pool->cached_need_wakeup & XDP_WAKEUP_RX))
67 		return;
68 
69 	pool->fq->ring->flags &= ~XDP_RING_NEED_WAKEUP;
70 	pool->cached_need_wakeup &= ~XDP_WAKEUP_RX;
71 }
72 EXPORT_SYMBOL(xsk_clear_rx_need_wakeup);
73 
74 void xsk_clear_tx_need_wakeup(struct xsk_buff_pool *pool)
75 {
76 	struct xdp_sock *xs;
77 
78 	if (!(pool->cached_need_wakeup & XDP_WAKEUP_TX))
79 		return;
80 
81 	rcu_read_lock();
82 	list_for_each_entry_rcu(xs, &pool->xsk_tx_list, tx_list) {
83 		xs->tx->ring->flags &= ~XDP_RING_NEED_WAKEUP;
84 	}
85 	rcu_read_unlock();
86 
87 	pool->cached_need_wakeup &= ~XDP_WAKEUP_TX;
88 }
89 EXPORT_SYMBOL(xsk_clear_tx_need_wakeup);
90 
91 bool xsk_uses_need_wakeup(struct xsk_buff_pool *pool)
92 {
93 	return pool->uses_need_wakeup;
94 }
95 EXPORT_SYMBOL(xsk_uses_need_wakeup);
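
/* Illustrative sketch (not part of this file): a zero-copy driver that opts
 * in to the need_wakeup feature typically flips these flags from its NAPI
 * poll loop, roughly like:
 *
 *	if (xsk_uses_need_wakeup(rx_ring->xsk_pool)) {
 *		if (failed_to_refill_fill_ring)
 *			xsk_set_rx_need_wakeup(rx_ring->xsk_pool);
 *		else
 *			xsk_clear_rx_need_wakeup(rx_ring->xsk_pool);
 *	}
 *
 * so that userspace only issues wakeup syscalls when the driver actually
 * needs them. rx_ring and failed_to_refill_fill_ring are placeholder names.
 */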
96 
97 struct xsk_buff_pool *xsk_get_pool_from_qid(struct net_device *dev,
98 					    u16 queue_id)
99 {
100 	if (queue_id < dev->real_num_rx_queues)
101 		return dev->_rx[queue_id].pool;
102 	if (queue_id < dev->real_num_tx_queues)
103 		return dev->_tx[queue_id].pool;
104 
105 	return NULL;
106 }
107 EXPORT_SYMBOL(xsk_get_pool_from_qid);
108 
109 void xsk_clear_pool_at_qid(struct net_device *dev, u16 queue_id)
110 {
111 	if (queue_id < dev->num_rx_queues)
112 		dev->_rx[queue_id].pool = NULL;
113 	if (queue_id < dev->num_tx_queues)
114 		dev->_tx[queue_id].pool = NULL;
115 }
116 
117 /* The buffer pool is stored both in the _rx struct and the _tx struct as we do
118  * not know if the device has more tx queues than rx, or the opposite.
119  * This might also change during run time.
120  */
121 int xsk_reg_pool_at_qid(struct net_device *dev, struct xsk_buff_pool *pool,
122 			u16 queue_id)
123 {
124 	if (queue_id >= max_t(unsigned int,
125 			      dev->real_num_rx_queues,
126 			      dev->real_num_tx_queues))
127 		return -EINVAL;
128 
129 	if (queue_id < dev->real_num_rx_queues)
130 		dev->_rx[queue_id].pool = pool;
131 	if (queue_id < dev->real_num_tx_queues)
132 		dev->_tx[queue_id].pool = pool;
133 
134 	return 0;
135 }
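
/* Registration flow, roughly: xp_assign_dev(), run at bind() time, publishes
 * the pool here, and a zero-copy capable driver can later fetch it for a
 * given queue. A hypothetical XDP_SETUP_XSK_POOL handler might look like:
 *
 *	static int mydrv_xsk_pool_setup(struct net_device *dev, u16 qid)
 *	{
 *		struct xsk_buff_pool *pool = xsk_get_pool_from_qid(dev, qid);
 *
 *		if (!pool)
 *			return -EINVAL;	// nothing bound to this queue
 *		// (re)configure the queue for zero-copy using pool
 *		return 0;
 *	}
 *
 * mydrv_xsk_pool_setup is made up for illustration; only
 * xsk_get_pool_from_qid() above is part of this file.
 */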
136 
137 static int __xsk_rcv_zc(struct xdp_sock *xs, struct xdp_buff *xdp, u32 len)
138 {
139 	struct xdp_buff_xsk *xskb = container_of(xdp, struct xdp_buff_xsk, xdp);
140 	u64 addr;
141 	int err;
142 
143 	addr = xp_get_handle(xskb);
144 	err = xskq_prod_reserve_desc(xs->rx, addr, len);
145 	if (err) {
146 		xs->rx_queue_full++;
147 		return err;
148 	}
149 
150 	xp_release(xskb);
151 	return 0;
152 }
153 
154 static void xsk_copy_xdp(struct xdp_buff *to, struct xdp_buff *from, u32 len)
155 {
156 	void *from_buf, *to_buf;
157 	u32 metalen;
158 
159 	if (unlikely(xdp_data_meta_unsupported(from))) {
160 		from_buf = from->data;
161 		to_buf = to->data;
162 		metalen = 0;
163 	} else {
164 		from_buf = from->data_meta;
165 		metalen = from->data - from->data_meta;
166 		to_buf = to->data - metalen;
167 	}
168 
169 	memcpy(to_buf, from_buf, len + metalen);
170 }
171 
172 static int __xsk_rcv(struct xdp_sock *xs, struct xdp_buff *xdp)
173 {
174 	struct xdp_buff *xsk_xdp;
175 	int err;
176 	u32 len;
177 
178 	len = xdp->data_end - xdp->data;
179 	if (len > xsk_pool_get_rx_frame_size(xs->pool)) {
180 		xs->rx_dropped++;
181 		return -ENOSPC;
182 	}
183 
184 	xsk_xdp = xsk_buff_alloc(xs->pool);
185 	if (!xsk_xdp) {
186 		xs->rx_dropped++;
187 		return -ENOMEM;
188 	}
189 
190 	xsk_copy_xdp(xsk_xdp, xdp, len);
191 	err = __xsk_rcv_zc(xs, xsk_xdp, len);
192 	if (err) {
193 		xsk_buff_free(xsk_xdp);
194 		return err;
195 	}
196 	return 0;
197 }
198 
199 static bool xsk_tx_writeable(struct xdp_sock *xs)
200 {
201 	if (xskq_cons_present_entries(xs->tx) > xs->tx->nentries / 2)
202 		return false;
203 
204 	return true;
205 }
206 
207 static bool xsk_is_bound(struct xdp_sock *xs)
208 {
209 	if (READ_ONCE(xs->state) == XSK_BOUND) {
210 		/* Matches smp_wmb() in bind(). */
211 		smp_rmb();
212 		return true;
213 	}
214 	return false;
215 }
216 
217 static int xsk_rcv_check(struct xdp_sock *xs, struct xdp_buff *xdp)
218 {
219 	if (!xsk_is_bound(xs))
220 		return -ENXIO;
221 
222 	if (xs->dev != xdp->rxq->dev || xs->queue_id != xdp->rxq->queue_index)
223 		return -EINVAL;
224 
225 	sk_mark_napi_id_once_xdp(&xs->sk, xdp);
226 	return 0;
227 }
228 
229 static void xsk_flush(struct xdp_sock *xs)
230 {
231 	xskq_prod_submit(xs->rx);
232 	__xskq_cons_release(xs->pool->fq);
233 	sock_def_readable(&xs->sk);
234 }
235 
236 int xsk_generic_rcv(struct xdp_sock *xs, struct xdp_buff *xdp)
237 {
238 	int err;
239 
240 	spin_lock_bh(&xs->rx_lock);
241 	err = xsk_rcv_check(xs, xdp);
242 	if (!err) {
243 		err = __xsk_rcv(xs, xdp);
244 		xsk_flush(xs);
245 	}
246 	spin_unlock_bh(&xs->rx_lock);
247 	return err;
248 }
249 
250 static int xsk_rcv(struct xdp_sock *xs, struct xdp_buff *xdp)
251 {
252 	int err;
253 	u32 len;
254 
255 	err = xsk_rcv_check(xs, xdp);
256 	if (err)
257 		return err;
258 
259 	if (xdp->rxq->mem.type == MEM_TYPE_XSK_BUFF_POOL) {
260 		len = xdp->data_end - xdp->data;
261 		return __xsk_rcv_zc(xs, xdp, len);
262 	}
263 
264 	err = __xsk_rcv(xs, xdp);
265 	if (!err)
266 		xdp_return_buff(xdp);
267 	return err;
268 }
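
/* RX entry points, roughly: xsk_generic_rcv() is reached from the generic
 * (skb-mode) XDP path and always copies, while xsk_rcv() is reached through
 * an XSKMAP redirect in native mode. In the latter case a frame that is
 * already backed by the socket's buffer pool (MEM_TYPE_XSK_BUFF_POOL) is
 * handed to userspace zero-copy via __xsk_rcv_zc(); anything else is copied
 * into a freshly allocated pool buffer by __xsk_rcv().
 */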
269 
270 int __xsk_map_redirect(struct xdp_sock *xs, struct xdp_buff *xdp)
271 {
272 	struct list_head *flush_list = this_cpu_ptr(&xskmap_flush_list);
273 	int err;
274 
275 	err = xsk_rcv(xs, xdp);
276 	if (err)
277 		return err;
278 
279 	if (!xs->flush_node.prev)
280 		list_add(&xs->flush_node, flush_list);
281 
282 	return 0;
283 }
284 
285 void __xsk_map_flush(void)
286 {
287 	struct list_head *flush_list = this_cpu_ptr(&xskmap_flush_list);
288 	struct xdp_sock *xs, *tmp;
289 
290 	list_for_each_entry_safe(xs, tmp, flush_list, flush_node) {
291 		xsk_flush(xs);
292 		__list_del_clearprev(&xs->flush_node);
293 	}
294 }
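
/* The per-cpu xskmap_flush_list batches completions: __xsk_map_redirect()
 * queues each destination socket at most once, and __xsk_map_flush() is meant
 * to be run from xdp_do_flush() at the end of the driver's NAPI batch,
 * publishing the RX ring producer pointer and waking userspace once per
 * socket instead of once per packet.
 */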
295 
296 void xsk_tx_completed(struct xsk_buff_pool *pool, u32 nb_entries)
297 {
298 	xskq_prod_submit_n(pool->cq, nb_entries);
299 }
300 EXPORT_SYMBOL(xsk_tx_completed);
301 
302 void xsk_tx_release(struct xsk_buff_pool *pool)
303 {
304 	struct xdp_sock *xs;
305 
306 	rcu_read_lock();
307 	list_for_each_entry_rcu(xs, &pool->xsk_tx_list, tx_list) {
308 		__xskq_cons_release(xs->tx);
309 		if (xsk_tx_writeable(xs))
310 			xs->sk.sk_write_space(&xs->sk);
311 	}
312 	rcu_read_unlock();
313 }
314 EXPORT_SYMBOL(xsk_tx_release);
315 
316 bool xsk_tx_peek_desc(struct xsk_buff_pool *pool, struct xdp_desc *desc)
317 {
318 	struct xdp_sock *xs;
319 
320 	rcu_read_lock();
321 	list_for_each_entry_rcu(xs, &pool->xsk_tx_list, tx_list) {
322 		if (!xskq_cons_peek_desc(xs->tx, desc, pool)) {
323 			xs->tx->queue_empty_descs++;
324 			continue;
325 		}
326 
327 		/* This is the backpressure mechanism for the Tx path.
328 		 * Reserve space in the completion queue and only proceed
329 		 * if there is space in it. This avoids having to implement
330 		 * any buffering in the Tx path.
331 		 */
332 		if (xskq_prod_reserve_addr(pool->cq, desc->addr))
333 			goto out;
334 
335 		xskq_cons_release(xs->tx);
336 		rcu_read_unlock();
337 		return true;
338 	}
339 
340 out:
341 	rcu_read_unlock();
342 	return false;
343 }
344 EXPORT_SYMBOL(xsk_tx_peek_desc);
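
/* Driver-side usage sketch (simplified, not taken from a specific driver):
 * a zero-copy TX path typically drains the socket's TX ring with this helper,
 * roughly:
 *
 *	struct xdp_desc desc;
 *
 *	while (budget-- && xsk_tx_peek_desc(pool, &desc)) {
 *		dma_addr_t dma = xsk_buff_raw_get_dma(pool, desc.addr);
 *
 *		xsk_buff_raw_dma_sync_for_device(pool, dma, desc.len);
 *		// post dma / desc.len to the hardware TX queue
 *	}
 *	xsk_tx_release(pool);
 *
 * On completion interrupts the driver then calls xsk_tx_completed() with the
 * number of finished descriptors so the addresses appear in the completion
 * ring. budget is a placeholder for the driver's TX budget.
 */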
345 
346 static u32 xsk_tx_peek_release_fallback(struct xsk_buff_pool *pool, u32 max_entries)
347 {
348 	struct xdp_desc *descs = pool->tx_descs;
349 	u32 nb_pkts = 0;
350 
351 	while (nb_pkts < max_entries && xsk_tx_peek_desc(pool, &descs[nb_pkts]))
352 		nb_pkts++;
353 
354 	xsk_tx_release(pool);
355 	return nb_pkts;
356 }
357 
358 u32 xsk_tx_peek_release_desc_batch(struct xsk_buff_pool *pool, u32 nb_pkts)
359 {
360 	struct xdp_sock *xs;
361 
362 	rcu_read_lock();
363 	if (!list_is_singular(&pool->xsk_tx_list)) {
364 		/* Fall back to the non-batched version */
365 		rcu_read_unlock();
366 		return xsk_tx_peek_release_fallback(pool, nb_pkts);
367 	}
368 
369 	xs = list_first_or_null_rcu(&pool->xsk_tx_list, struct xdp_sock, tx_list);
370 	if (!xs) {
371 		nb_pkts = 0;
372 		goto out;
373 	}
374 
375 	nb_pkts = xskq_cons_nb_entries(xs->tx, nb_pkts);
376 
377 	/* This is the backpressure mechanism for the Tx path. Try to
378 	 * reserve space in the completion queue for all packets, but
379 	 * if there are fewer slots available, just process that many
380 	 * packets. This avoids having to implement any buffering in
381 	 * the Tx path.
382 	 */
383 	nb_pkts = xskq_prod_nb_free(pool->cq, nb_pkts);
384 	if (!nb_pkts)
385 		goto out;
386 
387 	nb_pkts = xskq_cons_read_desc_batch(xs->tx, pool, nb_pkts);
388 	if (!nb_pkts) {
389 		xs->tx->queue_empty_descs++;
390 		goto out;
391 	}
392 
393 	__xskq_cons_release(xs->tx);
394 	xskq_prod_write_addr_batch(pool->cq, pool->tx_descs, nb_pkts);
395 	xs->sk.sk_write_space(&xs->sk);
396 
397 out:
398 	rcu_read_unlock();
399 	return nb_pkts;
400 }
401 EXPORT_SYMBOL(xsk_tx_peek_release_desc_batch);
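
/* Batched variant used by some zero-copy drivers: the descriptors end up in
 * pool->tx_descs, so a driver can do, schematically:
 *
 *	u32 i, nb = xsk_tx_peek_release_desc_batch(pool, budget);
 *
 *	for (i = 0; i < nb; i++) {
 *		dma_addr_t dma = xsk_buff_raw_get_dma(pool,
 *						      pool->tx_descs[i].addr);
 *		// fill one hardware descriptor from dma / tx_descs[i].len
 *	}
 *
 * When more than one socket transmits on the pool, the code above falls back
 * to the non-batched xsk_tx_peek_desc() path. budget is a placeholder.
 */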
402 
403 static int xsk_wakeup(struct xdp_sock *xs, u8 flags)
404 {
405 	struct net_device *dev = xs->dev;
406 
407 	return dev->netdev_ops->ndo_xsk_wakeup(dev, xs->queue_id, flags);
408 }
409 
410 static void xsk_destruct_skb(struct sk_buff *skb)
411 {
412 	u64 addr = (u64)(long)skb_shinfo(skb)->destructor_arg;
413 	struct xdp_sock *xs = xdp_sk(skb->sk);
414 	unsigned long flags;
415 
416 	spin_lock_irqsave(&xs->pool->cq_lock, flags);
417 	xskq_prod_submit_addr(xs->pool->cq, addr);
418 	spin_unlock_irqrestore(&xs->pool->cq_lock, flags);
419 
420 	sock_wfree(skb);
421 }
422 
423 static struct sk_buff *xsk_build_skb_zerocopy(struct xdp_sock *xs,
424 					      struct xdp_desc *desc)
425 {
426 	struct xsk_buff_pool *pool = xs->pool;
427 	u32 hr, len, ts, offset, copy, copied;
428 	struct sk_buff *skb;
429 	struct page *page;
430 	void *buffer;
431 	int err, i;
432 	u64 addr;
433 
434 	hr = max(NET_SKB_PAD, L1_CACHE_ALIGN(xs->dev->needed_headroom));
435 
436 	skb = sock_alloc_send_skb(&xs->sk, hr, 1, &err);
437 	if (unlikely(!skb))
438 		return ERR_PTR(err);
439 
440 	skb_reserve(skb, hr);
441 
442 	addr = desc->addr;
443 	len = desc->len;
444 	ts = pool->unaligned ? len : pool->chunk_size;
445 
446 	buffer = xsk_buff_raw_get_data(pool, addr);
447 	offset = offset_in_page(buffer);
448 	addr = buffer - pool->addrs;
449 
450 	for (copied = 0, i = 0; copied < len; i++) {
451 		page = pool->umem->pgs[addr >> PAGE_SHIFT];
452 		get_page(page);
453 
454 		copy = min_t(u32, PAGE_SIZE - offset, len - copied);
455 		skb_fill_page_desc(skb, i, page, offset, copy);
456 
457 		copied += copy;
458 		addr += copy;
459 		offset = 0;
460 	}
461 
462 	skb->len += len;
463 	skb->data_len += len;
464 	skb->truesize += ts;
465 
466 	refcount_add(ts, &xs->sk.sk_wmem_alloc);
467 
468 	return skb;
469 }
470 
471 static struct sk_buff *xsk_build_skb(struct xdp_sock *xs,
472 				     struct xdp_desc *desc)
473 {
474 	struct net_device *dev = xs->dev;
475 	struct sk_buff *skb;
476 
477 	if (dev->priv_flags & IFF_TX_SKB_NO_LINEAR) {
478 		skb = xsk_build_skb_zerocopy(xs, desc);
479 		if (IS_ERR(skb))
480 			return skb;
481 	} else {
482 		u32 hr, tr, len;
483 		void *buffer;
484 		int err;
485 
486 		hr = max(NET_SKB_PAD, L1_CACHE_ALIGN(dev->needed_headroom));
487 		tr = dev->needed_tailroom;
488 		len = desc->len;
489 
490 		skb = sock_alloc_send_skb(&xs->sk, hr + len + tr, 1, &err);
491 		if (unlikely(!skb))
492 			return ERR_PTR(err);
493 
494 		skb_reserve(skb, hr);
495 		skb_put(skb, len);
496 
497 		buffer = xsk_buff_raw_get_data(xs->pool, desc->addr);
498 		err = skb_store_bits(skb, 0, buffer, len);
499 		if (unlikely(err)) {
500 			kfree_skb(skb);
501 			return ERR_PTR(err);
502 		}
503 	}
504 
505 	skb->dev = dev;
506 	skb->priority = xs->sk.sk_priority;
507 	skb->mark = xs->sk.sk_mark;
508 	skb_shinfo(skb)->destructor_arg = (void *)(long)desc->addr;
509 	skb->destructor = xsk_destruct_skb;
510 
511 	return skb;
512 }
513 
514 static int xsk_generic_xmit(struct sock *sk)
515 {
516 	struct xdp_sock *xs = xdp_sk(sk);
517 	u32 max_batch = TX_BATCH_SIZE;
518 	bool sent_frame = false;
519 	struct xdp_desc desc;
520 	struct sk_buff *skb;
521 	unsigned long flags;
522 	int err = 0;
523 
524 	mutex_lock(&xs->mutex);
525 
526 	/* Since we dropped the RCU read lock, the socket state might have changed. */
527 	if (unlikely(!xsk_is_bound(xs))) {
528 		err = -ENXIO;
529 		goto out;
530 	}
531 
532 	if (xs->queue_id >= xs->dev->real_num_tx_queues)
533 		goto out;
534 
535 	while (xskq_cons_peek_desc(xs->tx, &desc, xs->pool)) {
536 		if (max_batch-- == 0) {
537 			err = -EAGAIN;
538 			goto out;
539 		}
540 
541 		/* This is the backpressure mechanism for the Tx path.
542 		 * Reserve space in the completion queue and only proceed
543 		 * if there is space in it. This avoids having to implement
544 		 * any buffering in the Tx path.
545 		 */
546 		spin_lock_irqsave(&xs->pool->cq_lock, flags);
547 		if (xskq_prod_reserve(xs->pool->cq)) {
548 			spin_unlock_irqrestore(&xs->pool->cq_lock, flags);
549 			goto out;
550 		}
551 		spin_unlock_irqrestore(&xs->pool->cq_lock, flags);
552 
553 		skb = xsk_build_skb(xs, &desc);
554 		if (IS_ERR(skb)) {
555 			err = PTR_ERR(skb);
556 			spin_lock_irqsave(&xs->pool->cq_lock, flags);
557 			xskq_prod_cancel(xs->pool->cq);
558 			spin_unlock_irqrestore(&xs->pool->cq_lock, flags);
559 			goto out;
560 		}
561 
562 		err = __dev_direct_xmit(skb, xs->queue_id);
563 		if  (err == NETDEV_TX_BUSY) {
564 			/* Tell user-space to retry the send */
565 			skb->destructor = sock_wfree;
566 			spin_lock_irqsave(&xs->pool->cq_lock, flags);
567 			xskq_prod_cancel(xs->pool->cq);
568 			spin_unlock_irqrestore(&xs->pool->cq_lock, flags);
569 			/* Free skb without triggering the perf drop trace */
570 			consume_skb(skb);
571 			err = -EAGAIN;
572 			goto out;
573 		}
574 
575 		xskq_cons_release(xs->tx);
576 		/* Ignore NET_XMIT_CN as the packet might have been sent */
577 		if (err == NET_XMIT_DROP) {
578 			/* SKB completed but not sent */
579 			err = -EBUSY;
580 			goto out;
581 		}
582 
583 		sent_frame = true;
584 	}
585 
586 	xs->tx->queue_empty_descs++;
587 
588 out:
589 	if (sent_frame)
590 		if (xsk_tx_writeable(xs))
591 			sk->sk_write_space(sk);
592 
593 	mutex_unlock(&xs->mutex);
594 	return err;
595 }
596 
597 static int xsk_xmit(struct sock *sk)
598 {
599 	struct xdp_sock *xs = xdp_sk(sk);
600 	int ret;
601 
602 	if (unlikely(!(xs->dev->flags & IFF_UP)))
603 		return -ENETDOWN;
604 	if (unlikely(!xs->tx))
605 		return -ENOBUFS;
606 
607 	if (xs->zc)
608 		return xsk_wakeup(xs, XDP_WAKEUP_TX);
609 
610 	/* Drop the RCU lock since the SKB path might sleep. */
611 	rcu_read_unlock();
612 	ret = xsk_generic_xmit(sk);
613 	/* Reacquire the RCU lock before going into common code. */
614 	rcu_read_lock();
615 
616 	return ret;
617 }
618 
619 static bool xsk_no_wakeup(struct sock *sk)
620 {
621 #ifdef CONFIG_NET_RX_BUSY_POLL
622 	/* Prefer busy-polling, skip the wakeup. */
623 	return READ_ONCE(sk->sk_prefer_busy_poll) && READ_ONCE(sk->sk_ll_usec) &&
624 		READ_ONCE(sk->sk_napi_id) >= MIN_NAPI_ID;
625 #else
626 	return false;
627 #endif
628 }
629 
630 static int __xsk_sendmsg(struct socket *sock, struct msghdr *m, size_t total_len)
631 {
632 	bool need_wait = !(m->msg_flags & MSG_DONTWAIT);
633 	struct sock *sk = sock->sk;
634 	struct xdp_sock *xs = xdp_sk(sk);
635 	struct xsk_buff_pool *pool;
636 
637 	if (unlikely(!xsk_is_bound(xs)))
638 		return -ENXIO;
639 	if (unlikely(need_wait))
640 		return -EOPNOTSUPP;
641 
642 	if (sk_can_busy_loop(sk)) {
643 		if (xs->zc)
644 			__sk_mark_napi_id_once(sk, xsk_pool_get_napi_id(xs->pool));
645 		sk_busy_loop(sk, 1); /* only support non-blocking sockets */
646 	}
647 
648 	if (xs->zc && xsk_no_wakeup(sk))
649 		return 0;
650 
651 	pool = xs->pool;
652 	if (pool->cached_need_wakeup & XDP_WAKEUP_TX)
653 		return xsk_xmit(sk);
654 	return 0;
655 }
656 
657 static int xsk_sendmsg(struct socket *sock, struct msghdr *m, size_t total_len)
658 {
659 	int ret;
660 
661 	rcu_read_lock();
662 	ret = __xsk_sendmsg(sock, m, total_len);
663 	rcu_read_unlock();
664 
665 	return ret;
666 }
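
/* Userspace counterpart, as an illustration: with XDP_USE_NEED_WAKEUP the
 * application only needs to kick the kernel when the TX ring's flags word has
 * XDP_RING_NEED_WAKEUP set, and the kick itself is an empty send:
 *
 *	if (tx_ring_flags & XDP_RING_NEED_WAKEUP)
 *		sendto(xsk_fd, NULL, 0, MSG_DONTWAIT, NULL, 0);
 *
 * In copy mode this ends up in xsk_generic_xmit() above; in zero-copy mode it
 * becomes an ndo_xsk_wakeup() call into the driver. tx_ring_flags stands for
 * the flags field of the mmapped TX ring, xsk_fd for the socket descriptor.
 */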
667 
668 static int __xsk_recvmsg(struct socket *sock, struct msghdr *m, size_t len, int flags)
669 {
670 	bool need_wait = !(flags & MSG_DONTWAIT);
671 	struct sock *sk = sock->sk;
672 	struct xdp_sock *xs = xdp_sk(sk);
673 
674 	if (unlikely(!xsk_is_bound(xs)))
675 		return -ENXIO;
676 	if (unlikely(!(xs->dev->flags & IFF_UP)))
677 		return -ENETDOWN;
678 	if (unlikely(!xs->rx))
679 		return -ENOBUFS;
680 	if (unlikely(need_wait))
681 		return -EOPNOTSUPP;
682 
683 	if (sk_can_busy_loop(sk))
684 		sk_busy_loop(sk, 1); /* only support non-blocking sockets */
685 
686 	if (xsk_no_wakeup(sk))
687 		return 0;
688 
689 	if (xs->pool->cached_need_wakeup & XDP_WAKEUP_RX && xs->zc)
690 		return xsk_wakeup(xs, XDP_WAKEUP_RX);
691 	return 0;
692 }
693 
694 static int xsk_recvmsg(struct socket *sock, struct msghdr *m, size_t len, int flags)
695 {
696 	int ret;
697 
698 	rcu_read_lock();
699 	ret = __xsk_recvmsg(sock, m, len, flags);
700 	rcu_read_unlock();
701 
702 	return ret;
703 }
704 
705 static __poll_t xsk_poll(struct file *file, struct socket *sock,
706 			     struct poll_table_struct *wait)
707 {
708 	__poll_t mask = 0;
709 	struct sock *sk = sock->sk;
710 	struct xdp_sock *xs = xdp_sk(sk);
711 	struct xsk_buff_pool *pool;
712 
713 	sock_poll_wait(file, sock, wait);
714 
715 	rcu_read_lock();
716 	if (unlikely(!xsk_is_bound(xs))) {
717 		rcu_read_unlock();
718 		return mask;
719 	}
720 
721 	pool = xs->pool;
722 
723 	if (pool->cached_need_wakeup) {
724 		if (xs->zc)
725 			xsk_wakeup(xs, pool->cached_need_wakeup);
726 		else
727 			/* Poll also needs to drive Tx in copy mode */
728 			xsk_xmit(sk);
729 	}
730 
731 	if (xs->rx && !xskq_prod_is_empty(xs->rx))
732 		mask |= EPOLLIN | EPOLLRDNORM;
733 	if (xs->tx && xsk_tx_writeable(xs))
734 		mask |= EPOLLOUT | EPOLLWRNORM;
735 
736 	rcu_read_unlock();
737 	return mask;
738 }
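
/* A blocking userspace loop would typically pair the rings with poll(),
 * e.g. (placeholders: xsk_fd, timeout_ms):
 *
 *	struct pollfd pfd = { .fd = xsk_fd, .events = POLLIN | POLLOUT };
 *
 *	poll(&pfd, 1, timeout_ms);
 *
 * As seen above, poll() doubles as the wakeup mechanism: it drives the copy
 * mode Tx path and issues driver wakeups when cached_need_wakeup is set.
 */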
739 
740 static int xsk_init_queue(u32 entries, struct xsk_queue **queue,
741 			  bool umem_queue)
742 {
743 	struct xsk_queue *q;
744 
745 	if (entries == 0 || *queue || !is_power_of_2(entries))
746 		return -EINVAL;
747 
748 	q = xskq_create(entries, umem_queue);
749 	if (!q)
750 		return -ENOMEM;
751 
752 	/* Make sure queue is ready before it can be seen by others */
753 	smp_wmb();
754 	WRITE_ONCE(*queue, q);
755 	return 0;
756 }
757 
758 static void xsk_unbind_dev(struct xdp_sock *xs)
759 {
760 	struct net_device *dev = xs->dev;
761 
762 	if (xs->state != XSK_BOUND)
763 		return;
764 	WRITE_ONCE(xs->state, XSK_UNBOUND);
765 
766 	/* Wait for driver to stop using the xdp socket. */
767 	xp_del_xsk(xs->pool, xs);
768 	synchronize_net();
769 	dev_put(dev);
770 }
771 
772 static struct xsk_map *xsk_get_map_list_entry(struct xdp_sock *xs,
773 					      struct xdp_sock __rcu ***map_entry)
774 {
775 	struct xsk_map *map = NULL;
776 	struct xsk_map_node *node;
777 
778 	*map_entry = NULL;
779 
780 	spin_lock_bh(&xs->map_list_lock);
781 	node = list_first_entry_or_null(&xs->map_list, struct xsk_map_node,
782 					node);
783 	if (node) {
784 		bpf_map_inc(&node->map->map);
785 		map = node->map;
786 		*map_entry = node->map_entry;
787 	}
788 	spin_unlock_bh(&xs->map_list_lock);
789 	return map;
790 }
791 
792 static void xsk_delete_from_maps(struct xdp_sock *xs)
793 {
794 	/* This function removes the current XDP socket from all the
795 	 * maps it resides in. We need to take extra care here, due to
796 	 * the two locks involved. Each map has a lock synchronizing
797 	 * updates to the entries, and each socket has a lock that
798 	 * synchronizes access to the list of maps (map_list). For
799 	 * deadlock avoidance the locks need to be taken in the order
800 	 * "map lock"->"socket map list lock". We start off by
801 	 * accessing the socket map list, and take a reference to the
802 	 * map to guarantee existence between the
803 	 * xsk_get_map_list_entry() and xsk_map_try_sock_delete()
804 	 * calls. Then we ask the map to remove the socket, which
805 	 * tries to remove the socket from the map. Note that there
806 	 * might be updates to the map between
807 	 * xsk_get_map_list_entry() and xsk_map_try_sock_delete().
808 	 */
809 	struct xdp_sock __rcu **map_entry = NULL;
810 	struct xsk_map *map;
811 
812 	while ((map = xsk_get_map_list_entry(xs, &map_entry))) {
813 		xsk_map_try_sock_delete(map, xs, map_entry);
814 		bpf_map_put(&map->map);
815 	}
816 }
817 
818 static int xsk_release(struct socket *sock)
819 {
820 	struct sock *sk = sock->sk;
821 	struct xdp_sock *xs = xdp_sk(sk);
822 	struct net *net;
823 
824 	if (!sk)
825 		return 0;
826 
827 	net = sock_net(sk);
828 
829 	mutex_lock(&net->xdp.lock);
830 	sk_del_node_init_rcu(sk);
831 	mutex_unlock(&net->xdp.lock);
832 
833 	sock_prot_inuse_add(net, sk->sk_prot, -1);
834 
835 	xsk_delete_from_maps(xs);
836 	mutex_lock(&xs->mutex);
837 	xsk_unbind_dev(xs);
838 	mutex_unlock(&xs->mutex);
839 
840 	xskq_destroy(xs->rx);
841 	xskq_destroy(xs->tx);
842 	xskq_destroy(xs->fq_tmp);
843 	xskq_destroy(xs->cq_tmp);
844 
845 	sock_orphan(sk);
846 	sock->sk = NULL;
847 
848 	sock_put(sk);
849 
850 	return 0;
851 }
852 
853 static struct socket *xsk_lookup_xsk_from_fd(int fd)
854 {
855 	struct socket *sock;
856 	int err;
857 
858 	sock = sockfd_lookup(fd, &err);
859 	if (!sock)
860 		return ERR_PTR(-ENOTSOCK);
861 
862 	if (sock->sk->sk_family != PF_XDP) {
863 		sockfd_put(sock);
864 		return ERR_PTR(-ENOPROTOOPT);
865 	}
866 
867 	return sock;
868 }
869 
870 static bool xsk_validate_queues(struct xdp_sock *xs)
871 {
872 	return xs->fq_tmp && xs->cq_tmp;
873 }
874 
875 static int xsk_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
876 {
877 	struct sockaddr_xdp *sxdp = (struct sockaddr_xdp *)addr;
878 	struct sock *sk = sock->sk;
879 	struct xdp_sock *xs = xdp_sk(sk);
880 	struct net_device *dev;
881 	u32 flags, qid;
882 	int err = 0;
883 
884 	if (addr_len < sizeof(struct sockaddr_xdp))
885 		return -EINVAL;
886 	if (sxdp->sxdp_family != AF_XDP)
887 		return -EINVAL;
888 
889 	flags = sxdp->sxdp_flags;
890 	if (flags & ~(XDP_SHARED_UMEM | XDP_COPY | XDP_ZEROCOPY |
891 		      XDP_USE_NEED_WAKEUP))
892 		return -EINVAL;
893 
894 	rtnl_lock();
895 	mutex_lock(&xs->mutex);
896 	if (xs->state != XSK_READY) {
897 		err = -EBUSY;
898 		goto out_release;
899 	}
900 
901 	dev = dev_get_by_index(sock_net(sk), sxdp->sxdp_ifindex);
902 	if (!dev) {
903 		err = -ENODEV;
904 		goto out_release;
905 	}
906 
907 	if (!xs->rx && !xs->tx) {
908 		err = -EINVAL;
909 		goto out_unlock;
910 	}
911 
912 	qid = sxdp->sxdp_queue_id;
913 
914 	if (flags & XDP_SHARED_UMEM) {
915 		struct xdp_sock *umem_xs;
916 		struct socket *sock;
917 
918 		if ((flags & XDP_COPY) || (flags & XDP_ZEROCOPY) ||
919 		    (flags & XDP_USE_NEED_WAKEUP)) {
920 			/* Cannot specify flags for shared sockets. */
921 			err = -EINVAL;
922 			goto out_unlock;
923 		}
924 
925 		if (xs->umem) {
926 			/* We already have our own. */
927 			err = -EINVAL;
928 			goto out_unlock;
929 		}
930 
931 		sock = xsk_lookup_xsk_from_fd(sxdp->sxdp_shared_umem_fd);
932 		if (IS_ERR(sock)) {
933 			err = PTR_ERR(sock);
934 			goto out_unlock;
935 		}
936 
937 		umem_xs = xdp_sk(sock->sk);
938 		if (!xsk_is_bound(umem_xs)) {
939 			err = -EBADF;
940 			sockfd_put(sock);
941 			goto out_unlock;
942 		}
943 
944 		if (umem_xs->queue_id != qid || umem_xs->dev != dev) {
945 			/* Share the umem with another socket on another qid
946 			 * and/or device.
947 			 */
948 			xs->pool = xp_create_and_assign_umem(xs,
949 							     umem_xs->umem);
950 			if (!xs->pool) {
951 				err = -ENOMEM;
952 				sockfd_put(sock);
953 				goto out_unlock;
954 			}
955 
956 			err = xp_assign_dev_shared(xs->pool, umem_xs, dev,
957 						   qid);
958 			if (err) {
959 				xp_destroy(xs->pool);
960 				xs->pool = NULL;
961 				sockfd_put(sock);
962 				goto out_unlock;
963 			}
964 		} else {
965 			/* Share the buffer pool with the other socket. */
966 			if (xs->fq_tmp || xs->cq_tmp) {
967 				/* Do not allow setting your own fq or cq. */
968 				err = -EINVAL;
969 				sockfd_put(sock);
970 				goto out_unlock;
971 			}
972 
973 			xp_get_pool(umem_xs->pool);
974 			xs->pool = umem_xs->pool;
975 
976 			/* If the underlying shared umem was created without a Tx
977 			 * ring, allocate the Tx descriptor array that the Tx
978 			 * batching API uses.
979 			 */
980 			if (xs->tx && !xs->pool->tx_descs) {
981 				err = xp_alloc_tx_descs(xs->pool, xs);
982 				if (err) {
983 					xp_put_pool(xs->pool);
984 					sockfd_put(sock);
985 					goto out_unlock;
986 				}
987 			}
988 		}
989 
990 		xdp_get_umem(umem_xs->umem);
991 		WRITE_ONCE(xs->umem, umem_xs->umem);
992 		sockfd_put(sock);
993 	} else if (!xs->umem || !xsk_validate_queues(xs)) {
994 		err = -EINVAL;
995 		goto out_unlock;
996 	} else {
997 		/* This xsk has its own umem. */
998 		xs->pool = xp_create_and_assign_umem(xs, xs->umem);
999 		if (!xs->pool) {
1000 			err = -ENOMEM;
1001 			goto out_unlock;
1002 		}
1003 
1004 		err = xp_assign_dev(xs->pool, dev, qid, flags);
1005 		if (err) {
1006 			xp_destroy(xs->pool);
1007 			xs->pool = NULL;
1008 			goto out_unlock;
1009 		}
1010 	}
1011 
1012 	/* FQ and CQ are now owned by the buffer pool and cleaned up with it. */
1013 	xs->fq_tmp = NULL;
1014 	xs->cq_tmp = NULL;
1015 
1016 	xs->dev = dev;
1017 	xs->zc = xs->umem->zc;
1018 	xs->queue_id = qid;
1019 	xp_add_xsk(xs->pool, xs);
1020 
1021 out_unlock:
1022 	if (err) {
1023 		dev_put(dev);
1024 	} else {
1025 		/* Matches smp_rmb() in bind() for shared umem
1026 		 * sockets, and xsk_is_bound().
1027 		 */
1028 		smp_wmb();
1029 		WRITE_ONCE(xs->state, XSK_BOUND);
1030 	}
1031 out_release:
1032 	mutex_unlock(&xs->mutex);
1033 	rtnl_unlock();
1034 	return err;
1035 }
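
/* Userspace bind example (illustrative only; "eth0" and xsk_fd are
 * placeholders):
 *
 *	struct sockaddr_xdp sxdp = {
 *		.sxdp_family = AF_XDP,
 *		.sxdp_ifindex = if_nametoindex("eth0"),
 *		.sxdp_queue_id = 0,
 *		.sxdp_flags = XDP_USE_NEED_WAKEUP,
 *	};
 *
 *	bind(xsk_fd, (struct sockaddr *)&sxdp, sizeof(sxdp));
 *
 * For XDP_SHARED_UMEM, sxdp_flags carries only that flag and
 * sxdp_shared_umem_fd names the socket that owns the umem.
 */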
1036 
1037 struct xdp_umem_reg_v1 {
1038 	__u64 addr; /* Start of packet data area */
1039 	__u64 len; /* Length of packet data area */
1040 	__u32 chunk_size;
1041 	__u32 headroom;
1042 };
1043 
1044 static int xsk_setsockopt(struct socket *sock, int level, int optname,
1045 			  sockptr_t optval, unsigned int optlen)
1046 {
1047 	struct sock *sk = sock->sk;
1048 	struct xdp_sock *xs = xdp_sk(sk);
1049 	int err;
1050 
1051 	if (level != SOL_XDP)
1052 		return -ENOPROTOOPT;
1053 
1054 	switch (optname) {
1055 	case XDP_RX_RING:
1056 	case XDP_TX_RING:
1057 	{
1058 		struct xsk_queue **q;
1059 		int entries;
1060 
1061 		if (optlen < sizeof(entries))
1062 			return -EINVAL;
1063 		if (copy_from_sockptr(&entries, optval, sizeof(entries)))
1064 			return -EFAULT;
1065 
1066 		mutex_lock(&xs->mutex);
1067 		if (xs->state != XSK_READY) {
1068 			mutex_unlock(&xs->mutex);
1069 			return -EBUSY;
1070 		}
1071 		q = (optname == XDP_TX_RING) ? &xs->tx : &xs->rx;
1072 		err = xsk_init_queue(entries, q, false);
1073 		if (!err && optname == XDP_TX_RING)
1074 			/* Tx needs to be explicitly woken up the first time */
1075 			xs->tx->ring->flags |= XDP_RING_NEED_WAKEUP;
1076 		mutex_unlock(&xs->mutex);
1077 		return err;
1078 	}
1079 	case XDP_UMEM_REG:
1080 	{
1081 		size_t mr_size = sizeof(struct xdp_umem_reg);
1082 		struct xdp_umem_reg mr = {};
1083 		struct xdp_umem *umem;
1084 
1085 		if (optlen < sizeof(struct xdp_umem_reg_v1))
1086 			return -EINVAL;
1087 		else if (optlen < sizeof(mr))
1088 			mr_size = sizeof(struct xdp_umem_reg_v1);
1089 
1090 		if (copy_from_sockptr(&mr, optval, mr_size))
1091 			return -EFAULT;
1092 
1093 		mutex_lock(&xs->mutex);
1094 		if (xs->state != XSK_READY || xs->umem) {
1095 			mutex_unlock(&xs->mutex);
1096 			return -EBUSY;
1097 		}
1098 
1099 		umem = xdp_umem_create(&mr);
1100 		if (IS_ERR(umem)) {
1101 			mutex_unlock(&xs->mutex);
1102 			return PTR_ERR(umem);
1103 		}
1104 
1105 		/* Make sure umem is ready before it can be seen by others */
1106 		smp_wmb();
1107 		WRITE_ONCE(xs->umem, umem);
1108 		mutex_unlock(&xs->mutex);
1109 		return 0;
1110 	}
1111 	case XDP_UMEM_FILL_RING:
1112 	case XDP_UMEM_COMPLETION_RING:
1113 	{
1114 		struct xsk_queue **q;
1115 		int entries;
1116 
1117 		if (copy_from_sockptr(&entries, optval, sizeof(entries)))
1118 			return -EFAULT;
1119 
1120 		mutex_lock(&xs->mutex);
1121 		if (xs->state != XSK_READY) {
1122 			mutex_unlock(&xs->mutex);
1123 			return -EBUSY;
1124 		}
1125 
1126 		q = (optname == XDP_UMEM_FILL_RING) ? &xs->fq_tmp :
1127 			&xs->cq_tmp;
1128 		err = xsk_init_queue(entries, q, true);
1129 		mutex_unlock(&xs->mutex);
1130 		return err;
1131 	}
1132 	default:
1133 		break;
1134 	}
1135 
1136 	return -ENOPROTOOPT;
1137 }
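
/* Typical userspace setup sequence against these options, as a sketch
 * (umem_area, umem_size and xsk_fd are placeholders):
 *
 *	struct xdp_umem_reg mr = {
 *		.addr = (__u64)(uintptr_t)umem_area,	// page-aligned buffer
 *		.len = umem_size,
 *		.chunk_size = 2048,
 *		.headroom = 0,
 *	};
 *	int entries = 2048;	// ring sizes must be powers of two
 *
 *	setsockopt(xsk_fd, SOL_XDP, XDP_UMEM_REG, &mr, sizeof(mr));
 *	setsockopt(xsk_fd, SOL_XDP, XDP_UMEM_FILL_RING, &entries, sizeof(entries));
 *	setsockopt(xsk_fd, SOL_XDP, XDP_UMEM_COMPLETION_RING, &entries, sizeof(entries));
 *	setsockopt(xsk_fd, SOL_XDP, XDP_RX_RING, &entries, sizeof(entries));
 *	setsockopt(xsk_fd, SOL_XDP, XDP_TX_RING, &entries, sizeof(entries));
 *
 * followed by mmap() of the rings and bind().
 */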
1138 
1139 static void xsk_enter_rxtx_offsets(struct xdp_ring_offset_v1 *ring)
1140 {
1141 	ring->producer = offsetof(struct xdp_rxtx_ring, ptrs.producer);
1142 	ring->consumer = offsetof(struct xdp_rxtx_ring, ptrs.consumer);
1143 	ring->desc = offsetof(struct xdp_rxtx_ring, desc);
1144 }
1145 
1146 static void xsk_enter_umem_offsets(struct xdp_ring_offset_v1 *ring)
1147 {
1148 	ring->producer = offsetof(struct xdp_umem_ring, ptrs.producer);
1149 	ring->consumer = offsetof(struct xdp_umem_ring, ptrs.consumer);
1150 	ring->desc = offsetof(struct xdp_umem_ring, desc);
1151 }
1152 
1153 struct xdp_statistics_v1 {
1154 	__u64 rx_dropped;
1155 	__u64 rx_invalid_descs;
1156 	__u64 tx_invalid_descs;
1157 };
1158 
1159 static int xsk_getsockopt(struct socket *sock, int level, int optname,
1160 			  char __user *optval, int __user *optlen)
1161 {
1162 	struct sock *sk = sock->sk;
1163 	struct xdp_sock *xs = xdp_sk(sk);
1164 	int len;
1165 
1166 	if (level != SOL_XDP)
1167 		return -ENOPROTOOPT;
1168 
1169 	if (get_user(len, optlen))
1170 		return -EFAULT;
1171 	if (len < 0)
1172 		return -EINVAL;
1173 
1174 	switch (optname) {
1175 	case XDP_STATISTICS:
1176 	{
1177 		struct xdp_statistics stats = {};
1178 		bool extra_stats = true;
1179 		size_t stats_size;
1180 
1181 		if (len < sizeof(struct xdp_statistics_v1)) {
1182 			return -EINVAL;
1183 		} else if (len < sizeof(stats)) {
1184 			extra_stats = false;
1185 			stats_size = sizeof(struct xdp_statistics_v1);
1186 		} else {
1187 			stats_size = sizeof(stats);
1188 		}
1189 
1190 		mutex_lock(&xs->mutex);
1191 		stats.rx_dropped = xs->rx_dropped;
1192 		if (extra_stats) {
1193 			stats.rx_ring_full = xs->rx_queue_full;
1194 			stats.rx_fill_ring_empty_descs =
1195 				xs->pool ? xskq_nb_queue_empty_descs(xs->pool->fq) : 0;
1196 			stats.tx_ring_empty_descs = xskq_nb_queue_empty_descs(xs->tx);
1197 		} else {
1198 			stats.rx_dropped += xs->rx_queue_full;
1199 		}
1200 		stats.rx_invalid_descs = xskq_nb_invalid_descs(xs->rx);
1201 		stats.tx_invalid_descs = xskq_nb_invalid_descs(xs->tx);
1202 		mutex_unlock(&xs->mutex);
1203 
1204 		if (copy_to_user(optval, &stats, stats_size))
1205 			return -EFAULT;
1206 		if (put_user(stats_size, optlen))
1207 			return -EFAULT;
1208 
1209 		return 0;
1210 	}
1211 	case XDP_MMAP_OFFSETS:
1212 	{
1213 		struct xdp_mmap_offsets off;
1214 		struct xdp_mmap_offsets_v1 off_v1;
1215 		bool flags_supported = true;
1216 		void *to_copy;
1217 
1218 		if (len < sizeof(off_v1))
1219 			return -EINVAL;
1220 		else if (len < sizeof(off))
1221 			flags_supported = false;
1222 
1223 		if (flags_supported) {
1224 			/* xdp_ring_offset is identical to xdp_ring_offset_v1
1225 			 * except for the flags field added to the end.
1226 			 */
1227 			xsk_enter_rxtx_offsets((struct xdp_ring_offset_v1 *)
1228 					       &off.rx);
1229 			xsk_enter_rxtx_offsets((struct xdp_ring_offset_v1 *)
1230 					       &off.tx);
1231 			xsk_enter_umem_offsets((struct xdp_ring_offset_v1 *)
1232 					       &off.fr);
1233 			xsk_enter_umem_offsets((struct xdp_ring_offset_v1 *)
1234 					       &off.cr);
1235 			off.rx.flags = offsetof(struct xdp_rxtx_ring,
1236 						ptrs.flags);
1237 			off.tx.flags = offsetof(struct xdp_rxtx_ring,
1238 						ptrs.flags);
1239 			off.fr.flags = offsetof(struct xdp_umem_ring,
1240 						ptrs.flags);
1241 			off.cr.flags = offsetof(struct xdp_umem_ring,
1242 						ptrs.flags);
1243 
1244 			len = sizeof(off);
1245 			to_copy = &off;
1246 		} else {
1247 			xsk_enter_rxtx_offsets(&off_v1.rx);
1248 			xsk_enter_rxtx_offsets(&off_v1.tx);
1249 			xsk_enter_umem_offsets(&off_v1.fr);
1250 			xsk_enter_umem_offsets(&off_v1.cr);
1251 
1252 			len = sizeof(off_v1);
1253 			to_copy = &off_v1;
1254 		}
1255 
1256 		if (copy_to_user(optval, to_copy, len))
1257 			return -EFAULT;
1258 		if (put_user(len, optlen))
1259 			return -EFAULT;
1260 
1261 		return 0;
1262 	}
1263 	case XDP_OPTIONS:
1264 	{
1265 		struct xdp_options opts = {};
1266 
1267 		if (len < sizeof(opts))
1268 			return -EINVAL;
1269 
1270 		mutex_lock(&xs->mutex);
1271 		if (xs->zc)
1272 			opts.flags |= XDP_OPTIONS_ZEROCOPY;
1273 		mutex_unlock(&xs->mutex);
1274 
1275 		len = sizeof(opts);
1276 		if (copy_to_user(optval, &opts, len))
1277 			return -EFAULT;
1278 		if (put_user(len, optlen))
1279 			return -EFAULT;
1280 
1281 		return 0;
1282 	}
1283 	default:
1284 		break;
1285 	}
1286 
1287 	return -EOPNOTSUPP;
1288 }
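
/* To map the rings, userspace first queries the offsets and then calls
 * mmap() with the fixed page offsets handled by xsk_mmap() below, e.g.
 * (xsk_fd, entries and rx_map are placeholders):
 *
 *	struct xdp_mmap_offsets off;
 *	socklen_t optlen = sizeof(off);
 *
 *	getsockopt(xsk_fd, SOL_XDP, XDP_MMAP_OFFSETS, &off, &optlen);
 *	rx_map = mmap(NULL, off.rx.desc + entries * sizeof(struct xdp_desc),
 *		      PROT_READ | PROT_WRITE, MAP_SHARED | MAP_POPULATE,
 *		      xsk_fd, XDP_PGOFF_RX_RING);
 *
 * The producer, consumer, descriptor array and flags fields then live at
 * off.rx.producer etc. within that mapping.
 */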
1289 
1290 static int xsk_mmap(struct file *file, struct socket *sock,
1291 		    struct vm_area_struct *vma)
1292 {
1293 	loff_t offset = (loff_t)vma->vm_pgoff << PAGE_SHIFT;
1294 	unsigned long size = vma->vm_end - vma->vm_start;
1295 	struct xdp_sock *xs = xdp_sk(sock->sk);
1296 	struct xsk_queue *q = NULL;
1297 	unsigned long pfn;
1298 	struct page *qpg;
1299 
1300 	if (READ_ONCE(xs->state) != XSK_READY)
1301 		return -EBUSY;
1302 
1303 	if (offset == XDP_PGOFF_RX_RING) {
1304 		q = READ_ONCE(xs->rx);
1305 	} else if (offset == XDP_PGOFF_TX_RING) {
1306 		q = READ_ONCE(xs->tx);
1307 	} else {
1308 		/* Matches the smp_wmb() in XDP_UMEM_REG */
1309 		smp_rmb();
1310 		if (offset == XDP_UMEM_PGOFF_FILL_RING)
1311 			q = READ_ONCE(xs->fq_tmp);
1312 		else if (offset == XDP_UMEM_PGOFF_COMPLETION_RING)
1313 			q = READ_ONCE(xs->cq_tmp);
1314 	}
1315 
1316 	if (!q)
1317 		return -EINVAL;
1318 
1319 	/* Matches the smp_wmb() in xsk_init_queue */
1320 	smp_rmb();
1321 	qpg = virt_to_head_page(q->ring);
1322 	if (size > page_size(qpg))
1323 		return -EINVAL;
1324 
1325 	pfn = virt_to_phys(q->ring) >> PAGE_SHIFT;
1326 	return remap_pfn_range(vma, vma->vm_start, pfn,
1327 			       size, vma->vm_page_prot);
1328 }
1329 
1330 static int xsk_notifier(struct notifier_block *this,
1331 			unsigned long msg, void *ptr)
1332 {
1333 	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
1334 	struct net *net = dev_net(dev);
1335 	struct sock *sk;
1336 
1337 	switch (msg) {
1338 	case NETDEV_UNREGISTER:
1339 		mutex_lock(&net->xdp.lock);
1340 		sk_for_each(sk, &net->xdp.list) {
1341 			struct xdp_sock *xs = xdp_sk(sk);
1342 
1343 			mutex_lock(&xs->mutex);
1344 			if (xs->dev == dev) {
1345 				sk->sk_err = ENETDOWN;
1346 				if (!sock_flag(sk, SOCK_DEAD))
1347 					sk_error_report(sk);
1348 
1349 				xsk_unbind_dev(xs);
1350 
1351 				/* Clear device references. */
1352 				xp_clear_dev(xs->pool);
1353 			}
1354 			mutex_unlock(&xs->mutex);
1355 		}
1356 		mutex_unlock(&net->xdp.lock);
1357 		break;
1358 	}
1359 	return NOTIFY_DONE;
1360 }
1361 
1362 static struct proto xsk_proto = {
1363 	.name =		"XDP",
1364 	.owner =	THIS_MODULE,
1365 	.obj_size =	sizeof(struct xdp_sock),
1366 };
1367 
1368 static const struct proto_ops xsk_proto_ops = {
1369 	.family		= PF_XDP,
1370 	.owner		= THIS_MODULE,
1371 	.release	= xsk_release,
1372 	.bind		= xsk_bind,
1373 	.connect	= sock_no_connect,
1374 	.socketpair	= sock_no_socketpair,
1375 	.accept		= sock_no_accept,
1376 	.getname	= sock_no_getname,
1377 	.poll		= xsk_poll,
1378 	.ioctl		= sock_no_ioctl,
1379 	.listen		= sock_no_listen,
1380 	.shutdown	= sock_no_shutdown,
1381 	.setsockopt	= xsk_setsockopt,
1382 	.getsockopt	= xsk_getsockopt,
1383 	.sendmsg	= xsk_sendmsg,
1384 	.recvmsg	= xsk_recvmsg,
1385 	.mmap		= xsk_mmap,
1386 	.sendpage	= sock_no_sendpage,
1387 };
1388 
1389 static void xsk_destruct(struct sock *sk)
1390 {
1391 	struct xdp_sock *xs = xdp_sk(sk);
1392 
1393 	if (!sock_flag(sk, SOCK_DEAD))
1394 		return;
1395 
1396 	if (!xp_put_pool(xs->pool))
1397 		xdp_put_umem(xs->umem, !xs->pool);
1398 }
1399 
1400 static int xsk_create(struct net *net, struct socket *sock, int protocol,
1401 		      int kern)
1402 {
1403 	struct xdp_sock *xs;
1404 	struct sock *sk;
1405 
1406 	if (!ns_capable(net->user_ns, CAP_NET_RAW))
1407 		return -EPERM;
1408 	if (sock->type != SOCK_RAW)
1409 		return -ESOCKTNOSUPPORT;
1410 
1411 	if (protocol)
1412 		return -EPROTONOSUPPORT;
1413 
1414 	sock->state = SS_UNCONNECTED;
1415 
1416 	sk = sk_alloc(net, PF_XDP, GFP_KERNEL, &xsk_proto, kern);
1417 	if (!sk)
1418 		return -ENOBUFS;
1419 
1420 	sock->ops = &xsk_proto_ops;
1421 
1422 	sock_init_data(sock, sk);
1423 
1424 	sk->sk_family = PF_XDP;
1425 
1426 	sk->sk_destruct = xsk_destruct;
1427 
1428 	sock_set_flag(sk, SOCK_RCU_FREE);
1429 
1430 	xs = xdp_sk(sk);
1431 	xs->state = XSK_READY;
1432 	mutex_init(&xs->mutex);
1433 	spin_lock_init(&xs->rx_lock);
1434 
1435 	INIT_LIST_HEAD(&xs->map_list);
1436 	spin_lock_init(&xs->map_list_lock);
1437 
1438 	mutex_lock(&net->xdp.lock);
1439 	sk_add_node_rcu(sk, &net->xdp.list);
1440 	mutex_unlock(&net->xdp.lock);
1441 
1442 	sock_prot_inuse_add(net, &xsk_proto, 1);
1443 
1444 	return 0;
1445 }
1446 
1447 static const struct net_proto_family xsk_family_ops = {
1448 	.family = PF_XDP,
1449 	.create = xsk_create,
1450 	.owner	= THIS_MODULE,
1451 };
1452 
1453 static struct notifier_block xsk_netdev_notifier = {
1454 	.notifier_call	= xsk_notifier,
1455 };
1456 
1457 static int __net_init xsk_net_init(struct net *net)
1458 {
1459 	mutex_init(&net->xdp.lock);
1460 	INIT_HLIST_HEAD(&net->xdp.list);
1461 	return 0;
1462 }
1463 
1464 static void __net_exit xsk_net_exit(struct net *net)
1465 {
1466 	WARN_ON_ONCE(!hlist_empty(&net->xdp.list));
1467 }
1468 
1469 static struct pernet_operations xsk_net_ops = {
1470 	.init = xsk_net_init,
1471 	.exit = xsk_net_exit,
1472 };
1473 
1474 static int __init xsk_init(void)
1475 {
1476 	int err, cpu;
1477 
1478 	err = proto_register(&xsk_proto, 0 /* no slab */);
1479 	if (err)
1480 		goto out;
1481 
1482 	err = sock_register(&xsk_family_ops);
1483 	if (err)
1484 		goto out_proto;
1485 
1486 	err = register_pernet_subsys(&xsk_net_ops);
1487 	if (err)
1488 		goto out_sk;
1489 
1490 	err = register_netdevice_notifier(&xsk_netdev_notifier);
1491 	if (err)
1492 		goto out_pernet;
1493 
1494 	for_each_possible_cpu(cpu)
1495 		INIT_LIST_HEAD(&per_cpu(xskmap_flush_list, cpu));
1496 	return 0;
1497 
1498 out_pernet:
1499 	unregister_pernet_subsys(&xsk_net_ops);
1500 out_sk:
1501 	sock_unregister(PF_XDP);
1502 out_proto:
1503 	proto_unregister(&xsk_proto);
1504 out:
1505 	return err;
1506 }
1507 
1508 fs_initcall(xsk_init);
1509