// SPDX-License-Identifier: GPL-2.0
/* XDP sockets
 *
 * AF_XDP sockets allow a channel between XDP programs and userspace
 * applications.
 * Copyright(c) 2018 Intel Corporation.
 *
 * Author(s): Björn Töpel <bjorn.topel@intel.com>
 *	      Magnus Karlsson <magnus.karlsson@intel.com>
 */

#define pr_fmt(fmt) "AF_XDP: %s: " fmt, __func__

#include <linux/if_xdp.h>
#include <linux/init.h>
#include <linux/sched/mm.h>
#include <linux/sched/signal.h>
#include <linux/sched/task.h>
#include <linux/socket.h>
#include <linux/file.h>
#include <linux/uaccess.h>
#include <linux/net.h>
#include <linux/netdevice.h>
#include <linux/rculist.h>
#include <net/xdp_sock.h>
#include <net/xdp.h>

#include "xsk_queue.h"
#include "xdp_umem.h"
#include "xsk.h"

#define TX_BATCH_SIZE 16

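/* Used when a socket is inserted into an XSKMAP: a socket is only usable
 * as a redirect target once an RX ring, a umem and a fill queue are all
 * in place.
 */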
bool xsk_is_setup_for_bpf_map(struct xdp_sock *xs)
{
	return READ_ONCE(xs->rx) && READ_ONCE(xs->umem) &&
		READ_ONCE(xs->umem->fq);
}

bool xsk_umem_has_addrs(struct xdp_umem *umem, u32 cnt)
{
	return xskq_has_addrs(umem->fq, cnt);
}
EXPORT_SYMBOL(xsk_umem_has_addrs);

u64 *xsk_umem_peek_addr(struct xdp_umem *umem, u64 *addr)
{
	return xskq_peek_addr(umem->fq, addr);
}
EXPORT_SYMBOL(xsk_umem_peek_addr);

void xsk_umem_discard_addr(struct xdp_umem *umem)
{
	xskq_discard_addr(umem->fq);
}
EXPORT_SYMBOL(xsk_umem_discard_addr);

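/* Copy-mode RX: reserve a chunk address from the fill queue, copy the
 * packet (and any XDP metadata in front of it) into the umem chunk, then
 * publish a descriptor on the RX ring. Frames that do not fit in a chunk
 * are dropped and accounted in rx_dropped.
 */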
static int __xsk_rcv(struct xdp_sock *xs, struct xdp_buff *xdp, u32 len)
{
	void *to_buf, *from_buf;
	u32 metalen;
	u64 addr;
	int err;

	if (!xskq_peek_addr(xs->umem->fq, &addr) ||
	    len > xs->umem->chunk_size_nohr - XDP_PACKET_HEADROOM) {
		xs->rx_dropped++;
		return -ENOSPC;
	}

	addr += xs->umem->headroom;

	if (unlikely(xdp_data_meta_unsupported(xdp))) {
		from_buf = xdp->data;
		metalen = 0;
	} else {
		from_buf = xdp->data_meta;
		metalen = xdp->data - xdp->data_meta;
	}

	to_buf = xdp_umem_get_data(xs->umem, addr);
	memcpy(to_buf, from_buf, len + metalen);
	addr += metalen;
	err = xskq_produce_batch_desc(xs->rx, addr, len);
	if (!err) {
		xskq_discard_addr(xs->umem->fq);
		xdp_return_buff(xdp);
		return 0;
	}

	xs->rx_dropped++;
	return err;
}

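/* Zero-copy RX: the frame already lives in the umem, so only the buffer
 * handle and length need to be posted on the RX ring.
 */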
static int __xsk_rcv_zc(struct xdp_sock *xs, struct xdp_buff *xdp, u32 len)
{
	int err = xskq_produce_batch_desc(xs->rx, (u64)xdp->handle, len);

	if (err)
		xs->rx_dropped++;

	return err;
}

int xsk_rcv(struct xdp_sock *xs, struct xdp_buff *xdp)
{
	u32 len;

	if (xs->dev != xdp->rxq->dev || xs->queue_id != xdp->rxq->queue_index)
		return -EINVAL;

	len = xdp->data_end - xdp->data;

	return (xdp->rxq->mem.type == MEM_TYPE_ZERO_COPY) ?
		__xsk_rcv_zc(xs, xdp, len) : __xsk_rcv(xs, xdp, len);
}

void xsk_flush(struct xdp_sock *xs)
{
	xskq_produce_flush_desc(xs->rx);
	xs->sk.sk_data_ready(&xs->sk);
}

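/* Entry point for the generic (skb-based) XDP path. Unlike xsk_rcv(),
 * this may be reached concurrently for the same socket, so rx_lock
 * serializes access to the fill queue and RX ring.
 */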
int xsk_generic_rcv(struct xdp_sock *xs, struct xdp_buff *xdp)
{
	u32 metalen = xdp->data - xdp->data_meta;
	u32 len = xdp->data_end - xdp->data;
	void *buffer;
	u64 addr;
	int err;

	spin_lock_bh(&xs->rx_lock);

	if (xs->dev != xdp->rxq->dev || xs->queue_id != xdp->rxq->queue_index) {
		err = -EINVAL;
		goto out_unlock;
	}

	if (!xskq_peek_addr(xs->umem->fq, &addr) ||
	    len > xs->umem->chunk_size_nohr - XDP_PACKET_HEADROOM) {
		err = -ENOSPC;
		goto out_drop;
	}

	addr += xs->umem->headroom;

	buffer = xdp_umem_get_data(xs->umem, addr);
	memcpy(buffer, xdp->data_meta, len + metalen);
	addr += metalen;
	err = xskq_produce_batch_desc(xs->rx, addr, len);
	if (err)
		goto out_drop;

	xskq_discard_addr(xs->umem->fq);
	xskq_produce_flush_desc(xs->rx);

	spin_unlock_bh(&xs->rx_lock);

	xs->sk.sk_data_ready(&xs->sk);
	return 0;

out_drop:
	xs->rx_dropped++;
out_unlock:
	spin_unlock_bh(&xs->rx_lock);
	return err;
}

void xsk_umem_complete_tx(struct xdp_umem *umem, u32 nb_entries)
{
	xskq_produce_flush_addr_n(umem->cq, nb_entries);
}
EXPORT_SYMBOL(xsk_umem_complete_tx);

void xsk_umem_consume_tx_done(struct xdp_umem *umem)
{
	struct xdp_sock *xs;

	rcu_read_lock();
	list_for_each_entry_rcu(xs, &umem->xsk_list, list) {
		xs->sk.sk_write_space(&xs->sk);
	}
	rcu_read_unlock();
}
EXPORT_SYMBOL(xsk_umem_consume_tx_done);

bool xsk_umem_consume_tx(struct xdp_umem *umem, struct xdp_desc *desc)
{
	struct xdp_sock *xs;

	rcu_read_lock();
	list_for_each_entry_rcu(xs, &umem->xsk_list, list) {
		if (!xskq_peek_desc(xs->tx, desc))
			continue;

		if (xskq_produce_addr_lazy(umem->cq, desc->addr))
			goto out;

		xskq_discard_desc(xs->tx);
		rcu_read_unlock();
		return true;
	}

out:
	rcu_read_unlock();
	return false;
}
EXPORT_SYMBOL(xsk_umem_consume_tx);

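/* Zero-copy TX: descriptors are consumed directly by the driver, so all
 * that is needed here is to kick it via ndo_xsk_async_xmit().
 */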
static int xsk_zc_xmit(struct sock *sk)
{
	struct xdp_sock *xs = xdp_sk(sk);
	struct net_device *dev = xs->dev;

	return dev->netdev_ops->ndo_xsk_async_xmit(dev, xs->queue_id);
}

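/* Runs when a copy-mode TX skb is freed: the originating chunk address is
 * published on the completion ring so userspace can reuse the chunk. The
 * irqsave spinlock guards the ring against concurrent completions, which
 * may arrive from interrupt context.
 */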
static void xsk_destruct_skb(struct sk_buff *skb)
{
	u64 addr = (u64)(long)skb_shinfo(skb)->destructor_arg;
	struct xdp_sock *xs = xdp_sk(skb->sk);
	unsigned long flags;

	spin_lock_irqsave(&xs->tx_completion_lock, flags);
	WARN_ON_ONCE(xskq_produce_addr(xs->umem->cq, addr));
	spin_unlock_irqrestore(&xs->tx_completion_lock, flags);

	sock_wfree(skb);
}

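/* Copy-mode TX: drain up to TX_BATCH_SIZE descriptors from the TX ring,
 * copy each frame out of the umem into a freshly allocated skb and send
 * it on the socket's queue with dev_direct_xmit(). A completion ring
 * slot is reserved up front so that xsk_destruct_skb() can never fail to
 * post the completion.
 */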
static int xsk_generic_xmit(struct sock *sk, struct msghdr *m,
			    size_t total_len)
{
	u32 max_batch = TX_BATCH_SIZE;
	struct xdp_sock *xs = xdp_sk(sk);
	bool sent_frame = false;
	struct xdp_desc desc;
	struct sk_buff *skb;
	int err = 0;

	mutex_lock(&xs->mutex);

	while (xskq_peek_desc(xs->tx, &desc)) {
		char *buffer;
		u64 addr;
		u32 len;

		if (max_batch-- == 0) {
			err = -EAGAIN;
			goto out;
		}

		if (xskq_reserve_addr(xs->umem->cq))
			goto out;

		if (xs->queue_id >= xs->dev->real_num_tx_queues)
			goto out;

		len = desc.len;
		skb = sock_alloc_send_skb(sk, len, 1, &err);
		if (unlikely(!skb)) {
			err = -EAGAIN;
			goto out;
		}

		skb_put(skb, len);
		addr = desc.addr;
		buffer = xdp_umem_get_data(xs->umem, addr);
		err = skb_store_bits(skb, 0, buffer, len);
		if (unlikely(err)) {
			kfree_skb(skb);
			goto out;
		}

		skb->dev = xs->dev;
		skb->priority = sk->sk_priority;
		skb->mark = sk->sk_mark;
		skb_shinfo(skb)->destructor_arg = (void *)(long)addr;
		skb->destructor = xsk_destruct_skb;

		err = dev_direct_xmit(skb, xs->queue_id);
		xskq_discard_desc(xs->tx);
		/* Ignore NET_XMIT_CN as packet might have been sent */
		if (err == NET_XMIT_DROP || err == NETDEV_TX_BUSY) {
			/* SKB completed but not sent */
			err = -EBUSY;
			goto out;
		}

		sent_frame = true;
	}

out:
	if (sent_frame)
		sk->sk_write_space(sk);

	mutex_unlock(&xs->mutex);
	return err;
}

static int xsk_sendmsg(struct socket *sock, struct msghdr *m, size_t total_len)
{
	bool need_wait = !(m->msg_flags & MSG_DONTWAIT);
	struct sock *sk = sock->sk;
	struct xdp_sock *xs = xdp_sk(sk);

	if (unlikely(!xs->dev))
		return -ENXIO;
	if (unlikely(!(xs->dev->flags & IFF_UP)))
		return -ENETDOWN;
	if (unlikely(!xs->tx))
		return -ENOBUFS;
	if (need_wait)
		return -EOPNOTSUPP;

	return (xs->zc) ? xsk_zc_xmit(sk) : xsk_generic_xmit(sk, m, total_len);
}

static unsigned int xsk_poll(struct file *file, struct socket *sock,
			     struct poll_table_struct *wait)
{
	unsigned int mask = datagram_poll(file, sock, wait);
	struct sock *sk = sock->sk;
	struct xdp_sock *xs = xdp_sk(sk);

	if (xs->rx && !xskq_empty_desc(xs->rx))
		mask |= POLLIN | POLLRDNORM;
	if (xs->tx && !xskq_full_desc(xs->tx))
		mask |= POLLOUT | POLLWRNORM;

	return mask;
}

static int xsk_init_queue(u32 entries, struct xsk_queue **queue,
			  bool umem_queue)
{
	struct xsk_queue *q;

	if (entries == 0 || *queue || !is_power_of_2(entries))
		return -EINVAL;

	q = xskq_create(entries, umem_queue);
	if (!q)
		return -ENOMEM;

	/* Make sure queue is ready before it can be seen by others */
	smp_wmb();
	*queue = q;
	return 0;
}

static void xsk_unbind_dev(struct xdp_sock *xs)
{
	struct net_device *dev = xs->dev;

	if (!dev || xs->state != XSK_BOUND)
		return;

	xs->state = XSK_UNBOUND;

	/* Wait for driver to stop using the xdp socket. */
	xdp_del_sk_umem(xs->umem, xs);
	xs->dev = NULL;
	synchronize_net();
	dev_put(dev);
}

static int xsk_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct xdp_sock *xs = xdp_sk(sk);
	struct net *net;

	if (!sk)
		return 0;

	net = sock_net(sk);

	mutex_lock(&net->xdp.lock);
	sk_del_node_init_rcu(sk);
	mutex_unlock(&net->xdp.lock);

	local_bh_disable();
	sock_prot_inuse_add(net, sk->sk_prot, -1);
	local_bh_enable();

	xsk_unbind_dev(xs);

	xskq_destroy(xs->rx);
	xskq_destroy(xs->tx);

	sock_orphan(sk);
	sock->sk = NULL;

	sk_refcnt_debug_release(sk);
	sock_put(sk);

	return 0;
}

static struct socket *xsk_lookup_xsk_from_fd(int fd)
{
	struct socket *sock;
	int err;

	sock = sockfd_lookup(fd, &err);
	if (!sock)
		return ERR_PTR(-ENOTSOCK);

	if (sock->sk->sk_family != PF_XDP) {
		sockfd_put(sock);
		return ERR_PTR(-ENOPROTOOPT);
	}

	return sock;
}

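/* For reference, a minimal userspace sketch of the bind step handled
 * below (illustrative only; error handling omitted, and the interface
 * name and queue id are placeholders):
 *
 *	struct sockaddr_xdp sxdp = {
 *		.sxdp_family = AF_XDP,
 *		.sxdp_ifindex = if_nametoindex("eth0"),
 *		.sxdp_queue_id = 0,
 *		.sxdp_flags = XDP_COPY,
 *	};
 *	int fd = socket(AF_XDP, SOCK_RAW, 0);
 *
 *	// Rings and umem must be configured via setsockopt()/mmap()
 *	// before bind() will succeed (see xsk_setsockopt() below).
 *	bind(fd, (struct sockaddr *)&sxdp, sizeof(sxdp));
 */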
static int xsk_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
{
	struct sockaddr_xdp *sxdp = (struct sockaddr_xdp *)addr;
	struct sock *sk = sock->sk;
	struct xdp_sock *xs = xdp_sk(sk);
	struct net_device *dev;
	u32 flags, qid;
	int err = 0;

	if (addr_len < sizeof(struct sockaddr_xdp))
		return -EINVAL;
	if (sxdp->sxdp_family != AF_XDP)
		return -EINVAL;

	flags = sxdp->sxdp_flags;
	if (flags & ~(XDP_SHARED_UMEM | XDP_COPY | XDP_ZEROCOPY))
		return -EINVAL;

	mutex_lock(&xs->mutex);
	if (xs->state != XSK_READY) {
		err = -EBUSY;
		goto out_release;
	}

	dev = dev_get_by_index(sock_net(sk), sxdp->sxdp_ifindex);
	if (!dev) {
		err = -ENODEV;
		goto out_release;
	}

	if (!xs->rx && !xs->tx) {
		err = -EINVAL;
		goto out_unlock;
	}

	qid = sxdp->sxdp_queue_id;

	if (flags & XDP_SHARED_UMEM) {
		struct xdp_sock *umem_xs;
		struct socket *sock;

		if ((flags & XDP_COPY) || (flags & XDP_ZEROCOPY)) {
			/* Cannot specify flags for shared sockets. */
			err = -EINVAL;
			goto out_unlock;
		}

		if (xs->umem) {
			/* We already have our own. */
			err = -EINVAL;
			goto out_unlock;
		}

		sock = xsk_lookup_xsk_from_fd(sxdp->sxdp_shared_umem_fd);
		if (IS_ERR(sock)) {
			err = PTR_ERR(sock);
			goto out_unlock;
		}

		umem_xs = xdp_sk(sock->sk);
		if (!umem_xs->umem) {
			/* No umem to inherit. */
			err = -EBADF;
			sockfd_put(sock);
			goto out_unlock;
		} else if (umem_xs->dev != dev || umem_xs->queue_id != qid) {
			err = -EINVAL;
			sockfd_put(sock);
			goto out_unlock;
		}

		xdp_get_umem(umem_xs->umem);
		xs->umem = umem_xs->umem;
		sockfd_put(sock);
	} else if (!xs->umem || !xdp_umem_validate_queues(xs->umem)) {
		err = -EINVAL;
		goto out_unlock;
	} else {
		/* This xsk has its own umem. */
		xskq_set_umem(xs->umem->fq, xs->umem->size,
			      xs->umem->chunk_mask);
		xskq_set_umem(xs->umem->cq, xs->umem->size,
			      xs->umem->chunk_mask);

		err = xdp_umem_assign_dev(xs->umem, dev, qid, flags);
		if (err)
			goto out_unlock;
	}

	xs->dev = dev;
	xs->zc = xs->umem->zc;
	xs->queue_id = qid;
	xskq_set_umem(xs->rx, xs->umem->size, xs->umem->chunk_mask);
	xskq_set_umem(xs->tx, xs->umem->size, xs->umem->chunk_mask);
	xdp_add_sk_umem(xs->umem, xs);

out_unlock:
	if (err)
		dev_put(dev);
	else
		xs->state = XSK_BOUND;
out_release:
	mutex_unlock(&xs->mutex);
	return err;
}

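/* Socket options drive the setup sequence. A typical userspace flow,
 * sketched here for orientation (bufs, NUM_CHUNKS and CHUNK_SIZE are
 * placeholders; the sizes are examples, not requirements):
 *
 *	// Register packet memory, then size the four rings.
 *	struct xdp_umem_reg mr = {
 *		.addr = (__u64)(uintptr_t)bufs,	// page-aligned buffer area
 *		.len = NUM_CHUNKS * CHUNK_SIZE,
 *		.chunk_size = CHUNK_SIZE,	// e.g. 2048
 *		.headroom = 0,
 *	};
 *	setsockopt(fd, SOL_XDP, XDP_UMEM_REG, &mr, sizeof(mr));
 *
 *	int entries = 2048;	// must be a power of two
 *	setsockopt(fd, SOL_XDP, XDP_UMEM_FILL_RING, &entries, sizeof(entries));
 *	setsockopt(fd, SOL_XDP, XDP_UMEM_COMPLETION_RING, &entries, sizeof(entries));
 *	setsockopt(fd, SOL_XDP, XDP_RX_RING, &entries, sizeof(entries));
 *	setsockopt(fd, SOL_XDP, XDP_TX_RING, &entries, sizeof(entries));
 */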
static int xsk_setsockopt(struct socket *sock, int level, int optname,
			  char __user *optval, unsigned int optlen)
{
	struct sock *sk = sock->sk;
	struct xdp_sock *xs = xdp_sk(sk);
	int err;

	if (level != SOL_XDP)
		return -ENOPROTOOPT;

	switch (optname) {
	case XDP_RX_RING:
	case XDP_TX_RING:
	{
		struct xsk_queue **q;
		int entries;

		if (optlen < sizeof(entries))
			return -EINVAL;
		if (copy_from_user(&entries, optval, sizeof(entries)))
			return -EFAULT;

		mutex_lock(&xs->mutex);
		if (xs->state != XSK_READY) {
			mutex_unlock(&xs->mutex);
			return -EBUSY;
		}
		q = (optname == XDP_TX_RING) ? &xs->tx : &xs->rx;
		err = xsk_init_queue(entries, q, false);
		mutex_unlock(&xs->mutex);
		return err;
	}
	case XDP_UMEM_REG:
	{
		struct xdp_umem_reg mr;
		struct xdp_umem *umem;

		if (copy_from_user(&mr, optval, sizeof(mr)))
			return -EFAULT;

		mutex_lock(&xs->mutex);
		if (xs->state != XSK_READY || xs->umem) {
			mutex_unlock(&xs->mutex);
			return -EBUSY;
		}

		umem = xdp_umem_create(&mr);
		if (IS_ERR(umem)) {
			mutex_unlock(&xs->mutex);
			return PTR_ERR(umem);
		}

		/* Make sure umem is ready before it can be seen by others */
		smp_wmb();
		xs->umem = umem;
		mutex_unlock(&xs->mutex);
		return 0;
	}
	case XDP_UMEM_FILL_RING:
	case XDP_UMEM_COMPLETION_RING:
	{
		struct xsk_queue **q;
		int entries;

		if (copy_from_user(&entries, optval, sizeof(entries)))
			return -EFAULT;

		mutex_lock(&xs->mutex);
		if (xs->state != XSK_READY) {
			mutex_unlock(&xs->mutex);
			return -EBUSY;
		}
		if (!xs->umem) {
			mutex_unlock(&xs->mutex);
			return -EINVAL;
		}

		q = (optname == XDP_UMEM_FILL_RING) ? &xs->umem->fq :
			&xs->umem->cq;
		err = xsk_init_queue(entries, q, true);
		mutex_unlock(&xs->mutex);
		return err;
	}
	default:
		break;
	}

	return -ENOPROTOOPT;
}

static int xsk_getsockopt(struct socket *sock, int level, int optname,
			  char __user *optval, int __user *optlen)
{
	struct sock *sk = sock->sk;
	struct xdp_sock *xs = xdp_sk(sk);
	int len;

	if (level != SOL_XDP)
		return -ENOPROTOOPT;

	if (get_user(len, optlen))
		return -EFAULT;
	if (len < 0)
		return -EINVAL;

	switch (optname) {
	case XDP_STATISTICS:
	{
		struct xdp_statistics stats;

		if (len < sizeof(stats))
			return -EINVAL;

		mutex_lock(&xs->mutex);
		stats.rx_dropped = xs->rx_dropped;
		stats.rx_invalid_descs = xskq_nb_invalid_descs(xs->rx);
		stats.tx_invalid_descs = xskq_nb_invalid_descs(xs->tx);
		mutex_unlock(&xs->mutex);

		if (copy_to_user(optval, &stats, sizeof(stats)))
			return -EFAULT;
		if (put_user(sizeof(stats), optlen))
			return -EFAULT;

		return 0;
	}
	case XDP_MMAP_OFFSETS:
	{
		struct xdp_mmap_offsets off;

		if (len < sizeof(off))
			return -EINVAL;

		off.rx.producer = offsetof(struct xdp_rxtx_ring, ptrs.producer);
		off.rx.consumer = offsetof(struct xdp_rxtx_ring, ptrs.consumer);
		off.rx.desc	= offsetof(struct xdp_rxtx_ring, desc);
		off.tx.producer = offsetof(struct xdp_rxtx_ring, ptrs.producer);
		off.tx.consumer = offsetof(struct xdp_rxtx_ring, ptrs.consumer);
		off.tx.desc	= offsetof(struct xdp_rxtx_ring, desc);

		off.fr.producer = offsetof(struct xdp_umem_ring, ptrs.producer);
		off.fr.consumer = offsetof(struct xdp_umem_ring, ptrs.consumer);
		off.fr.desc	= offsetof(struct xdp_umem_ring, desc);
		off.cr.producer = offsetof(struct xdp_umem_ring, ptrs.producer);
		off.cr.consumer = offsetof(struct xdp_umem_ring, ptrs.consumer);
		off.cr.desc	= offsetof(struct xdp_umem_ring, desc);

		len = sizeof(off);
		if (copy_to_user(optval, &off, len))
			return -EFAULT;
		if (put_user(len, optlen))
			return -EFAULT;

		return 0;
	}
	case XDP_OPTIONS:
	{
		struct xdp_options opts = {};

		if (len < sizeof(opts))
			return -EINVAL;

		mutex_lock(&xs->mutex);
		if (xs->zc)
			opts.flags |= XDP_OPTIONS_ZEROCOPY;
		mutex_unlock(&xs->mutex);

		len = sizeof(opts);
		if (copy_to_user(optval, &opts, len))
			return -EFAULT;
		if (put_user(len, optlen))
			return -EFAULT;

		return 0;
	}
	default:
		break;
	}

	return -EOPNOTSUPP;
}

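/* The rings themselves are mapped into userspace with mmap(), using the
 * fixed page offsets to select a ring and the offsets reported by
 * XDP_MMAP_OFFSETS to locate the producer/consumer indices and the
 * descriptor array within each mapping. An illustrative sketch, assuming
 * "entries" matches the value passed to XDP_RX_RING above:
 *
 *	struct xdp_mmap_offsets off;
 *	socklen_t optlen = sizeof(off);
 *
 *	getsockopt(fd, SOL_XDP, XDP_MMAP_OFFSETS, &off, &optlen);
 *	void *rx_map = mmap(NULL, off.rx.desc + entries * sizeof(struct xdp_desc),
 *			    PROT_READ | PROT_WRITE, MAP_SHARED | MAP_POPULATE,
 *			    fd, XDP_PGOFF_RX_RING);
 */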
static int xsk_mmap(struct file *file, struct socket *sock,
		    struct vm_area_struct *vma)
{
	loff_t offset = (loff_t)vma->vm_pgoff << PAGE_SHIFT;
	unsigned long size = vma->vm_end - vma->vm_start;
	struct xdp_sock *xs = xdp_sk(sock->sk);
	struct xsk_queue *q = NULL;
	struct xdp_umem *umem;
	unsigned long pfn;
	struct page *qpg;

	if (xs->state != XSK_READY)
		return -EBUSY;

	if (offset == XDP_PGOFF_RX_RING) {
		q = READ_ONCE(xs->rx);
	} else if (offset == XDP_PGOFF_TX_RING) {
		q = READ_ONCE(xs->tx);
	} else {
		umem = READ_ONCE(xs->umem);
		if (!umem)
			return -EINVAL;

		/* Matches the smp_wmb() in XDP_UMEM_REG */
		smp_rmb();
		if (offset == XDP_UMEM_PGOFF_FILL_RING)
			q = READ_ONCE(umem->fq);
		else if (offset == XDP_UMEM_PGOFF_COMPLETION_RING)
			q = READ_ONCE(umem->cq);
	}

	if (!q)
		return -EINVAL;

	/* Matches the smp_wmb() in xsk_init_queue */
	smp_rmb();
	qpg = virt_to_head_page(q->ring);
	if (size > (PAGE_SIZE << compound_order(qpg)))
		return -EINVAL;

	pfn = virt_to_phys(q->ring) >> PAGE_SHIFT;
	return remap_pfn_range(vma, vma->vm_start, pfn,
			       size, vma->vm_page_prot);
}

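/* Netdev notifier: when a device is unregistered, error out and unbind
 * every socket attached to it so that no stale device references
 * survive.
 */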
static int xsk_notifier(struct notifier_block *this,
			unsigned long msg, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct net *net = dev_net(dev);
	struct sock *sk;

	switch (msg) {
	case NETDEV_UNREGISTER:
		mutex_lock(&net->xdp.lock);
		sk_for_each(sk, &net->xdp.list) {
			struct xdp_sock *xs = xdp_sk(sk);

			mutex_lock(&xs->mutex);
			if (xs->dev == dev) {
				sk->sk_err = ENETDOWN;
				if (!sock_flag(sk, SOCK_DEAD))
					sk->sk_error_report(sk);

				xsk_unbind_dev(xs);

				/* Clear device references in umem. */
				xdp_umem_clear_dev(xs->umem);
			}
			mutex_unlock(&xs->mutex);
		}
		mutex_unlock(&net->xdp.lock);
		break;
	}
	return NOTIFY_DONE;
}

static struct proto xsk_proto = {
	.name =		"XDP",
	.owner =	THIS_MODULE,
	.obj_size =	sizeof(struct xdp_sock),
};

static const struct proto_ops xsk_proto_ops = {
	.family		= PF_XDP,
	.owner		= THIS_MODULE,
	.release	= xsk_release,
	.bind		= xsk_bind,
	.connect	= sock_no_connect,
	.socketpair	= sock_no_socketpair,
	.accept		= sock_no_accept,
	.getname	= sock_no_getname,
	.poll		= xsk_poll,
	.ioctl		= sock_no_ioctl,
	.listen		= sock_no_listen,
	.shutdown	= sock_no_shutdown,
	.setsockopt	= xsk_setsockopt,
	.getsockopt	= xsk_getsockopt,
	.sendmsg	= xsk_sendmsg,
	.recvmsg	= sock_no_recvmsg,
	.mmap		= xsk_mmap,
	.sendpage	= sock_no_sendpage,
};

static void xsk_destruct(struct sock *sk)
{
	struct xdp_sock *xs = xdp_sk(sk);

	if (!sock_flag(sk, SOCK_DEAD))
		return;

	xdp_put_umem(xs->umem);

	sk_refcnt_debug_dec(sk);
}

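/* Socket creation: AF_XDP sockets require CAP_NET_RAW, must be of type
 * SOCK_RAW and take no protocol argument. The new socket starts in
 * XSK_READY and is added to the per-netns list under net->xdp.lock.
 */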
static int xsk_create(struct net *net, struct socket *sock, int protocol,
		      int kern)
{
	struct sock *sk;
	struct xdp_sock *xs;

	if (!ns_capable(net->user_ns, CAP_NET_RAW))
		return -EPERM;
	if (sock->type != SOCK_RAW)
		return -ESOCKTNOSUPPORT;

	if (protocol)
		return -EPROTONOSUPPORT;

	sock->state = SS_UNCONNECTED;

	sk = sk_alloc(net, PF_XDP, GFP_KERNEL, &xsk_proto, kern);
	if (!sk)
		return -ENOBUFS;

	sock->ops = &xsk_proto_ops;

	sock_init_data(sock, sk);

	sk->sk_family = PF_XDP;

	sk->sk_destruct = xsk_destruct;
	sk_refcnt_debug_inc(sk);

	sock_set_flag(sk, SOCK_RCU_FREE);

	xs = xdp_sk(sk);
	xs->state = XSK_READY;
	mutex_init(&xs->mutex);
	spin_lock_init(&xs->rx_lock);
	spin_lock_init(&xs->tx_completion_lock);

	mutex_lock(&net->xdp.lock);
	sk_add_node_rcu(sk, &net->xdp.list);
	mutex_unlock(&net->xdp.lock);

	local_bh_disable();
	sock_prot_inuse_add(net, &xsk_proto, 1);
	local_bh_enable();

	return 0;
}

static const struct net_proto_family xsk_family_ops = {
	.family = PF_XDP,
	.create = xsk_create,
	.owner	= THIS_MODULE,
};

static struct notifier_block xsk_netdev_notifier = {
	.notifier_call	= xsk_notifier,
};

static int __net_init xsk_net_init(struct net *net)
{
	mutex_init(&net->xdp.lock);
	INIT_HLIST_HEAD(&net->xdp.list);
	return 0;
}

static void __net_exit xsk_net_exit(struct net *net)
{
	WARN_ON_ONCE(!hlist_empty(&net->xdp.list));
}

static struct pernet_operations xsk_net_ops = {
	.init = xsk_net_init,
	.exit = xsk_net_exit,
};

static int __init xsk_init(void)
{
	int err;

	err = proto_register(&xsk_proto, 0 /* no slab */);
	if (err)
		goto out;

	err = sock_register(&xsk_family_ops);
	if (err)
		goto out_proto;

	err = register_pernet_subsys(&xsk_net_ops);
	if (err)
		goto out_sk;

	err = register_netdevice_notifier(&xsk_netdev_notifier);
	if (err)
		goto out_pernet;

	return 0;

out_pernet:
	unregister_pernet_subsys(&xsk_net_ops);
out_sk:
	sock_unregister(PF_XDP);
out_proto:
	proto_unregister(&xsk_proto);
out:
	return err;
}

fs_initcall(xsk_init);