xref: /openbmc/linux/net/l2tp/l2tp_core.c (revision d5a9588c)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /* L2TP core.
3  *
4  * Copyright (c) 2008,2009,2010 Katalix Systems Ltd
5  *
6  * This file contains some code of the original L2TPv2 pppol2tp
7  * driver, which has the following copyright:
8  *
9  * Authors:	Martijn van Oosterhout <kleptog@svana.org>
10  *		James Chapman (jchapman@katalix.com)
11  * Contributors:
12  *		Michal Ostrowski <mostrows@speakeasy.net>
13  *		Arnaldo Carvalho de Melo <acme@xconectiva.com.br>
14  *		David S. Miller (davem@redhat.com)
15  */
16 
17 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
18 
19 #include <linux/module.h>
20 #include <linux/string.h>
21 #include <linux/list.h>
22 #include <linux/rculist.h>
23 #include <linux/uaccess.h>
24 
25 #include <linux/kernel.h>
26 #include <linux/spinlock.h>
27 #include <linux/kthread.h>
28 #include <linux/sched.h>
29 #include <linux/slab.h>
30 #include <linux/errno.h>
31 #include <linux/jiffies.h>
32 
33 #include <linux/netdevice.h>
34 #include <linux/net.h>
35 #include <linux/inetdevice.h>
36 #include <linux/skbuff.h>
37 #include <linux/init.h>
38 #include <linux/in.h>
39 #include <linux/ip.h>
40 #include <linux/udp.h>
41 #include <linux/l2tp.h>
42 #include <linux/hash.h>
43 #include <linux/sort.h>
44 #include <linux/file.h>
45 #include <linux/nsproxy.h>
46 #include <net/net_namespace.h>
47 #include <net/netns/generic.h>
48 #include <net/dst.h>
49 #include <net/ip.h>
50 #include <net/udp.h>
51 #include <net/udp_tunnel.h>
52 #include <net/inet_common.h>
53 #include <net/xfrm.h>
54 #include <net/protocol.h>
55 #include <net/inet6_connection_sock.h>
56 #include <net/inet_ecn.h>
57 #include <net/ip6_route.h>
58 #include <net/ip6_checksum.h>
59 
60 #include <asm/byteorder.h>
61 #include <linux/atomic.h>
62 
63 #include "l2tp_core.h"
64 #include "trace.h"
65 
66 #define CREATE_TRACE_POINTS
67 #include "trace.h"
68 
69 #define L2TP_DRV_VERSION	"V2.0"
70 
71 /* L2TP header constants */
72 #define L2TP_HDRFLAG_T	   0x8000
73 #define L2TP_HDRFLAG_L	   0x4000
74 #define L2TP_HDRFLAG_S	   0x0800
75 #define L2TP_HDRFLAG_O	   0x0200
76 #define L2TP_HDRFLAG_P	   0x0100
77 
78 #define L2TP_HDR_VER_MASK  0x000F
79 #define L2TP_HDR_VER_2	   0x0002
80 #define L2TP_HDR_VER_3	   0x0003
81 
82 /* L2TPv3 default L2-specific sublayer */
83 #define L2TP_SLFLAG_S	   0x40000000
84 #define L2TP_SL_SEQ_MASK   0x00ffffff
85 
86 #define L2TP_HDR_SIZE_MAX		14
87 
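/* Editor's sketch (not part of the driver): how the flag and version
 * constants above are applied to the first 16 bits of a received
 * header; the rx path performs essentially these tests inline.
 */
static inline bool l2tp_example_hdr_is_v2_data(__be16 first_word)
{
	u16 hdrflags = ntohs(first_word);

	return (hdrflags & L2TP_HDR_VER_MASK) == L2TP_HDR_VER_2 &&
	       !(hdrflags & L2TP_HDRFLAG_T);	/* T bit set means control */
}
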
88 /* Default trace flags */
89 #define L2TP_DEFAULT_DEBUG_FLAGS	0
90 
91 #define L2TP_DEPTH_NESTING		2
92 #if L2TP_DEPTH_NESTING == SINGLE_DEPTH_NESTING
93 #error "L2TP requires its own lockdep subclass"
94 #endif
95 
96 /* Private data stored for received packets in the skb.
97  */
98 struct l2tp_skb_cb {
99 	u32			ns;
100 	u16			has_seq;
101 	u16			length;
102 	unsigned long		expires;
103 };
104 
105 #define L2TP_SKB_CB(skb)	((struct l2tp_skb_cb *)&(skb)->cb[sizeof(struct inet_skb_parm)])
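
/* Editor's note: a minimal sketch of how this control-block area is
 * used; the rx path fills it before queueing, roughly:
 *
 *	L2TP_SKB_CB(skb)->has_seq = 1;
 *	L2TP_SKB_CB(skb)->ns = ns;
 *	L2TP_SKB_CB(skb)->expires = jiffies + HZ;
 */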
106 
107 static struct workqueue_struct *l2tp_wq;
108 
109 /* per-net private data for this module */
110 static unsigned int l2tp_net_id;
111 struct l2tp_net {
112 	/* Lock for write access to l2tp_tunnel_idr */
113 	spinlock_t l2tp_tunnel_idr_lock;
114 	struct idr l2tp_tunnel_idr;
115 	struct hlist_head l2tp_session_hlist[L2TP_HASH_SIZE_2];
116 	/* Lock for write access to l2tp_session_hlist */
117 	spinlock_t l2tp_session_hlist_lock;
118 };
119 
120 #if IS_ENABLED(CONFIG_IPV6)
121 static bool l2tp_sk_is_v6(struct sock *sk)
122 {
123 	return sk->sk_family == PF_INET6 &&
124 	       !ipv6_addr_v4mapped(&sk->sk_v6_daddr);
125 }
126 #endif
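
/* Editor's note: a PF_INET6 socket connected to a v4-mapped address
 * such as ::ffff:192.0.2.1 (example address from RFC 5737) carries
 * IPv4 on the wire, which is why the helper above excludes it.
 */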
127 
128 static inline struct l2tp_net *l2tp_pernet(const struct net *net)
129 {
130 	return net_generic(net, l2tp_net_id);
131 }
132 
133 /* Session hash global list for L2TPv3.
134  * The session_id SHOULD be random according to RFC3931, but several
135  * L2TP implementations use incrementing session_ids.  So we do a real
136  * hash on the session_id, rather than a simple bitmask.
137  */
138 static inline struct hlist_head *
139 l2tp_session_id_hash_2(struct l2tp_net *pn, u32 session_id)
140 {
141 	return &pn->l2tp_session_hlist[hash_32(session_id, L2TP_HASH_BITS_2)];
142 }
143 
144 /* Session hash list.
145  * The session_id SHOULD be random according to RFC2661, but several
146  * L2TP implementations (Cisco and Microsoft) use incrementing
147  * session_ids.  So we do a real hash on the session_id, rather than a
148  * simple bitmask.
149  */
150 static inline struct hlist_head *
151 l2tp_session_id_hash(struct l2tp_tunnel *tunnel, u32 session_id)
152 {
153 	return &tunnel->session_hlist[hash_32(session_id, L2TP_HASH_BITS)];
154 }
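
/* Editor's note on the bucket math shared by both hash helpers above:
 * hash_32(id, bits) mixes all 32 bits of the id and returns an index
 * in [0, 1 << bits), so incrementing session ids are spread across the
 * table instead of filling adjacent buckets.
 */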
155 
156 static void l2tp_tunnel_free(struct l2tp_tunnel *tunnel)
157 {
158 	trace_free_tunnel(tunnel);
159 	sock_put(tunnel->sock);
160 	/* the tunnel is freed in the socket destructor */
161 }
162 
163 static void l2tp_session_free(struct l2tp_session *session)
164 {
165 	trace_free_session(session);
166 	if (session->tunnel)
167 		l2tp_tunnel_dec_refcount(session->tunnel);
168 	kfree(session);
169 }
170 
171 struct l2tp_tunnel *l2tp_sk_to_tunnel(struct sock *sk)
172 {
173 	struct l2tp_tunnel *tunnel = sk->sk_user_data;
174 
175 	if (tunnel)
176 		if (WARN_ON(tunnel->magic != L2TP_TUNNEL_MAGIC))
177 			return NULL;
178 
179 	return tunnel;
180 }
181 EXPORT_SYMBOL_GPL(l2tp_sk_to_tunnel);
182 
183 void l2tp_tunnel_inc_refcount(struct l2tp_tunnel *tunnel)
184 {
185 	refcount_inc(&tunnel->ref_count);
186 }
187 EXPORT_SYMBOL_GPL(l2tp_tunnel_inc_refcount);
188 
189 void l2tp_tunnel_dec_refcount(struct l2tp_tunnel *tunnel)
190 {
191 	if (refcount_dec_and_test(&tunnel->ref_count))
192 		l2tp_tunnel_free(tunnel);
193 }
194 EXPORT_SYMBOL_GPL(l2tp_tunnel_dec_refcount);
195 
196 void l2tp_session_inc_refcount(struct l2tp_session *session)
197 {
198 	refcount_inc(&session->ref_count);
199 }
200 EXPORT_SYMBOL_GPL(l2tp_session_inc_refcount);
201 
202 void l2tp_session_dec_refcount(struct l2tp_session *session)
203 {
204 	if (refcount_dec_and_test(&session->ref_count))
205 		l2tp_session_free(session);
206 }
207 EXPORT_SYMBOL_GPL(l2tp_session_dec_refcount);
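
/* Editor's sketch of the intended usage pattern (hypothetical caller):
 * each successful *_get() below returns with a reference held, which
 * must be balanced with the matching *_dec_refcount():
 *
 *	struct l2tp_tunnel *tunnel = l2tp_tunnel_get(net, tunnel_id);
 *
 *	if (tunnel) {
 *		... use tunnel ...
 *		l2tp_tunnel_dec_refcount(tunnel);
 *	}
 */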
208 
209 /* Lookup a tunnel. A new reference is held on the returned tunnel. */
210 struct l2tp_tunnel *l2tp_tunnel_get(const struct net *net, u32 tunnel_id)
211 {
212 	const struct l2tp_net *pn = l2tp_pernet(net);
213 	struct l2tp_tunnel *tunnel;
214 
215 	rcu_read_lock_bh();
216 	tunnel = idr_find(&pn->l2tp_tunnel_idr, tunnel_id);
217 	if (tunnel && refcount_inc_not_zero(&tunnel->ref_count)) {
218 		rcu_read_unlock_bh();
219 		return tunnel;
220 	}
221 	rcu_read_unlock_bh();
222 
223 	return NULL;
224 }
225 EXPORT_SYMBOL_GPL(l2tp_tunnel_get);
226 
227 struct l2tp_tunnel *l2tp_tunnel_get_nth(const struct net *net, int nth)
228 {
229 	struct l2tp_net *pn = l2tp_pernet(net);
230 	unsigned long tunnel_id, tmp;
231 	struct l2tp_tunnel *tunnel;
232 	int count = 0;
233 
234 	rcu_read_lock_bh();
235 	idr_for_each_entry_ul(&pn->l2tp_tunnel_idr, tunnel, tmp, tunnel_id) {
236 		if (tunnel && ++count > nth &&
237 		    refcount_inc_not_zero(&tunnel->ref_count)) {
238 			rcu_read_unlock_bh();
239 			return tunnel;
240 		}
241 	}
242 	rcu_read_unlock_bh();
243 
244 	return NULL;
245 }
246 EXPORT_SYMBOL_GPL(l2tp_tunnel_get_nth);
247 
248 struct l2tp_session *l2tp_tunnel_get_session(struct l2tp_tunnel *tunnel,
249 					     u32 session_id)
250 {
251 	struct hlist_head *session_list;
252 	struct l2tp_session *session;
253 
254 	session_list = l2tp_session_id_hash(tunnel, session_id);
255 
256 	rcu_read_lock_bh();
257 	hlist_for_each_entry_rcu(session, session_list, hlist)
258 		if (session->session_id == session_id) {
259 			l2tp_session_inc_refcount(session);
260 			rcu_read_unlock_bh();
261 
262 			return session;
263 		}
264 	rcu_read_unlock_bh();
265 
266 	return NULL;
267 }
268 EXPORT_SYMBOL_GPL(l2tp_tunnel_get_session);
269 
270 struct l2tp_session *l2tp_session_get(const struct net *net, u32 session_id)
271 {
272 	struct hlist_head *session_list;
273 	struct l2tp_session *session;
274 
275 	session_list = l2tp_session_id_hash_2(l2tp_pernet(net), session_id);
276 
277 	rcu_read_lock_bh();
278 	hlist_for_each_entry_rcu(session, session_list, global_hlist)
279 		if (session->session_id == session_id) {
280 			l2tp_session_inc_refcount(session);
281 			rcu_read_unlock_bh();
282 
283 			return session;
284 		}
285 	rcu_read_unlock_bh();
286 
287 	return NULL;
288 }
289 EXPORT_SYMBOL_GPL(l2tp_session_get);
290 
291 struct l2tp_session *l2tp_session_get_nth(struct l2tp_tunnel *tunnel, int nth)
292 {
293 	int hash;
294 	struct l2tp_session *session;
295 	int count = 0;
296 
297 	rcu_read_lock_bh();
298 	for (hash = 0; hash < L2TP_HASH_SIZE; hash++) {
299 		hlist_for_each_entry_rcu(session, &tunnel->session_hlist[hash], hlist) {
300 			if (++count > nth) {
301 				l2tp_session_inc_refcount(session);
302 				rcu_read_unlock_bh();
303 				return session;
304 			}
305 		}
306 	}
307 
308 	rcu_read_unlock_bh();
309 
310 	return NULL;
311 }
312 EXPORT_SYMBOL_GPL(l2tp_session_get_nth);
313 
314 /* Lookup a session by interface name.
315  * This is very inefficient but is only used by management interfaces.
316  */
317 struct l2tp_session *l2tp_session_get_by_ifname(const struct net *net,
318 						const char *ifname)
319 {
320 	struct l2tp_net *pn = l2tp_pernet(net);
321 	int hash;
322 	struct l2tp_session *session;
323 
324 	rcu_read_lock_bh();
325 	for (hash = 0; hash < L2TP_HASH_SIZE_2; hash++) {
326 		hlist_for_each_entry_rcu(session, &pn->l2tp_session_hlist[hash], global_hlist) {
327 			if (!strcmp(session->ifname, ifname)) {
328 				l2tp_session_inc_refcount(session);
329 				rcu_read_unlock_bh();
330 
331 				return session;
332 			}
333 		}
334 	}
335 
336 	rcu_read_unlock_bh();
337 
338 	return NULL;
339 }
340 EXPORT_SYMBOL_GPL(l2tp_session_get_by_ifname);
341 
342 int l2tp_session_register(struct l2tp_session *session,
343 			  struct l2tp_tunnel *tunnel)
344 {
345 	struct l2tp_session *session_walk;
346 	struct hlist_head *g_head;
347 	struct hlist_head *head;
348 	struct l2tp_net *pn;
349 	int err;
350 
351 	head = l2tp_session_id_hash(tunnel, session->session_id);
352 
353 	spin_lock_bh(&tunnel->hlist_lock);
354 	if (!tunnel->acpt_newsess) {
355 		err = -ENODEV;
356 		goto err_tlock;
357 	}
358 
359 	hlist_for_each_entry(session_walk, head, hlist)
360 		if (session_walk->session_id == session->session_id) {
361 			err = -EEXIST;
362 			goto err_tlock;
363 		}
364 
365 	if (tunnel->version == L2TP_HDR_VER_3) {
366 		pn = l2tp_pernet(tunnel->l2tp_net);
367 		g_head = l2tp_session_id_hash_2(pn, session->session_id);
368 
369 		spin_lock_bh(&pn->l2tp_session_hlist_lock);
370 
371 		/* IP encap expects session IDs to be globally unique, while
372 		 * UDP encap doesn't.
373 		 */
374 		hlist_for_each_entry(session_walk, g_head, global_hlist)
375 			if (session_walk->session_id == session->session_id &&
376 			    (session_walk->tunnel->encap == L2TP_ENCAPTYPE_IP ||
377 			     tunnel->encap == L2TP_ENCAPTYPE_IP)) {
378 				err = -EEXIST;
379 				goto err_tlock_pnlock;
380 			}
381 
382 		l2tp_tunnel_inc_refcount(tunnel);
383 		hlist_add_head_rcu(&session->global_hlist, g_head);
384 
385 		spin_unlock_bh(&pn->l2tp_session_hlist_lock);
386 	} else {
387 		l2tp_tunnel_inc_refcount(tunnel);
388 	}
389 
390 	hlist_add_head_rcu(&session->hlist, head);
391 	spin_unlock_bh(&tunnel->hlist_lock);
392 
393 	trace_register_session(session);
394 
395 	return 0;
396 
397 err_tlock_pnlock:
398 	spin_unlock_bh(&pn->l2tp_session_hlist_lock);
399 err_tlock:
400 	spin_unlock_bh(&tunnel->hlist_lock);
401 
402 	return err;
403 }
404 EXPORT_SYMBOL_GPL(l2tp_session_register);
405 
406 /*****************************************************************************
407  * Receive data handling
408  *****************************************************************************/
409 
410 /* Queue a skb in order. We come here only if the skb has an L2TP sequence
411  * number.
412  */
413 static void l2tp_recv_queue_skb(struct l2tp_session *session, struct sk_buff *skb)
414 {
415 	struct sk_buff *skbp;
416 	struct sk_buff *tmp;
417 	u32 ns = L2TP_SKB_CB(skb)->ns;
418 
419 	spin_lock_bh(&session->reorder_q.lock);
420 	skb_queue_walk_safe(&session->reorder_q, skbp, tmp) {
421 		if (L2TP_SKB_CB(skbp)->ns > ns) {
422 			__skb_queue_before(&session->reorder_q, skbp, skb);
423 			atomic_long_inc(&session->stats.rx_oos_packets);
424 			goto out;
425 		}
426 	}
427 
428 	__skb_queue_tail(&session->reorder_q, skb);
429 
430 out:
431 	spin_unlock_bh(&session->reorder_q.lock);
432 }
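
/* Editor's example: if reorder_q already holds packets with ns 3 and 5,
 * a packet with ns 4 is linked before the ns-5 entry, leaving the queue
 * sorted as 3, 4, 5. (Illustrative sequence numbers.)
 */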
433 
434 /* Dequeue a single skb.
435  */
436 static void l2tp_recv_dequeue_skb(struct l2tp_session *session, struct sk_buff *skb)
437 {
438 	struct l2tp_tunnel *tunnel = session->tunnel;
439 	int length = L2TP_SKB_CB(skb)->length;
440 
441 	/* We're about to requeue the skb, so return resources
442 	 * to its current owner (a socket receive buffer).
443 	 */
444 	skb_orphan(skb);
445 
446 	atomic_long_inc(&tunnel->stats.rx_packets);
447 	atomic_long_add(length, &tunnel->stats.rx_bytes);
448 	atomic_long_inc(&session->stats.rx_packets);
449 	atomic_long_add(length, &session->stats.rx_bytes);
450 
451 	if (L2TP_SKB_CB(skb)->has_seq) {
452 		/* Bump our Nr */
453 		session->nr++;
454 		session->nr &= session->nr_max;
455 		trace_session_seqnum_update(session);
456 	}
457 
458 	/* call private receive handler */
459 	if (session->recv_skb)
460 		(*session->recv_skb)(session, skb, L2TP_SKB_CB(skb)->length);
461 	else
462 		kfree_skb(skb);
463 }
464 
465 /* Dequeue skbs from the session's reorder_q, subject to packet order.
466  * Skbs that have been in the queue for too long are simply discarded.
467  */
468 static void l2tp_recv_dequeue(struct l2tp_session *session)
469 {
470 	struct sk_buff *skb;
471 	struct sk_buff *tmp;
472 
473 	/* If the pkt at the head of the queue has the nr that we
474 	 * expect to send up next, dequeue it and any other
475 	 * in-sequence packets behind it.
476 	 */
477 start:
478 	spin_lock_bh(&session->reorder_q.lock);
479 	skb_queue_walk_safe(&session->reorder_q, skb, tmp) {
480 		struct l2tp_skb_cb *cb = L2TP_SKB_CB(skb);
481 
482 		/* If the packet has been pending on the queue for too long, discard it */
483 		if (time_after(jiffies, cb->expires)) {
484 			atomic_long_inc(&session->stats.rx_seq_discards);
485 			atomic_long_inc(&session->stats.rx_errors);
486 			trace_session_pkt_expired(session, cb->ns);
487 			session->reorder_skip = 1;
488 			__skb_unlink(skb, &session->reorder_q);
489 			kfree_skb(skb);
490 			continue;
491 		}
492 
493 		if (cb->has_seq) {
494 			if (session->reorder_skip) {
495 				session->reorder_skip = 0;
496 				session->nr = cb->ns;
497 				trace_session_seqnum_reset(session);
498 			}
499 			if (cb->ns != session->nr)
500 				goto out;
501 		}
502 		__skb_unlink(skb, &session->reorder_q);
503 
504 		/* Process the skb. We release the queue lock while we
505 		 * do so to let other contexts process the queue.
506 		 */
507 		spin_unlock_bh(&session->reorder_q.lock);
508 		l2tp_recv_dequeue_skb(session, skb);
509 		goto start;
510 	}
511 
512 out:
513 	spin_unlock_bh(&session->reorder_q.lock);
514 }
515 
516 static int l2tp_seq_check_rx_window(struct l2tp_session *session, u32 nr)
517 {
518 	u32 nws;
519 
520 	if (nr >= session->nr)
521 		nws = nr - session->nr;
522 	else
523 		nws = (session->nr_max + 1) - (session->nr - nr);
524 
525 	return nws < session->nr_window_size;
526 }
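
/* Editor's worked example for the window check above: with a 16-bit
 * sequence space (nr_max = 0xffff, window = 0x8000), session->nr =
 * 0xfffe and nr = 0x0001 gives nws = 0x10000 - (0xfffe - 0x0001) = 3,
 * so the packet is accepted despite the counter wraparound.
 */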
527 
528 /* If packet has sequence numbers, queue it if acceptable. Returns 0 if
529  * acceptable, else non-zero.
530  */
531 static int l2tp_recv_data_seq(struct l2tp_session *session, struct sk_buff *skb)
532 {
533 	struct l2tp_skb_cb *cb = L2TP_SKB_CB(skb);
534 
535 	if (!l2tp_seq_check_rx_window(session, cb->ns)) {
536 		/* Packet sequence number is outside allowed window.
537 		 * Discard it.
538 		 */
539 		trace_session_pkt_outside_rx_window(session, cb->ns);
540 		goto discard;
541 	}
542 
543 	if (session->reorder_timeout != 0) {
544 		/* Packet reordering enabled. Add skb to session's
545 		 * reorder queue, in order of ns.
546 		 */
547 		l2tp_recv_queue_skb(session, skb);
548 		goto out;
549 	}
550 
551 	/* Packet reordering disabled. Discard out-of-sequence packets, while
552 	 * tracking the number of in-sequence packets after the first OOS packet
553 	 * is seen. After nr_oos_count_max in-sequence packets, reset the
554 	 * sequence number to re-enable packet reception.
555 	 */
556 	if (cb->ns == session->nr) {
557 		skb_queue_tail(&session->reorder_q, skb);
558 	} else {
559 		u32 nr_oos = cb->ns;
560 		u32 nr_next = (session->nr_oos + 1) & session->nr_max;
561 
562 		if (nr_oos == nr_next)
563 			session->nr_oos_count++;
564 		else
565 			session->nr_oos_count = 0;
566 
567 		session->nr_oos = nr_oos;
568 		if (session->nr_oos_count > session->nr_oos_count_max) {
569 			session->reorder_skip = 1;
570 		}
571 		if (!session->reorder_skip) {
572 			atomic_long_inc(&session->stats.rx_seq_discards);
573 			trace_session_pkt_oos(session, cb->ns);
574 			goto discard;
575 		}
576 		skb_queue_tail(&session->reorder_q, skb);
577 	}
578 
579 out:
580 	return 0;
581 
582 discard:
583 	return 1;
584 }
585 
586 /* Do receive processing of L2TP data frames. We handle both L2TPv2
587  * and L2TPv3 data frames here.
588  *
589  * L2TPv2 Data Message Header
590  *
591  *  0                   1                   2                   3
592  *  0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
593  * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
594  * |T|L|x|x|S|x|O|P|x|x|x|x|  Ver  |          Length (opt)         |
595  * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
596  * |           Tunnel ID           |           Session ID          |
597  * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
598  * |             Ns (opt)          |             Nr (opt)          |
599  * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
600  * |      Offset Size (opt)        |    Offset pad... (opt)
601  * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
602  *
603  * Data frames are marked by T=0. All other fields are the same as
604  * those in L2TP control frames.
605  *
606  * L2TPv3 Data Message Header
607  *
608  * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
609  * |                      L2TP Session Header                      |
610  * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
611  * |                      L2-Specific Sublayer                     |
612  * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
613  * |                        Tunnel Payload                      ...
614  * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
615  *
616  * L2TPv3 Session Header Over IP
617  *
618  *  0                   1                   2                   3
619  *  0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
620  * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
621  * |                           Session ID                          |
622  * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
623  * |               Cookie (optional, maximum 64 bits)...
624  * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
625  *                                                                 |
626  * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
627  *
628  * L2TPv3 L2-Specific Sublayer Format
629  *
630  *  0                   1                   2                   3
631  *  0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
632  * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
633  * |x|S|x|x|x|x|x|x|              Sequence Number                  |
634  * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
635  *
636  * Cookie value and sublayer format are negotiated with the peer when
637  * the session is set up. Unlike L2TPv2, we do not need to parse the
638  * packet header to determine if optional fields are present.
639  *
640  * Caller must already have parsed the frame and determined that it is
641  * a data (not control) frame before coming here. Fields up to the
642  * session-id have already been parsed and ptr points to the data
643  * after the session-id.
644  */
645 void l2tp_recv_common(struct l2tp_session *session, struct sk_buff *skb,
646 		      unsigned char *ptr, unsigned char *optr, u16 hdrflags,
647 		      int length)
648 {
649 	struct l2tp_tunnel *tunnel = session->tunnel;
650 	int offset;
651 
652 	/* Parse and check optional cookie */
653 	if (session->peer_cookie_len > 0) {
654 		if (memcmp(ptr, &session->peer_cookie[0], session->peer_cookie_len)) {
655 			pr_debug_ratelimited("%s: cookie mismatch (%u/%u). Discarding.\n",
656 					     tunnel->name, tunnel->tunnel_id,
657 					     session->session_id);
658 			atomic_long_inc(&session->stats.rx_cookie_discards);
659 			goto discard;
660 		}
661 		ptr += session->peer_cookie_len;
662 	}
663 
664 	/* Handle the optional sequence numbers. Sequence numbers are
665 	 * in different places for L2TPv2 and L2TPv3.
666 	 *
667 	 * If we are the LAC, enable/disable sequence numbers under
668 	 * the control of the LNS.  If no sequence numbers present but
669 	 * we were expecting them, discard frame.
670 	 */
671 	L2TP_SKB_CB(skb)->has_seq = 0;
672 	if (tunnel->version == L2TP_HDR_VER_2) {
673 		if (hdrflags & L2TP_HDRFLAG_S) {
674 			/* Store L2TP info in the skb */
675 			L2TP_SKB_CB(skb)->ns = ntohs(*(__be16 *)ptr);
676 			L2TP_SKB_CB(skb)->has_seq = 1;
677 			ptr += 2;
678 			/* Skip past nr in the header */
679 			ptr += 2;
680 
681 		}
682 	} else if (session->l2specific_type == L2TP_L2SPECTYPE_DEFAULT) {
683 		u32 l2h = ntohl(*(__be32 *)ptr);
684 
685 		if (l2h & 0x40000000) {
686 			/* Store L2TP info in the skb */
687 			L2TP_SKB_CB(skb)->ns = l2h & 0x00ffffff;
688 			L2TP_SKB_CB(skb)->has_seq = 1;
689 		}
690 		ptr += 4;
691 	}
692 
693 	if (L2TP_SKB_CB(skb)->has_seq) {
694 		/* Received a packet with sequence numbers. If we're the LAC,
695 	 * check if we are sending sequence numbers and if not,
696 	 * start doing so.
697 		 */
698 		if (!session->lns_mode && !session->send_seq) {
699 			trace_session_seqnum_lns_enable(session);
700 			session->send_seq = 1;
701 			l2tp_session_set_header_len(session, tunnel->version);
702 		}
703 	} else {
704 		/* No sequence numbers.
705 		 * If user has configured mandatory sequence numbers, discard.
706 		 */
707 		if (session->recv_seq) {
708 			pr_debug_ratelimited("%s: recv data has no seq numbers when required. Discarding.\n",
709 					     session->name);
710 			atomic_long_inc(&session->stats.rx_seq_discards);
711 			goto discard;
712 		}
713 
714 		/* If we're the LAC and we're sending sequence numbers, the
715 		 * LNS has requested that we no longer send sequence numbers.
716 		 * If we're the LNS and we're sending sequence numbers, the
717 		 * LAC is broken. Discard the frame.
718 		 */
719 		if (!session->lns_mode && session->send_seq) {
720 			trace_session_seqnum_lns_disable(session);
721 			session->send_seq = 0;
722 			l2tp_session_set_header_len(session, tunnel->version);
723 		} else if (session->send_seq) {
724 			pr_debug_ratelimited("%s: recv data has no seq numbers when required. Discarding.\n",
725 					     session->name);
726 			atomic_long_inc(&session->stats.rx_seq_discards);
727 			goto discard;
728 		}
729 	}
730 
731 	/* Session data offset is defined only for L2TPv2 and is
732 	 * indicated by an optional 16-bit value in the header.
733 	 */
734 	if (tunnel->version == L2TP_HDR_VER_2) {
735 		/* If offset bit set, skip it. */
736 		if (hdrflags & L2TP_HDRFLAG_O) {
737 			offset = ntohs(*(__be16 *)ptr);
738 			ptr += 2 + offset;
739 		}
740 	}
741 
742 	offset = ptr - optr;
743 	if (!pskb_may_pull(skb, offset))
744 		goto discard;
745 
746 	__skb_pull(skb, offset);
747 
748 	/* Prepare skb for adding to the session's reorder_q.  Hold
749 	 * packets for max reorder_timeout or 1 second if not
750 	 * reordering.
751 	 */
752 	L2TP_SKB_CB(skb)->length = length;
753 	L2TP_SKB_CB(skb)->expires = jiffies +
754 		(session->reorder_timeout ? session->reorder_timeout : HZ);
755 
756 	/* Add packet to the session's receive queue. Reordering is done here, if
757 	 * enabled. Saved L2TP protocol info is stored in skb->cb[].
758 	 */
759 	if (L2TP_SKB_CB(skb)->has_seq) {
760 		if (l2tp_recv_data_seq(session, skb))
761 			goto discard;
762 	} else {
763 		/* No sequence numbers. Add the skb to the tail of the
764 		 * reorder queue. This ensures that it will be
765 		 * delivered after all previous sequenced skbs.
766 		 */
767 		skb_queue_tail(&session->reorder_q, skb);
768 	}
769 
770 	/* Try to dequeue as many skbs from reorder_q as we can. */
771 	l2tp_recv_dequeue(session);
772 
773 	return;
774 
775 discard:
776 	atomic_long_inc(&session->stats.rx_errors);
777 	kfree_skb(skb);
778 }
779 EXPORT_SYMBOL_GPL(l2tp_recv_common);
780 
781 /* Drop skbs from the session's reorder_q
782  */
783 static void l2tp_session_queue_purge(struct l2tp_session *session)
784 {
785 	struct sk_buff *skb = NULL;
786 
787 	while ((skb = skb_dequeue(&session->reorder_q))) {
788 		atomic_long_inc(&session->stats.rx_errors);
789 		kfree_skb(skb);
790 	}
791 }
792 
793 /* Internal UDP receive frame. Do the real work of receiving an L2TP data frame
794  * here. The skb is not on a list when we get here.
795  * Returns 0 if the packet was a data packet and was successfully passed on.
796  * Returns 1 if the packet was not a good data packet and could not be
797  * forwarded.  All such packets are passed up to userspace to deal with.
798  */
799 static int l2tp_udp_recv_core(struct l2tp_tunnel *tunnel, struct sk_buff *skb)
800 {
801 	struct l2tp_session *session = NULL;
802 	unsigned char *ptr, *optr;
803 	u16 hdrflags;
804 	u32 tunnel_id, session_id;
805 	u16 version;
806 	int length;
807 
808 	/* UDP has verified checksum */
809 
810 	/* UDP always verifies the packet length. */
811 	__skb_pull(skb, sizeof(struct udphdr));
812 
813 	/* Short packet? */
814 	if (!pskb_may_pull(skb, L2TP_HDR_SIZE_MAX)) {
815 		pr_debug_ratelimited("%s: recv short packet (len=%d)\n",
816 				     tunnel->name, skb->len);
817 		goto invalid;
818 	}
819 
820 	/* Point to L2TP header */
821 	optr = skb->data;
822 	ptr = skb->data;
823 
824 	/* Get L2TP header flags */
825 	hdrflags = ntohs(*(__be16 *)ptr);
826 
827 	/* Check protocol version */
828 	version = hdrflags & L2TP_HDR_VER_MASK;
829 	if (version != tunnel->version) {
830 		pr_debug_ratelimited("%s: recv protocol version mismatch: got %d expected %d\n",
831 				     tunnel->name, version, tunnel->version);
832 		goto invalid;
833 	}
834 
835 	/* Get length of L2TP packet */
836 	length = skb->len;
837 
838 	/* If type is control packet, it is handled by userspace. */
839 	if (hdrflags & L2TP_HDRFLAG_T)
840 		goto pass;
841 
842 	/* Skip flags */
843 	ptr += 2;
844 
845 	if (tunnel->version == L2TP_HDR_VER_2) {
846 		/* If length is present, skip it */
847 		if (hdrflags & L2TP_HDRFLAG_L)
848 			ptr += 2;
849 
850 		/* Extract tunnel and session ID */
851 		tunnel_id = ntohs(*(__be16 *)ptr);
852 		ptr += 2;
853 		session_id = ntohs(*(__be16 *)ptr);
854 		ptr += 2;
855 	} else {
856 		ptr += 2;	/* skip reserved bits */
857 		tunnel_id = tunnel->tunnel_id;
858 		session_id = ntohl(*(__be32 *)ptr);
859 		ptr += 4;
860 	}
861 
862 	/* Find the session context */
863 	session = l2tp_tunnel_get_session(tunnel, session_id);
864 	if (!session || !session->recv_skb) {
865 		if (session)
866 			l2tp_session_dec_refcount(session);
867 
868 		/* Not found? Pass to userspace to deal with */
869 		pr_debug_ratelimited("%s: no session found (%u/%u). Passing up.\n",
870 				     tunnel->name, tunnel_id, session_id);
871 		goto pass;
872 	}
873 
874 	if (tunnel->version == L2TP_HDR_VER_3 &&
875 	    l2tp_v3_ensure_opt_in_linear(session, skb, &ptr, &optr)) {
876 		l2tp_session_dec_refcount(session);
877 		goto invalid;
878 	}
879 
880 	l2tp_recv_common(session, skb, ptr, optr, hdrflags, length);
881 	l2tp_session_dec_refcount(session);
882 
883 	return 0;
884 
885 invalid:
886 	atomic_long_inc(&tunnel->stats.rx_invalid);
887 
888 pass:
889 	/* Put UDP header back */
890 	__skb_push(skb, sizeof(struct udphdr));
891 
892 	return 1;
893 }
894 
895 /* UDP encapsulation receive handler. See net/ipv4/udp.c.
896  * Return codes:
897  * 0 : success.
898  * <0: error
899  * >0: skb should be passed up to userspace as UDP.
900  */
901 int l2tp_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
902 {
903 	struct l2tp_tunnel *tunnel;
904 
905 	/* Note that this is called from the encap_rcv hook inside an
906 	 * RCU-protected region, but without the socket being locked.
907 	 * Hence we use rcu_dereference_sk_user_data to access the
908 	 * tunnel data structure rather than the usual l2tp_sk_to_tunnel
909 	 * accessor function.
910 	 */
911 	tunnel = rcu_dereference_sk_user_data(sk);
912 	if (!tunnel)
913 		goto pass_up;
914 	if (WARN_ON(tunnel->magic != L2TP_TUNNEL_MAGIC))
915 		goto pass_up;
916 
917 	if (l2tp_udp_recv_core(tunnel, skb))
918 		goto pass_up;
919 
920 	return 0;
921 
922 pass_up:
923 	return 1;
924 }
925 EXPORT_SYMBOL_GPL(l2tp_udp_encap_recv);
926 
927 /************************************************************************
928  * Transmit handling
929  ***********************************************************************/
930 
931 /* Build an L2TP header for the session into the buffer provided.
932  */
933 static int l2tp_build_l2tpv2_header(struct l2tp_session *session, void *buf)
934 {
935 	struct l2tp_tunnel *tunnel = session->tunnel;
936 	__be16 *bufp = buf;
937 	__be16 *optr = buf;
938 	u16 flags = L2TP_HDR_VER_2;
939 	u32 tunnel_id = tunnel->peer_tunnel_id;
940 	u32 session_id = session->peer_session_id;
941 
942 	if (session->send_seq)
943 		flags |= L2TP_HDRFLAG_S;
944 
945 	/* Setup L2TP header. */
946 	*bufp++ = htons(flags);
947 	*bufp++ = htons(tunnel_id);
948 	*bufp++ = htons(session_id);
949 	if (session->send_seq) {
950 		*bufp++ = htons(session->ns);
951 		*bufp++ = 0;
952 		session->ns++;
953 		session->ns &= 0xffff;
954 		trace_session_seqnum_update(session);
955 	}
956 
957 	return bufp - optr;
958 }
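
/* Editor's example of the header written above (illustrative ids): a
 * session with send_seq set, peer tunnel id 10 and peer session id 20
 * emits the 10-byte header
 *
 *	08 02  00 0a  00 14  <Ns>  00 00
 *
 * i.e. flags 0x0802 (S bit | version 2), tunnel id, session id, Ns,
 * and Nr (always transmitted as 0).
 */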
959 
960 static int l2tp_build_l2tpv3_header(struct l2tp_session *session, void *buf)
961 {
962 	struct l2tp_tunnel *tunnel = session->tunnel;
963 	char *bufp = buf;
964 	char *optr = bufp;
965 
966 	/* Setup L2TP header. The header differs slightly for UDP and
967 	 * IP encapsulations. For UDP, there are 4 bytes of flags.
968 	 */
969 	if (tunnel->encap == L2TP_ENCAPTYPE_UDP) {
970 		u16 flags = L2TP_HDR_VER_3;
971 		*((__be16 *)bufp) = htons(flags);
972 		bufp += 2;
973 		*((__be16 *)bufp) = 0;
974 		bufp += 2;
975 	}
976 
977 	*((__be32 *)bufp) = htonl(session->peer_session_id);
978 	bufp += 4;
979 	if (session->cookie_len) {
980 		memcpy(bufp, &session->cookie[0], session->cookie_len);
981 		bufp += session->cookie_len;
982 	}
983 	if (session->l2specific_type == L2TP_L2SPECTYPE_DEFAULT) {
984 		u32 l2h = 0;
985 
986 		if (session->send_seq) {
987 			l2h = 0x40000000 | session->ns;
988 			session->ns++;
989 			session->ns &= 0xffffff;
990 			trace_session_seqnum_update(session);
991 		}
992 
993 		*((__be32 *)bufp) = htonl(l2h);
994 		bufp += 4;
995 	}
996 
997 	return bufp - optr;
998 }
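
/* Editor's note (illustrative): for UDP encap the four flag bytes
 * written above are 00 03 00 00 (version 3, no flags, reserved zero);
 * IP encap starts directly with the 4-byte session id. The optional
 * cookie follows, then for the default L2-specific sublayer a 4-byte
 * word in which the 0x40000000 S bit marks a valid 24-bit Ns.
 */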
999 
1000 /* Queue the packet to IP for output: tunnel socket lock must be held */
1001 static int l2tp_xmit_queue(struct l2tp_tunnel *tunnel, struct sk_buff *skb, struct flowi *fl)
1002 {
1003 	int err;
1004 
1005 	skb->ignore_df = 1;
1006 	skb_dst_drop(skb);
1007 #if IS_ENABLED(CONFIG_IPV6)
1008 	if (l2tp_sk_is_v6(tunnel->sock))
1009 		err = inet6_csk_xmit(tunnel->sock, skb, NULL);
1010 	else
1011 #endif
1012 		err = ip_queue_xmit(tunnel->sock, skb, fl);
1013 
1014 	return err >= 0 ? NET_XMIT_SUCCESS : NET_XMIT_DROP;
1015 }
1016 
1017 static int l2tp_xmit_core(struct l2tp_session *session, struct sk_buff *skb, unsigned int *len)
1018 {
1019 	struct l2tp_tunnel *tunnel = session->tunnel;
1020 	unsigned int data_len = skb->len;
1021 	struct sock *sk = tunnel->sock;
1022 	int headroom, uhlen, udp_len;
1023 	int ret = NET_XMIT_SUCCESS;
1024 	struct inet_sock *inet;
1025 	struct udphdr *uh;
1026 
1027 	/* Check that there's enough headroom in the skb to insert IP,
1028 	 * UDP and L2TP headers. If not enough, expand it to
1029 	 * make room. Adjust truesize.
1030 	 */
1031 	uhlen = (tunnel->encap == L2TP_ENCAPTYPE_UDP) ? sizeof(*uh) : 0;
1032 	headroom = NET_SKB_PAD + sizeof(struct iphdr) + uhlen + session->hdr_len;
1033 	if (skb_cow_head(skb, headroom)) {
1034 		kfree_skb(skb);
1035 		return NET_XMIT_DROP;
1036 	}
1037 
1038 	/* Setup L2TP header */
1039 	if (tunnel->version == L2TP_HDR_VER_2)
1040 		l2tp_build_l2tpv2_header(session, __skb_push(skb, session->hdr_len));
1041 	else
1042 		l2tp_build_l2tpv3_header(session, __skb_push(skb, session->hdr_len));
1043 
1044 	/* Reset skb netfilter state */
1045 	memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
1046 	IPCB(skb)->flags &= ~(IPSKB_XFRM_TUNNEL_SIZE | IPSKB_XFRM_TRANSFORMED | IPSKB_REROUTED);
1047 	nf_reset_ct(skb);
1048 
1049 	/* L2TP uses its own lockdep subclass to avoid lockdep splats caused by
1050 	 * nested socket calls on the same lockdep socket class. This can
1051 	 * happen when data from a user socket is routed over l2tp, which uses
1052 	 * another userspace socket.
1053 	 */
1054 	spin_lock_nested(&sk->sk_lock.slock, L2TP_DEPTH_NESTING);
1055 
1056 	if (sock_owned_by_user(sk)) {
1057 		kfree_skb(skb);
1058 		ret = NET_XMIT_DROP;
1059 		goto out_unlock;
1060 	}
1061 
1062 	/* Userspace may change the connection status of a userspace-provided
1063 	 * socket at run time, so we must check it under the socket lock.
1064 	 */
1065 	if (tunnel->fd >= 0 && sk->sk_state != TCP_ESTABLISHED) {
1066 		kfree_skb(skb);
1067 		ret = NET_XMIT_DROP;
1068 		goto out_unlock;
1069 	}
1070 
1071 	/* Report transmitted length before we add encap header, which keeps
1072 	 * statistics consistent for both UDP and IP encap tx/rx paths.
1073 	 */
1074 	*len = skb->len;
1075 
1076 	inet = inet_sk(sk);
1077 	switch (tunnel->encap) {
1078 	case L2TP_ENCAPTYPE_UDP:
1079 		/* Setup UDP header */
1080 		__skb_push(skb, sizeof(*uh));
1081 		skb_reset_transport_header(skb);
1082 		uh = udp_hdr(skb);
1083 		uh->source = inet->inet_sport;
1084 		uh->dest = inet->inet_dport;
1085 		udp_len = uhlen + session->hdr_len + data_len;
1086 		uh->len = htons(udp_len);
1087 
1088 		/* Calculate UDP checksum if configured to do so */
1089 #if IS_ENABLED(CONFIG_IPV6)
1090 		if (l2tp_sk_is_v6(sk))
1091 			udp6_set_csum(udp_get_no_check6_tx(sk),
1092 				      skb, &inet6_sk(sk)->saddr,
1093 				      &sk->sk_v6_daddr, udp_len);
1094 		else
1095 #endif
1096 			udp_set_csum(sk->sk_no_check_tx, skb, inet->inet_saddr,
1097 				     inet->inet_daddr, udp_len);
1098 		break;
1099 
1100 	case L2TP_ENCAPTYPE_IP:
1101 		break;
1102 	}
1103 
1104 	ret = l2tp_xmit_queue(tunnel, skb, &inet->cork.fl);
1105 
1106 out_unlock:
1107 	spin_unlock(&sk->sk_lock.slock);
1108 
1109 	return ret;
1110 }
1111 
1112 /* If caller requires the skb to have a ppp header, the header must be
1113  * inserted in the skb data before calling this function.
1114  */
1115 int l2tp_xmit_skb(struct l2tp_session *session, struct sk_buff *skb)
1116 {
1117 	unsigned int len = 0;
1118 	int ret;
1119 
1120 	ret = l2tp_xmit_core(session, skb, &len);
1121 	if (ret == NET_XMIT_SUCCESS) {
1122 		atomic_long_inc(&session->tunnel->stats.tx_packets);
1123 		atomic_long_add(len, &session->tunnel->stats.tx_bytes);
1124 		atomic_long_inc(&session->stats.tx_packets);
1125 		atomic_long_add(len, &session->stats.tx_bytes);
1126 	} else {
1127 		atomic_long_inc(&session->tunnel->stats.tx_errors);
1128 		atomic_long_inc(&session->stats.tx_errors);
1129 	}
1130 	return ret;
1131 }
1132 EXPORT_SYMBOL_GPL(l2tp_xmit_skb);
1133 
1134 /*****************************************************************************
1135  * Tunnel and session create/destroy.
1136  *****************************************************************************/
1137 
1138 /* Tunnel socket destruct hook.
1139  * The tunnel context is deleted only when all session sockets have been
1140  * closed.
1141  */
1142 static void l2tp_tunnel_destruct(struct sock *sk)
1143 {
1144 	struct l2tp_tunnel *tunnel = l2tp_sk_to_tunnel(sk);
1145 
1146 	if (!tunnel)
1147 		goto end;
1148 
1149 	/* Disable udp encapsulation */
1150 	switch (tunnel->encap) {
1151 	case L2TP_ENCAPTYPE_UDP:
1152 		/* No longer an encapsulation socket. See net/ipv4/udp.c */
1153 		WRITE_ONCE(udp_sk(sk)->encap_type, 0);
1154 		udp_sk(sk)->encap_rcv = NULL;
1155 		udp_sk(sk)->encap_destroy = NULL;
1156 		break;
1157 	case L2TP_ENCAPTYPE_IP:
1158 		break;
1159 	}
1160 
1161 	/* Remove hooks into tunnel socket */
1162 	write_lock_bh(&sk->sk_callback_lock);
1163 	sk->sk_destruct = tunnel->old_sk_destruct;
1164 	sk->sk_user_data = NULL;
1165 	write_unlock_bh(&sk->sk_callback_lock);
1166 
1167 	/* Call the original destructor */
1168 	if (sk->sk_destruct)
1169 		(*sk->sk_destruct)(sk);
1170 
1171 	kfree_rcu(tunnel, rcu);
1172 end:
1173 	return;
1174 }
1175 
1176 /* Remove an l2tp session from l2tp_core's hash lists. */
1177 static void l2tp_session_unhash(struct l2tp_session *session)
1178 {
1179 	struct l2tp_tunnel *tunnel = session->tunnel;
1180 
1181 	/* Remove the session from core hashes */
1182 	if (tunnel) {
1183 		/* Remove from the per-tunnel hash */
1184 		spin_lock_bh(&tunnel->hlist_lock);
1185 		hlist_del_init_rcu(&session->hlist);
1186 		spin_unlock_bh(&tunnel->hlist_lock);
1187 
1188 		/* For L2TPv3 we have a per-net hash: remove from there, too */
1189 		if (tunnel->version != L2TP_HDR_VER_2) {
1190 			struct l2tp_net *pn = l2tp_pernet(tunnel->l2tp_net);
1191 
1192 			spin_lock_bh(&pn->l2tp_session_hlist_lock);
1193 			hlist_del_init_rcu(&session->global_hlist);
1194 			spin_unlock_bh(&pn->l2tp_session_hlist_lock);
1195 		}
1196 
1197 		synchronize_rcu();
1198 	}
1199 }
1200 
1201 /* When the tunnel is closed, all the attached sessions need to go too.
1202  */
1203 static void l2tp_tunnel_closeall(struct l2tp_tunnel *tunnel)
1204 {
1205 	struct l2tp_session *session;
1206 	int hash;
1207 
1208 	spin_lock_bh(&tunnel->hlist_lock);
1209 	tunnel->acpt_newsess = false;
1210 	for (hash = 0; hash < L2TP_HASH_SIZE; hash++) {
1211 again:
1212 		hlist_for_each_entry_rcu(session, &tunnel->session_hlist[hash], hlist) {
1213 			hlist_del_init_rcu(&session->hlist);
1214 
1215 			spin_unlock_bh(&tunnel->hlist_lock);
1216 			l2tp_session_delete(session);
1217 			spin_lock_bh(&tunnel->hlist_lock);
1218 
1219 			/* Now restart from the beginning of this hash
1220 			 * chain.  We always remove a session from the
1221 			 * list so we are guaranteed to make forward
1222 			 * progress.
1223 			 */
1224 			goto again;
1225 		}
1226 	}
1227 	spin_unlock_bh(&tunnel->hlist_lock);
1228 }
1229 
1230 /* Tunnel socket destroy hook for UDP encapsulation */
1231 static void l2tp_udp_encap_destroy(struct sock *sk)
1232 {
1233 	struct l2tp_tunnel *tunnel = l2tp_sk_to_tunnel(sk);
1234 
1235 	if (tunnel)
1236 		l2tp_tunnel_delete(tunnel);
1237 }
1238 
1239 static void l2tp_tunnel_remove(struct net *net, struct l2tp_tunnel *tunnel)
1240 {
1241 	struct l2tp_net *pn = l2tp_pernet(net);
1242 
1243 	spin_lock_bh(&pn->l2tp_tunnel_idr_lock);
1244 	idr_remove(&pn->l2tp_tunnel_idr, tunnel->tunnel_id);
1245 	spin_unlock_bh(&pn->l2tp_tunnel_idr_lock);
1246 }
1247 
1248 /* Workqueue tunnel deletion function */
1249 static void l2tp_tunnel_del_work(struct work_struct *work)
1250 {
1251 	struct l2tp_tunnel *tunnel = container_of(work, struct l2tp_tunnel,
1252 						  del_work);
1253 	struct sock *sk = tunnel->sock;
1254 	struct socket *sock = sk->sk_socket;
1255 
1256 	l2tp_tunnel_closeall(tunnel);
1257 
1258 	/* If the tunnel socket was created within the kernel, use
1259 	 * the sk API to release it here.
1260 	 */
1261 	if (tunnel->fd < 0) {
1262 		if (sock) {
1263 			kernel_sock_shutdown(sock, SHUT_RDWR);
1264 			sock_release(sock);
1265 		}
1266 	}
1267 
1268 	l2tp_tunnel_remove(tunnel->l2tp_net, tunnel);
1269 	/* drop initial ref */
1270 	l2tp_tunnel_dec_refcount(tunnel);
1271 
1272 	/* drop workqueue ref */
1273 	l2tp_tunnel_dec_refcount(tunnel);
1274 }
1275 
1276 /* Create a socket for the tunnel, if one isn't set up by
1277  * userspace. This is used for static tunnels where there is no
1278  * managing L2TP daemon.
1279  *
1280  * Since we don't want these sockets to keep a namespace alive by
1281  * themselves, we drop the socket's namespace refcount after creation.
1282  * These sockets are freed when the namespace exits using the pernet
1283  * exit hook.
1284  */
1285 static int l2tp_tunnel_sock_create(struct net *net,
1286 				   u32 tunnel_id,
1287 				   u32 peer_tunnel_id,
1288 				   struct l2tp_tunnel_cfg *cfg,
1289 				   struct socket **sockp)
1290 {
1291 	int err = -EINVAL;
1292 	struct socket *sock = NULL;
1293 	struct udp_port_cfg udp_conf;
1294 
1295 	switch (cfg->encap) {
1296 	case L2TP_ENCAPTYPE_UDP:
1297 		memset(&udp_conf, 0, sizeof(udp_conf));
1298 
1299 #if IS_ENABLED(CONFIG_IPV6)
1300 		if (cfg->local_ip6 && cfg->peer_ip6) {
1301 			udp_conf.family = AF_INET6;
1302 			memcpy(&udp_conf.local_ip6, cfg->local_ip6,
1303 			       sizeof(udp_conf.local_ip6));
1304 			memcpy(&udp_conf.peer_ip6, cfg->peer_ip6,
1305 			       sizeof(udp_conf.peer_ip6));
1306 			udp_conf.use_udp6_tx_checksums =
1307 			  !cfg->udp6_zero_tx_checksums;
1308 			udp_conf.use_udp6_rx_checksums =
1309 			  !cfg->udp6_zero_rx_checksums;
1310 		} else
1311 #endif
1312 		{
1313 			udp_conf.family = AF_INET;
1314 			udp_conf.local_ip = cfg->local_ip;
1315 			udp_conf.peer_ip = cfg->peer_ip;
1316 			udp_conf.use_udp_checksums = cfg->use_udp_checksums;
1317 		}
1318 
1319 		udp_conf.local_udp_port = htons(cfg->local_udp_port);
1320 		udp_conf.peer_udp_port = htons(cfg->peer_udp_port);
1321 
1322 		err = udp_sock_create(net, &udp_conf, &sock);
1323 		if (err < 0)
1324 			goto out;
1325 
1326 		break;
1327 
1328 	case L2TP_ENCAPTYPE_IP:
1329 #if IS_ENABLED(CONFIG_IPV6)
1330 		if (cfg->local_ip6 && cfg->peer_ip6) {
1331 			struct sockaddr_l2tpip6 ip6_addr = {0};
1332 
1333 			err = sock_create_kern(net, AF_INET6, SOCK_DGRAM,
1334 					       IPPROTO_L2TP, &sock);
1335 			if (err < 0)
1336 				goto out;
1337 
1338 			ip6_addr.l2tp_family = AF_INET6;
1339 			memcpy(&ip6_addr.l2tp_addr, cfg->local_ip6,
1340 			       sizeof(ip6_addr.l2tp_addr));
1341 			ip6_addr.l2tp_conn_id = tunnel_id;
1342 			err = kernel_bind(sock, (struct sockaddr *)&ip6_addr,
1343 					  sizeof(ip6_addr));
1344 			if (err < 0)
1345 				goto out;
1346 
1347 			ip6_addr.l2tp_family = AF_INET6;
1348 			memcpy(&ip6_addr.l2tp_addr, cfg->peer_ip6,
1349 			       sizeof(ip6_addr.l2tp_addr));
1350 			ip6_addr.l2tp_conn_id = peer_tunnel_id;
1351 			err = kernel_connect(sock,
1352 					     (struct sockaddr *)&ip6_addr,
1353 					     sizeof(ip6_addr), 0);
1354 			if (err < 0)
1355 				goto out;
1356 		} else
1357 #endif
1358 		{
1359 			struct sockaddr_l2tpip ip_addr = {0};
1360 
1361 			err = sock_create_kern(net, AF_INET, SOCK_DGRAM,
1362 					       IPPROTO_L2TP, &sock);
1363 			if (err < 0)
1364 				goto out;
1365 
1366 			ip_addr.l2tp_family = AF_INET;
1367 			ip_addr.l2tp_addr = cfg->local_ip;
1368 			ip_addr.l2tp_conn_id = tunnel_id;
1369 			err = kernel_bind(sock, (struct sockaddr *)&ip_addr,
1370 					  sizeof(ip_addr));
1371 			if (err < 0)
1372 				goto out;
1373 
1374 			ip_addr.l2tp_family = AF_INET;
1375 			ip_addr.l2tp_addr = cfg->peer_ip;
1376 			ip_addr.l2tp_conn_id = peer_tunnel_id;
1377 			err = kernel_connect(sock, (struct sockaddr *)&ip_addr,
1378 					     sizeof(ip_addr), 0);
1379 			if (err < 0)
1380 				goto out;
1381 		}
1382 		break;
1383 
1384 	default:
1385 		goto out;
1386 	}
1387 
1388 out:
1389 	*sockp = sock;
1390 	if (err < 0 && sock) {
1391 		kernel_sock_shutdown(sock, SHUT_RDWR);
1392 		sock_release(sock);
1393 		*sockp = NULL;
1394 	}
1395 
1396 	return err;
1397 }
1398 
1399 int l2tp_tunnel_create(int fd, int version, u32 tunnel_id, u32 peer_tunnel_id,
1400 		       struct l2tp_tunnel_cfg *cfg, struct l2tp_tunnel **tunnelp)
1401 {
1402 	struct l2tp_tunnel *tunnel = NULL;
1403 	int err;
1404 	enum l2tp_encap_type encap = L2TP_ENCAPTYPE_UDP;
1405 
1406 	if (cfg)
1407 		encap = cfg->encap;
1408 
1409 	tunnel = kzalloc(sizeof(*tunnel), GFP_KERNEL);
1410 	if (!tunnel) {
1411 		err = -ENOMEM;
1412 		goto err;
1413 	}
1414 
1415 	tunnel->version = version;
1416 	tunnel->tunnel_id = tunnel_id;
1417 	tunnel->peer_tunnel_id = peer_tunnel_id;
1418 
1419 	tunnel->magic = L2TP_TUNNEL_MAGIC;
1420 	sprintf(&tunnel->name[0], "tunl %u", tunnel_id);
1421 	spin_lock_init(&tunnel->hlist_lock);
1422 	tunnel->acpt_newsess = true;
1423 
1424 	tunnel->encap = encap;
1425 
1426 	refcount_set(&tunnel->ref_count, 1);
1427 	tunnel->fd = fd;
1428 
1429 	/* Init delete workqueue struct */
1430 	INIT_WORK(&tunnel->del_work, l2tp_tunnel_del_work);
1431 
1432 	INIT_LIST_HEAD(&tunnel->list);
1433 
1434 	err = 0;
1435 err:
1436 	if (tunnelp)
1437 		*tunnelp = tunnel;
1438 
1439 	return err;
1440 }
1441 EXPORT_SYMBOL_GPL(l2tp_tunnel_create);
1442 
1443 static int l2tp_validate_socket(const struct sock *sk, const struct net *net,
1444 				enum l2tp_encap_type encap)
1445 {
1446 	if (!net_eq(sock_net(sk), net))
1447 		return -EINVAL;
1448 
1449 	if (sk->sk_type != SOCK_DGRAM)
1450 		return -EPROTONOSUPPORT;
1451 
1452 	if (sk->sk_family != PF_INET && sk->sk_family != PF_INET6)
1453 		return -EPROTONOSUPPORT;
1454 
1455 	if ((encap == L2TP_ENCAPTYPE_UDP && sk->sk_protocol != IPPROTO_UDP) ||
1456 	    (encap == L2TP_ENCAPTYPE_IP && sk->sk_protocol != IPPROTO_L2TP))
1457 		return -EPROTONOSUPPORT;
1458 
1459 	if (sk->sk_user_data)
1460 		return -EBUSY;
1461 
1462 	return 0;
1463 }
1464 
1465 int l2tp_tunnel_register(struct l2tp_tunnel *tunnel, struct net *net,
1466 			 struct l2tp_tunnel_cfg *cfg)
1467 {
1468 	struct l2tp_net *pn = l2tp_pernet(net);
1469 	u32 tunnel_id = tunnel->tunnel_id;
1470 	struct socket *sock;
1471 	struct sock *sk;
1472 	int ret;
1473 
1474 	spin_lock_bh(&pn->l2tp_tunnel_idr_lock);
1475 	ret = idr_alloc_u32(&pn->l2tp_tunnel_idr, NULL, &tunnel_id, tunnel_id,
1476 			    GFP_ATOMIC);
1477 	spin_unlock_bh(&pn->l2tp_tunnel_idr_lock);
1478 	if (ret)
1479 		return ret == -ENOSPC ? -EEXIST : ret;
1480 
1481 	if (tunnel->fd < 0) {
1482 		ret = l2tp_tunnel_sock_create(net, tunnel->tunnel_id,
1483 					      tunnel->peer_tunnel_id, cfg,
1484 					      &sock);
1485 		if (ret < 0)
1486 			goto err;
1487 	} else {
1488 		sock = sockfd_lookup(tunnel->fd, &ret);
1489 		if (!sock)
1490 			goto err;
1491 	}
1492 
1493 	sk = sock->sk;
1494 	lock_sock(sk);
1495 	write_lock_bh(&sk->sk_callback_lock);
1496 	ret = l2tp_validate_socket(sk, net, tunnel->encap);
1497 	if (ret < 0)
1498 		goto err_inval_sock;
1499 	rcu_assign_sk_user_data(sk, tunnel);
1500 	write_unlock_bh(&sk->sk_callback_lock);
1501 
1502 	if (tunnel->encap == L2TP_ENCAPTYPE_UDP) {
1503 		struct udp_tunnel_sock_cfg udp_cfg = {
1504 			.sk_user_data = tunnel,
1505 			.encap_type = UDP_ENCAP_L2TPINUDP,
1506 			.encap_rcv = l2tp_udp_encap_recv,
1507 			.encap_destroy = l2tp_udp_encap_destroy,
1508 		};
1509 
1510 		setup_udp_tunnel_sock(net, sock, &udp_cfg);
1511 	}
1512 
1513 	tunnel->old_sk_destruct = sk->sk_destruct;
1514 	sk->sk_destruct = &l2tp_tunnel_destruct;
1515 	sk->sk_allocation = GFP_ATOMIC;
1516 	release_sock(sk);
1517 
1518 	sock_hold(sk);
1519 	tunnel->sock = sk;
1520 	tunnel->l2tp_net = net;
1521 
1522 	spin_lock_bh(&pn->l2tp_tunnel_idr_lock);
1523 	idr_replace(&pn->l2tp_tunnel_idr, tunnel, tunnel->tunnel_id);
1524 	spin_unlock_bh(&pn->l2tp_tunnel_idr_lock);
1525 
1526 	trace_register_tunnel(tunnel);
1527 
1528 	if (tunnel->fd >= 0)
1529 		sockfd_put(sock);
1530 
1531 	return 0;
1532 
1533 err_inval_sock:
1534 	write_unlock_bh(&sk->sk_callback_lock);
1535 	release_sock(sk);
1536 
1537 	if (tunnel->fd < 0)
1538 		sock_release(sock);
1539 	else
1540 		sockfd_put(sock);
1541 err:
1542 	l2tp_tunnel_remove(net, tunnel);
1543 	return ret;
1544 }
1545 EXPORT_SYMBOL_GPL(l2tp_tunnel_register);
1546 
1547 /* This function is used by the netlink TUNNEL_DELETE command.
1548  */
1549 void l2tp_tunnel_delete(struct l2tp_tunnel *tunnel)
1550 {
1551 	if (!test_and_set_bit(0, &tunnel->dead)) {
1552 		trace_delete_tunnel(tunnel);
1553 		l2tp_tunnel_inc_refcount(tunnel);
1554 		queue_work(l2tp_wq, &tunnel->del_work);
1555 	}
1556 }
1557 EXPORT_SYMBOL_GPL(l2tp_tunnel_delete);
1558 
1559 void l2tp_session_delete(struct l2tp_session *session)
1560 {
1561 	if (test_and_set_bit(0, &session->dead))
1562 		return;
1563 
1564 	trace_delete_session(session);
1565 	l2tp_session_unhash(session);
1566 	l2tp_session_queue_purge(session);
1567 	if (session->session_close)
1568 		(*session->session_close)(session);
1569 
1570 	l2tp_session_dec_refcount(session);
1571 }
1572 EXPORT_SYMBOL_GPL(l2tp_session_delete);
1573 
1574 /* We come here whenever a session's send_seq, cookie_len or
1575  * l2specific_type parameters are set.
1576  */
1577 void l2tp_session_set_header_len(struct l2tp_session *session, int version)
1578 {
1579 	if (version == L2TP_HDR_VER_2) {
1580 		session->hdr_len = 6;
1581 		if (session->send_seq)
1582 			session->hdr_len += 4;
1583 	} else {
1584 		session->hdr_len = 4 + session->cookie_len;
1585 		session->hdr_len += l2tp_get_l2specific_len(session);
1586 		if (session->tunnel->encap == L2TP_ENCAPTYPE_UDP)
1587 			session->hdr_len += 4;
1588 	}
1589 }
1590 EXPORT_SYMBOL_GPL(l2tp_session_set_header_len);
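
/* Editor's worked example for the function above: an L2TPv3 session
 * over UDP with a 4-byte cookie and the default L2-specific sublayer
 * (assuming l2tp_get_l2specific_len() returns 4 for the default type)
 * gets hdr_len = 4 (session id) + 4 (cookie) + 4 (sublayer) + 4 (UDP
 * flag bytes) = 16; the same session over IP encap omits the flag
 * bytes, giving 12. An L2TPv2 session uses 6 bytes, or 10 with
 * send_seq enabled.
 */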
1591 
1592 struct l2tp_session *l2tp_session_create(int priv_size, struct l2tp_tunnel *tunnel, u32 session_id,
1593 					 u32 peer_session_id, struct l2tp_session_cfg *cfg)
1594 {
1595 	struct l2tp_session *session;
1596 
1597 	session = kzalloc(sizeof(*session) + priv_size, GFP_KERNEL);
1598 	if (session) {
1599 		session->magic = L2TP_SESSION_MAGIC;
1600 		session->tunnel = tunnel;
1601 
1602 		session->session_id = session_id;
1603 		session->peer_session_id = peer_session_id;
1604 		session->nr = 0;
1605 		if (tunnel->version == L2TP_HDR_VER_2)
1606 			session->nr_max = 0xffff;
1607 		else
1608 			session->nr_max = 0xffffff;
1609 		session->nr_window_size = session->nr_max / 2;
1610 		session->nr_oos_count_max = 4;
1611 
1612 		/* Use NR of first received packet */
1613 		session->reorder_skip = 1;
1614 
1615 		sprintf(&session->name[0], "sess %u/%u",
1616 			tunnel->tunnel_id, session->session_id);
1617 
1618 		skb_queue_head_init(&session->reorder_q);
1619 
1620 		INIT_HLIST_NODE(&session->hlist);
1621 		INIT_HLIST_NODE(&session->global_hlist);
1622 
1623 		if (cfg) {
1624 			session->pwtype = cfg->pw_type;
1625 			session->send_seq = cfg->send_seq;
1626 			session->recv_seq = cfg->recv_seq;
1627 			session->lns_mode = cfg->lns_mode;
1628 			session->reorder_timeout = cfg->reorder_timeout;
1629 			session->l2specific_type = cfg->l2specific_type;
1630 			session->cookie_len = cfg->cookie_len;
1631 			memcpy(&session->cookie[0], &cfg->cookie[0], cfg->cookie_len);
1632 			session->peer_cookie_len = cfg->peer_cookie_len;
1633 			memcpy(&session->peer_cookie[0], &cfg->peer_cookie[0], cfg->peer_cookie_len);
1634 		}
1635 
1636 		l2tp_session_set_header_len(session, tunnel->version);
1637 
1638 		refcount_set(&session->ref_count, 1);
1639 
1640 		return session;
1641 	}
1642 
1643 	return ERR_PTR(-ENOMEM);
1644 }
1645 EXPORT_SYMBOL_GPL(l2tp_session_create);
1646 
1647 /*****************************************************************************
1648  * Init and cleanup
1649  *****************************************************************************/
1650 
1651 static __net_init int l2tp_init_net(struct net *net)
1652 {
1653 	struct l2tp_net *pn = net_generic(net, l2tp_net_id);
1654 	int hash;
1655 
1656 	idr_init(&pn->l2tp_tunnel_idr);
1657 	spin_lock_init(&pn->l2tp_tunnel_idr_lock);
1658 
1659 	for (hash = 0; hash < L2TP_HASH_SIZE_2; hash++)
1660 		INIT_HLIST_HEAD(&pn->l2tp_session_hlist[hash]);
1661 
1662 	spin_lock_init(&pn->l2tp_session_hlist_lock);
1663 
1664 	return 0;
1665 }
1666 
1667 static __net_exit void l2tp_exit_net(struct net *net)
1668 {
1669 	struct l2tp_net *pn = l2tp_pernet(net);
1670 	struct l2tp_tunnel *tunnel = NULL;
1671 	unsigned long tunnel_id, tmp;
1672 	int hash;
1673 
1674 	rcu_read_lock_bh();
1675 	idr_for_each_entry_ul(&pn->l2tp_tunnel_idr, tunnel, tmp, tunnel_id) {
1676 		if (tunnel)
1677 			l2tp_tunnel_delete(tunnel);
1678 	}
1679 	rcu_read_unlock_bh();
1680 
1681 	if (l2tp_wq)
1682 		flush_workqueue(l2tp_wq);
1683 	rcu_barrier();
1684 
1685 	for (hash = 0; hash < L2TP_HASH_SIZE_2; hash++)
1686 		WARN_ON_ONCE(!hlist_empty(&pn->l2tp_session_hlist[hash]));
1687 	idr_destroy(&pn->l2tp_tunnel_idr);
1688 }
1689 
1690 static struct pernet_operations l2tp_net_ops = {
1691 	.init = l2tp_init_net,
1692 	.exit = l2tp_exit_net,
1693 	.id   = &l2tp_net_id,
1694 	.size = sizeof(struct l2tp_net),
1695 };
1696 
1697 static int __init l2tp_init(void)
1698 {
1699 	int rc = 0;
1700 
1701 	rc = register_pernet_device(&l2tp_net_ops);
1702 	if (rc)
1703 		goto out;
1704 
1705 	l2tp_wq = alloc_workqueue("l2tp", WQ_UNBOUND, 0);
1706 	if (!l2tp_wq) {
1707 		pr_err("alloc_workqueue failed\n");
1708 		unregister_pernet_device(&l2tp_net_ops);
1709 		rc = -ENOMEM;
1710 		goto out;
1711 	}
1712 
1713 	pr_info("L2TP core driver, %s\n", L2TP_DRV_VERSION);
1714 
1715 out:
1716 	return rc;
1717 }
1718 
1719 static void __exit l2tp_exit(void)
1720 {
1721 	unregister_pernet_device(&l2tp_net_ops);
1722 	if (l2tp_wq) {
1723 		destroy_workqueue(l2tp_wq);
1724 		l2tp_wq = NULL;
1725 	}
1726 }
1727 
1728 module_init(l2tp_init);
1729 module_exit(l2tp_exit);
1730 
1731 MODULE_AUTHOR("James Chapman <jchapman@katalix.com>");
1732 MODULE_DESCRIPTION("L2TP core");
1733 MODULE_LICENSE("GPL");
1734 MODULE_VERSION(L2TP_DRV_VERSION);
1735