// SPDX-License-Identifier: GPL-2.0-or-later
/* L2TPv3 IP encapsulation support for IPv6
 *
 * Copyright (c) 2012 Katalix Systems Ltd
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/icmp.h>
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/random.h>
#include <linux/socket.h>
#include <linux/l2tp.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <net/sock.h>
#include <net/ip.h>
#include <net/icmp.h>
#include <net/udp.h>
#include <net/inet_common.h>
#include <net/tcp_states.h>
#include <net/protocol.h>
#include <net/xfrm.h>

#include <net/transp_v6.h>
#include <net/addrconf.h>
#include <net/ip6_route.h>

#include "l2tp_core.h"

struct l2tp_ip6_sock {
	/* inet_sock has to be the first member of l2tp_ip6_sock */
	struct inet_sock	inet;

	u32			conn_id;
	u32			peer_conn_id;

	struct ipv6_pinfo	inet6;
};

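/* l2tp_ip6_table holds unbound/unconnected sockets; l2tp_ip6_bind_table
 * holds sockets once they are bound to a local address and connection id.
 * Both lists are protected by l2tp_ip6_lock.
 */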
static DEFINE_RWLOCK(l2tp_ip6_lock);
static struct hlist_head l2tp_ip6_table;
static struct hlist_head l2tp_ip6_bind_table;

static inline struct l2tp_ip6_sock *l2tp_ip6_sk(const struct sock *sk)
{
	return (struct l2tp_ip6_sock *)sk;
}

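/* Find a bound socket matching the given addresses, interface and tunnel id.
 * An unspecified address or a zero interface index acts as a wildcard.
 * Must be called with l2tp_ip6_lock held.
 */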
static struct sock *__l2tp_ip6_bind_lookup(const struct net *net,
					   const struct in6_addr *laddr,
					   const struct in6_addr *raddr,
					   int dif, u32 tunnel_id)
{
	struct sock *sk;

	sk_for_each_bound(sk, &l2tp_ip6_bind_table) {
		const struct in6_addr *sk_laddr = inet6_rcv_saddr(sk);
		const struct in6_addr *sk_raddr = &sk->sk_v6_daddr;
		const struct l2tp_ip6_sock *l2tp = l2tp_ip6_sk(sk);
		int bound_dev_if;

		if (!net_eq(sock_net(sk), net))
			continue;

		bound_dev_if = READ_ONCE(sk->sk_bound_dev_if);
		if (bound_dev_if && dif && bound_dev_if != dif)
			continue;

		if (sk_laddr && !ipv6_addr_any(sk_laddr) &&
		    !ipv6_addr_any(laddr) && !ipv6_addr_equal(sk_laddr, laddr))
			continue;

		if (!ipv6_addr_any(sk_raddr) && raddr &&
		    !ipv6_addr_any(raddr) && !ipv6_addr_equal(sk_raddr, raddr))
			continue;

		if (l2tp->conn_id != tunnel_id)
			continue;

		goto found;
	}

	sk = NULL;
found:
	return sk;
}

/* When processing receive frames, there are two cases to
 * consider. Data frames consist of a non-zero session-id and an
 * optional cookie. Control frames consist of a regular L2TP header
 * preceded by 32-bits of zeros.
 *
 * L2TPv3 Session Header Over IP
 *
 *  0                   1                   2                   3
 *  0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 * |                           Session ID                          |
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 * |               Cookie (optional, maximum 64 bits)...
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 *                                                                 |
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 *
 * L2TPv3 Control Message Header Over IP
 *
 *  0                   1                   2                   3
 *  0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 * |                      (32 bits of zeros)                       |
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 * |T|L|x|x|S|x|x|x|x|x|x|x|  Ver  |             Length            |
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 * |                     Control Connection ID                     |
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 * |               Ns              |               Nr              |
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 *
 * All control frames are passed to userspace.
 */
static int l2tp_ip6_recv(struct sk_buff *skb)
{
	struct net *net = dev_net(skb->dev);
	struct sock *sk;
	u32 session_id;
	u32 tunnel_id;
	unsigned char *ptr, *optr;
	struct l2tp_session *session;
	struct l2tp_tunnel *tunnel = NULL;
	struct ipv6hdr *iph;

	if (!pskb_may_pull(skb, 4))
		goto discard;

	/* Point to L2TP header */
	optr = skb->data;
	ptr = skb->data;
	session_id = ntohl(*((__be32 *)ptr));
	ptr += 4;

	/* RFC3931: L2TP/IP packets have the first 4 bytes containing
	 * the session_id. If it is 0, the packet is an L2TP control
	 * frame and the session_id value can be discarded.
	 */
	if (session_id == 0) {
		__skb_pull(skb, 4);
		goto pass_up;
	}

	/* Ok, this is a data packet. Lookup the session. */
	session = l2tp_session_get(net, session_id);
	if (!session)
		goto discard;

	tunnel = session->tunnel;
	if (!tunnel)
		goto discard_sess;

	if (l2tp_v3_ensure_opt_in_linear(session, skb, &ptr, &optr))
		goto discard_sess;

	l2tp_recv_common(session, skb, ptr, optr, 0, skb->len);
	l2tp_session_dec_refcount(session);

	return 0;

pass_up:
	/* Get the tunnel_id from the L2TP header */
	if (!pskb_may_pull(skb, 12))
		goto discard;

	if ((skb->data[0] & 0xc0) != 0xc0)
		goto discard;

	tunnel_id = ntohl(*(__be32 *)&skb->data[4]);
	iph = ipv6_hdr(skb);

	read_lock_bh(&l2tp_ip6_lock);
	sk = __l2tp_ip6_bind_lookup(net, &iph->daddr, &iph->saddr,
				    inet6_iif(skb), tunnel_id);
	if (!sk) {
		read_unlock_bh(&l2tp_ip6_lock);
		goto discard;
	}
	sock_hold(sk);
	read_unlock_bh(&l2tp_ip6_lock);

	if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
		goto discard_put;

	nf_reset_ct(skb);

	return sk_receive_skb(sk, skb, 1);

discard_sess:
	l2tp_session_dec_refcount(session);
	goto discard;

discard_put:
	sock_put(sk);

discard:
	kfree_skb(skb);
	return 0;
}

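/* Sockets live in l2tp_ip6_table until l2tp_ip6_bind() moves them to
 * l2tp_ip6_bind_table.
 */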
static int l2tp_ip6_hash(struct sock *sk)
{
	if (sk_unhashed(sk)) {
		write_lock_bh(&l2tp_ip6_lock);
		sk_add_node(sk, &l2tp_ip6_table);
		write_unlock_bh(&l2tp_ip6_lock);
	}
	return 0;
}

static void l2tp_ip6_unhash(struct sock *sk)
{
	if (sk_unhashed(sk))
		return;
	write_lock_bh(&l2tp_ip6_lock);
	sk_del_node_init(sk);
	write_unlock_bh(&l2tp_ip6_lock);
}

static int l2tp_ip6_open(struct sock *sk)
{
	/* Prevent autobind. We don't have ports. */
	inet_sk(sk)->inet_num = IPPROTO_L2TP;

	l2tp_ip6_hash(sk);
	return 0;
}

static void l2tp_ip6_close(struct sock *sk, long timeout)
{
	write_lock_bh(&l2tp_ip6_lock);
	hlist_del_init(&sk->sk_bind_node);
	sk_del_node_init(sk);
	write_unlock_bh(&l2tp_ip6_lock);

	sk_common_release(sk);
}

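/* Flush any frames still queued for transmit and delete the tunnel (if any)
 * that uses this socket as its transport.
 */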
static void l2tp_ip6_destroy_sock(struct sock *sk)
{
	struct l2tp_tunnel *tunnel = l2tp_sk_to_tunnel(sk);

	lock_sock(sk);
	ip6_flush_pending_frames(sk);
	release_sock(sk);

	if (tunnel)
		l2tp_tunnel_delete(tunnel);
}

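/* Bind the socket to a local address and L2TP connection (tunnel) id.
 * Mapped and multicast addresses are rejected, link-local addresses need a
 * bound interface, and -EADDRINUSE is returned if another socket is already
 * bound to the same address, interface and connection id.
 */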
static int l2tp_ip6_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len)
{
	struct inet_sock *inet = inet_sk(sk);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct sockaddr_l2tpip6 *addr = (struct sockaddr_l2tpip6 *)uaddr;
	struct net *net = sock_net(sk);
	__be32 v4addr = 0;
	int bound_dev_if;
	int addr_type;
	int err;

	if (addr->l2tp_family != AF_INET6)
		return -EINVAL;
	if (addr_len < sizeof(*addr))
		return -EINVAL;

	addr_type = ipv6_addr_type(&addr->l2tp_addr);

	/* l2tp_ip6 sockets are IPv6 only */
	if (addr_type == IPV6_ADDR_MAPPED)
		return -EADDRNOTAVAIL;

	/* L2TP is point-to-point, not multicast */
	if (addr_type & IPV6_ADDR_MULTICAST)
		return -EADDRNOTAVAIL;

	lock_sock(sk);

	err = -EINVAL;
	if (!sock_flag(sk, SOCK_ZAPPED))
		goto out_unlock;

	if (sk->sk_state != TCP_CLOSE)
		goto out_unlock;

	bound_dev_if = sk->sk_bound_dev_if;

	/* Check if the address belongs to the host. */
	rcu_read_lock();
	if (addr_type != IPV6_ADDR_ANY) {
		struct net_device *dev = NULL;

		if (addr_type & IPV6_ADDR_LINKLOCAL) {
			if (addr->l2tp_scope_id)
				bound_dev_if = addr->l2tp_scope_id;

			/* Binding to link-local address requires an
			 * interface.
			 */
			if (!bound_dev_if)
				goto out_unlock_rcu;

			err = -ENODEV;
			dev = dev_get_by_index_rcu(sock_net(sk), bound_dev_if);
			if (!dev)
				goto out_unlock_rcu;
		}

		/* ipv4 addr of the socket is invalid. Only the
		 * unspecified and mapped address have a v4 equivalent.
		 */
		v4addr = LOOPBACK4_IPV6;
		err = -EADDRNOTAVAIL;
		if (!ipv6_chk_addr(sock_net(sk), &addr->l2tp_addr, dev, 0))
			goto out_unlock_rcu;
	}
	rcu_read_unlock();

	write_lock_bh(&l2tp_ip6_lock);
	if (__l2tp_ip6_bind_lookup(net, &addr->l2tp_addr, NULL, bound_dev_if,
				   addr->l2tp_conn_id)) {
		write_unlock_bh(&l2tp_ip6_lock);
		err = -EADDRINUSE;
		goto out_unlock;
	}

	inet->inet_saddr = v4addr;
	inet->inet_rcv_saddr = v4addr;
	sk->sk_bound_dev_if = bound_dev_if;
	sk->sk_v6_rcv_saddr = addr->l2tp_addr;
	np->saddr = addr->l2tp_addr;

	l2tp_ip6_sk(sk)->conn_id = addr->l2tp_conn_id;

	sk_add_bind_node(sk, &l2tp_ip6_bind_table);
	sk_del_node_init(sk);
	write_unlock_bh(&l2tp_ip6_lock);

	sock_reset_flag(sk, SOCK_ZAPPED);
	release_sock(sk);
	return 0;

out_unlock_rcu:
	rcu_read_unlock();
out_unlock:
	release_sock(sk);

	return err;
}

static int l2tp_ip6_connect(struct sock *sk, struct sockaddr *uaddr,
			    int addr_len)
{
	struct sockaddr_l2tpip6 *lsa = (struct sockaddr_l2tpip6 *)uaddr;
	struct sockaddr_in6 *usin = (struct sockaddr_in6 *)uaddr;
	struct in6_addr *daddr;
	int addr_type;
	int rc;

	if (addr_len < sizeof(*lsa))
		return -EINVAL;

	if (usin->sin6_family != AF_INET6)
		return -EINVAL;

	addr_type = ipv6_addr_type(&usin->sin6_addr);
	if (addr_type & IPV6_ADDR_MULTICAST)
		return -EINVAL;

	if (addr_type & IPV6_ADDR_MAPPED) {
		daddr = &usin->sin6_addr;
		if (ipv4_is_multicast(daddr->s6_addr32[3]))
			return -EINVAL;
	}

	lock_sock(sk);

	/* Must bind first - autobinding does not work */
	if (sock_flag(sk, SOCK_ZAPPED)) {
		rc = -EINVAL;
		goto out_sk;
	}

	rc = __ip6_datagram_connect(sk, uaddr, addr_len);
	if (rc < 0)
		goto out_sk;

	l2tp_ip6_sk(sk)->peer_conn_id = lsa->l2tp_conn_id;

	write_lock_bh(&l2tp_ip6_lock);
	hlist_del_init(&sk->sk_bind_node);
	sk_add_bind_node(sk, &l2tp_ip6_bind_table);
	write_unlock_bh(&l2tp_ip6_lock);

out_sk:
	release_sock(sk);

	return rc;
}

static int l2tp_ip6_disconnect(struct sock *sk, int flags)
{
	if (sock_flag(sk, SOCK_ZAPPED))
		return 0;

	return __udp_disconnect(sk, flags);
}

static int l2tp_ip6_getname(struct socket *sock, struct sockaddr *uaddr,
			    int peer)
{
	struct sockaddr_l2tpip6 *lsa = (struct sockaddr_l2tpip6 *)uaddr;
	struct sock *sk = sock->sk;
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct l2tp_ip6_sock *lsk = l2tp_ip6_sk(sk);

	lsa->l2tp_family = AF_INET6;
	lsa->l2tp_flowinfo = 0;
	lsa->l2tp_scope_id = 0;
	lsa->l2tp_unused = 0;
	if (peer) {
		if (!lsk->peer_conn_id)
			return -ENOTCONN;
		lsa->l2tp_conn_id = lsk->peer_conn_id;
		lsa->l2tp_addr = sk->sk_v6_daddr;
		if (np->sndflow)
			lsa->l2tp_flowinfo = np->flow_label;
	} else {
		if (ipv6_addr_any(&sk->sk_v6_rcv_saddr))
			lsa->l2tp_addr = np->saddr;
		else
			lsa->l2tp_addr = sk->sk_v6_rcv_saddr;

		lsa->l2tp_conn_id = lsk->conn_id;
	}
	if (ipv6_addr_type(&lsa->l2tp_addr) & IPV6_ADDR_LINKLOCAL)
		lsa->l2tp_scope_id = READ_ONCE(sk->sk_bound_dev_if);
	return sizeof(*lsa);
}

static int l2tp_ip6_backlog_recv(struct sock *sk, struct sk_buff *skb)
{
	int rc;

	/* Charge it to the socket, dropping if the queue is full. */
	rc = sock_queue_rcv_skb(sk, skb);
	if (rc < 0)
		goto drop;

	return 0;

drop:
	IP_INC_STATS(sock_net(sk), IPSTATS_MIB_INDISCARDS);
	kfree_skb(skb);
	return -1;
}

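/* Zero the 4-byte session-id slot reserved at the transport header by
 * l2tp_ip6_sendmsg(), marking the frame as a control message, then hand
 * the pending frames to IPv6 for transmission.
 */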
static int l2tp_ip6_push_pending_frames(struct sock *sk)
{
	struct sk_buff *skb;
	__be32 *transhdr = NULL;
	int err = 0;

	skb = skb_peek(&sk->sk_write_queue);
	if (!skb)
		goto out;

	transhdr = (__be32 *)skb_transport_header(skb);
	*transhdr = 0;

	err = ip6_push_pending_frames(sk);

out:
	return err;
}

/* Userspace will call sendmsg() on the tunnel socket to send L2TP
 * control frames.
 */
static int l2tp_ip6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
{
	struct ipv6_txoptions opt_space;
	DECLARE_SOCKADDR(struct sockaddr_l2tpip6 *, lsa, msg->msg_name);
	struct in6_addr *daddr, *final_p, final;
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct ipv6_txoptions *opt_to_free = NULL;
	struct ipv6_txoptions *opt = NULL;
	struct ip6_flowlabel *flowlabel = NULL;
	struct dst_entry *dst = NULL;
	struct flowi6 fl6;
	struct ipcm6_cookie ipc6;
	int addr_len = msg->msg_namelen;
	int transhdrlen = 4; /* zero session-id */
	int ulen;
	int err;

	/* Rough check on arithmetic overflow,
	 * better check is made in ip6_append_data().
	 */
	if (len > INT_MAX - transhdrlen)
		return -EMSGSIZE;

	/* Mirror BSD error message compatibility */
	if (msg->msg_flags & MSG_OOB)
		return -EOPNOTSUPP;

	/* Get and verify the address */
	memset(&fl6, 0, sizeof(fl6));

	fl6.flowi6_mark = READ_ONCE(sk->sk_mark);
	fl6.flowi6_uid = sk->sk_uid;

	ipcm6_init(&ipc6);

	if (lsa) {
		if (addr_len < SIN6_LEN_RFC2133)
			return -EINVAL;

		if (lsa->l2tp_family && lsa->l2tp_family != AF_INET6)
			return -EAFNOSUPPORT;

		daddr = &lsa->l2tp_addr;
		if (np->sndflow) {
			fl6.flowlabel = lsa->l2tp_flowinfo & IPV6_FLOWINFO_MASK;
			if (fl6.flowlabel & IPV6_FLOWLABEL_MASK) {
				flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
				if (IS_ERR(flowlabel))
					return -EINVAL;
			}
		}

		/* Otherwise it will be difficult to maintain
		 * sk->sk_dst_cache.
		 */
		if (sk->sk_state == TCP_ESTABLISHED &&
		    ipv6_addr_equal(daddr, &sk->sk_v6_daddr))
			daddr = &sk->sk_v6_daddr;

		if (addr_len >= sizeof(struct sockaddr_in6) &&
		    lsa->l2tp_scope_id &&
		    ipv6_addr_type(daddr) & IPV6_ADDR_LINKLOCAL)
			fl6.flowi6_oif = lsa->l2tp_scope_id;
	} else {
		if (sk->sk_state != TCP_ESTABLISHED)
			return -EDESTADDRREQ;

		daddr = &sk->sk_v6_daddr;
		fl6.flowlabel = np->flow_label;
	}

	if (fl6.flowi6_oif == 0)
		fl6.flowi6_oif = READ_ONCE(sk->sk_bound_dev_if);

	if (msg->msg_controllen) {
		opt = &opt_space;
		memset(opt, 0, sizeof(struct ipv6_txoptions));
		opt->tot_len = sizeof(struct ipv6_txoptions);
		ipc6.opt = opt;

		err = ip6_datagram_send_ctl(sock_net(sk), sk, msg, &fl6, &ipc6);
		if (err < 0) {
			fl6_sock_release(flowlabel);
			return err;
		}
		if ((fl6.flowlabel & IPV6_FLOWLABEL_MASK) && !flowlabel) {
			flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
			if (IS_ERR(flowlabel))
				return -EINVAL;
		}
		if (!(opt->opt_nflen | opt->opt_flen))
			opt = NULL;
	}

	if (!opt) {
		opt = txopt_get(np);
		opt_to_free = opt;
	}
	if (flowlabel)
		opt = fl6_merge_options(&opt_space, flowlabel, opt);
	opt = ipv6_fixup_options(&opt_space, opt);
	ipc6.opt = opt;

	fl6.flowi6_proto = sk->sk_protocol;
	if (!ipv6_addr_any(daddr))
		fl6.daddr = *daddr;
	else
		fl6.daddr.s6_addr[15] = 0x1; /* :: means loopback (BSD'ism) */
	if (ipv6_addr_any(&fl6.saddr) && !ipv6_addr_any(&np->saddr))
		fl6.saddr = np->saddr;

	final_p = fl6_update_dst(&fl6, opt, &final);

	if (!fl6.flowi6_oif && ipv6_addr_is_multicast(&fl6.daddr))
		fl6.flowi6_oif = np->mcast_oif;
	else if (!fl6.flowi6_oif)
		fl6.flowi6_oif = np->ucast_oif;

	security_sk_classify_flow(sk, flowi6_to_flowi_common(&fl6));

	if (ipc6.tclass < 0)
		ipc6.tclass = np->tclass;

	fl6.flowlabel = ip6_make_flowinfo(ipc6.tclass, fl6.flowlabel);

	dst = ip6_dst_lookup_flow(sock_net(sk), sk, &fl6, final_p);
	if (IS_ERR(dst)) {
		err = PTR_ERR(dst);
		goto out;
	}

	if (ipc6.hlimit < 0)
		ipc6.hlimit = ip6_sk_dst_hoplimit(np, &fl6, dst);

	if (ipc6.dontfrag < 0)
		ipc6.dontfrag = np->dontfrag;

	if (msg->msg_flags & MSG_CONFIRM)
		goto do_confirm;

back_from_confirm:
	lock_sock(sk);
	ulen = len + (skb_queue_empty(&sk->sk_write_queue) ? transhdrlen : 0);
	err = ip6_append_data(sk, ip_generic_getfrag, msg,
			      ulen, transhdrlen, &ipc6,
			      &fl6, (struct rt6_info *)dst,
			      msg->msg_flags);
	if (err)
		ip6_flush_pending_frames(sk);
	else if (!(msg->msg_flags & MSG_MORE))
		err = l2tp_ip6_push_pending_frames(sk);
	release_sock(sk);
done:
	dst_release(dst);
out:
	fl6_sock_release(flowlabel);
	txopt_put(opt_to_free);

	return err < 0 ? err : len;

do_confirm:
	if (msg->msg_flags & MSG_PROBE)
		dst_confirm_neigh(dst, &fl6.daddr);
	if (!(msg->msg_flags & MSG_PROBE) || len)
		goto back_from_confirm;
	err = 0;
	goto done;
}

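/* Userspace will call recvmsg() on the tunnel socket to receive L2TP
 * control frames.
 */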
static int l2tp_ip6_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
			    int flags, int *addr_len)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	DECLARE_SOCKADDR(struct sockaddr_l2tpip6 *, lsa, msg->msg_name);
	size_t copied = 0;
	int err = -EOPNOTSUPP;
	struct sk_buff *skb;

	if (flags & MSG_OOB)
		goto out;

	if (flags & MSG_ERRQUEUE)
		return ipv6_recv_error(sk, msg, len, addr_len);

	skb = skb_recv_datagram(sk, flags, &err);
	if (!skb)
		goto out;

	copied = skb->len;
	if (len < copied) {
		msg->msg_flags |= MSG_TRUNC;
		copied = len;
	}

	err = skb_copy_datagram_msg(skb, 0, msg, copied);
	if (err)
		goto done;

	sock_recv_timestamp(msg, sk, skb);

	/* Copy the address. */
	if (lsa) {
		lsa->l2tp_family = AF_INET6;
		lsa->l2tp_unused = 0;
		lsa->l2tp_addr = ipv6_hdr(skb)->saddr;
		lsa->l2tp_flowinfo = 0;
		lsa->l2tp_scope_id = 0;
		lsa->l2tp_conn_id = 0;
		if (ipv6_addr_type(&lsa->l2tp_addr) & IPV6_ADDR_LINKLOCAL)
			lsa->l2tp_scope_id = inet6_iif(skb);
		*addr_len = sizeof(*lsa);
	}

	if (np->rxopt.all)
		ip6_datagram_recv_ctl(sk, msg, skb);

	if (flags & MSG_TRUNC)
		copied = skb->len;
done:
	skb_free_datagram(sk, skb);
out:
	return err ? err : copied;
}

static struct proto l2tp_ip6_prot = {
	.name			= "L2TP/IPv6",
	.owner			= THIS_MODULE,
	.init			= l2tp_ip6_open,
	.close			= l2tp_ip6_close,
	.bind			= l2tp_ip6_bind,
	.connect		= l2tp_ip6_connect,
	.disconnect		= l2tp_ip6_disconnect,
	.ioctl			= l2tp_ioctl,
	.destroy		= l2tp_ip6_destroy_sock,
	.setsockopt		= ipv6_setsockopt,
	.getsockopt		= ipv6_getsockopt,
	.sendmsg		= l2tp_ip6_sendmsg,
	.recvmsg		= l2tp_ip6_recvmsg,
	.backlog_rcv		= l2tp_ip6_backlog_recv,
	.hash			= l2tp_ip6_hash,
	.unhash			= l2tp_ip6_unhash,
	.obj_size		= sizeof(struct l2tp_ip6_sock),
	.ipv6_pinfo_offset	= offsetof(struct l2tp_ip6_sock, inet6),
};

static const struct proto_ops l2tp_ip6_ops = {
	.family			= PF_INET6,
	.owner			= THIS_MODULE,
	.release		= inet6_release,
	.bind			= inet6_bind,
	.connect		= inet_dgram_connect,
	.socketpair		= sock_no_socketpair,
	.accept			= sock_no_accept,
	.getname		= l2tp_ip6_getname,
	.poll			= datagram_poll,
	.ioctl			= inet6_ioctl,
	.gettstamp		= sock_gettstamp,
	.listen			= sock_no_listen,
	.shutdown		= inet_shutdown,
	.setsockopt		= sock_common_setsockopt,
	.getsockopt		= sock_common_getsockopt,
	.sendmsg		= inet_sendmsg,
	.recvmsg		= sock_common_recvmsg,
	.mmap			= sock_no_mmap,
#ifdef CONFIG_COMPAT
	.compat_ioctl		= inet6_compat_ioctl,
#endif
};

static struct inet_protosw l2tp_ip6_protosw = {
	.type		= SOCK_DGRAM,
	.protocol	= IPPROTO_L2TP,
	.prot		= &l2tp_ip6_prot,
	.ops		= &l2tp_ip6_ops,
};

static struct inet6_protocol l2tp_ip6_protocol __read_mostly = {
	.handler	= l2tp_ip6_recv,
};

static int __init l2tp_ip6_init(void)
{
	int err;

	pr_info("L2TP IP encapsulation support for IPv6 (L2TPv3)\n");

	err = proto_register(&l2tp_ip6_prot, 1);
	if (err != 0)
		goto out;

	err = inet6_add_protocol(&l2tp_ip6_protocol, IPPROTO_L2TP);
	if (err)
		goto out1;

	inet6_register_protosw(&l2tp_ip6_protosw);
	return 0;

out1:
	proto_unregister(&l2tp_ip6_prot);
out:
	return err;
}

static void __exit l2tp_ip6_exit(void)
{
	inet6_unregister_protosw(&l2tp_ip6_protosw);
	inet6_del_protocol(&l2tp_ip6_protocol, IPPROTO_L2TP);
	proto_unregister(&l2tp_ip6_prot);
}

module_init(l2tp_ip6_init);
module_exit(l2tp_ip6_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Chris Elston <celston@katalix.com>");
MODULE_DESCRIPTION("L2TP IP encapsulation for IPv6");
MODULE_VERSION("1.0");

/* Use the values of SOCK_DGRAM (2) as type and IPPROTO_L2TP (115) as protocol,
 * because __stringify doesn't like enums
 */
MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_INET6, 115, 2);
MODULE_ALIAS_NET_PF_PROTO(PF_INET6, 115);