/*
 * L2TP core.
 *
 * Copyright (c) 2008,2009,2010 Katalix Systems Ltd
 *
 * This file contains some code of the original L2TPv2 pppol2tp
 * driver, which has the following copyright:
 *
 * Authors:	Martijn van Oosterhout <kleptog@svana.org>
 *		James Chapman (jchapman@katalix.com)
 * Contributors:
 *		Michal Ostrowski <mostrows@speakeasy.net>
 *		Arnaldo Carvalho de Melo <acme@xconectiva.com.br>
 *		David S. Miller (davem@redhat.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/string.h>
#include <linux/list.h>
#include <linux/rculist.h>
#include <linux/uaccess.h>

#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/jiffies.h>

#include <linux/netdevice.h>
#include <linux/net.h>
#include <linux/inetdevice.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/udp.h>
#include <linux/l2tp.h>
#include <linux/hash.h>
#include <linux/sort.h>
#include <linux/file.h>
#include <linux/nsproxy.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>
#include <net/dst.h>
#include <net/ip.h>
#include <net/udp.h>
#include <net/inet_common.h>
#include <net/xfrm.h>
#include <net/protocol.h>
#include <net/inet6_connection_sock.h>
#include <net/inet_ecn.h>
#include <net/ip6_route.h>
#include <net/ip6_checksum.h>

#include <asm/byteorder.h>
#include <linux/atomic.h>

#include "l2tp_core.h"

#define L2TP_DRV_VERSION	"V2.0"

/* L2TP header constants */
#define L2TP_HDRFLAG_T	   0x8000	/* control (set) vs data (clear) message */
#define L2TP_HDRFLAG_L	   0x4000	/* length field present */
#define L2TP_HDRFLAG_S	   0x0800	/* sequence numbers (Ns/Nr) present */
#define L2TP_HDRFLAG_O	   0x0200	/* offset field present */
#define L2TP_HDRFLAG_P	   0x0100	/* priority */

#define L2TP_HDR_VER_MASK  0x000F
#define L2TP_HDR_VER_2	   0x0002
#define L2TP_HDR_VER_3	   0x0003

/* L2TPv3 default L2-specific sublayer */
#define L2TP_SLFLAG_S	   0x40000000	/* S bit: sequence number valid */
#define L2TP_SL_SEQ_MASK   0x00ffffff	/* 24-bit sequence number field */

#define L2TP_HDR_SIZE_SEQ		10
#define L2TP_HDR_SIZE_NOSEQ		6

/* Default trace flags */
#define L2TP_DEFAULT_DEBUG_FLAGS	0

/* Private data stored for received packets in the skb.
 */
struct l2tp_skb_cb {
	u32			ns;		/* L2TP sequence number of this packet */
	u16			has_seq;	/* non-zero when ns is valid */
	u16			length;		/* payload length recorded at receive time */
	unsigned long		expires;	/* jiffies deadline while on the reorder queue */
};

/* The L2TP cb lives after the inet cb area inside skb->cb[]. */
#define L2TP_SKB_CB(skb)	((struct l2tp_skb_cb *) &skb->cb[sizeof(struct inet_skb_parm)])

static atomic_t l2tp_tunnel_count;
static atomic_t l2tp_session_count;
static struct workqueue_struct *l2tp_wq;

/* per-net private data for this module */
static unsigned int l2tp_net_id;
struct l2tp_net {
	struct list_head l2tp_tunnel_list;		/* all tunnels in this netns */
	spinlock_t l2tp_tunnel_list_lock;
	struct hlist_head l2tp_session_hlist[L2TP_HASH_SIZE_2];	/* global v3 session hash */
	spinlock_t l2tp_session_hlist_lock;
};

static void l2tp_session_set_header_len(struct l2tp_session *session, int version);
static void l2tp_tunnel_free(struct l2tp_tunnel *tunnel);
static void l2tp_tunnel_closeall(struct l2tp_tunnel *tunnel);

/* Return this module's per-netns private data for @net. */
static inline struct l2tp_net *l2tp_pernet(struct net *net)
{
	BUG_ON(!net);

	return net_generic(net, l2tp_net_id);
}

/* Tunnel reference counts. Incremented per session that is added to
 * the tunnel.
128 */ 129 static inline void l2tp_tunnel_inc_refcount_1(struct l2tp_tunnel *tunnel) 130 { 131 atomic_inc(&tunnel->ref_count); 132 } 133 134 static inline void l2tp_tunnel_dec_refcount_1(struct l2tp_tunnel *tunnel) 135 { 136 if (atomic_dec_and_test(&tunnel->ref_count)) 137 l2tp_tunnel_free(tunnel); 138 } 139 #ifdef L2TP_REFCNT_DEBUG 140 #define l2tp_tunnel_inc_refcount(_t) \ 141 do { \ 142 pr_debug("l2tp_tunnel_inc_refcount: %s:%d %s: cnt=%d\n", \ 143 __func__, __LINE__, (_t)->name, \ 144 atomic_read(&_t->ref_count)); \ 145 l2tp_tunnel_inc_refcount_1(_t); \ 146 } while (0) 147 #define l2tp_tunnel_dec_refcount(_t) 148 do { \ 149 pr_debug("l2tp_tunnel_dec_refcount: %s:%d %s: cnt=%d\n", \ 150 __func__, __LINE__, (_t)->name, \ 151 atomic_read(&_t->ref_count)); \ 152 l2tp_tunnel_dec_refcount_1(_t); \ 153 } while (0) 154 #else 155 #define l2tp_tunnel_inc_refcount(t) l2tp_tunnel_inc_refcount_1(t) 156 #define l2tp_tunnel_dec_refcount(t) l2tp_tunnel_dec_refcount_1(t) 157 #endif 158 159 /* Session hash global list for L2TPv3. 160 * The session_id SHOULD be random according to RFC3931, but several 161 * L2TP implementations use incrementing session_ids. So we do a real 162 * hash on the session_id, rather than a simple bitmask. 163 */ 164 static inline struct hlist_head * 165 l2tp_session_id_hash_2(struct l2tp_net *pn, u32 session_id) 166 { 167 return &pn->l2tp_session_hlist[hash_32(session_id, L2TP_HASH_BITS_2)]; 168 169 } 170 171 /* Lookup the tunnel socket, possibly involving the fs code if the socket is 172 * owned by userspace. A struct sock returned from this function must be 173 * released using l2tp_tunnel_sock_put once you're done with it. 174 */ 175 struct sock *l2tp_tunnel_sock_lookup(struct l2tp_tunnel *tunnel) 176 { 177 int err = 0; 178 struct socket *sock = NULL; 179 struct sock *sk = NULL; 180 181 if (!tunnel) 182 goto out; 183 184 if (tunnel->fd >= 0) { 185 /* Socket is owned by userspace, who might be in the process 186 * of closing it. 
Look the socket up using the fd to ensure 187 * consistency. 188 */ 189 sock = sockfd_lookup(tunnel->fd, &err); 190 if (sock) 191 sk = sock->sk; 192 } else { 193 /* Socket is owned by kernelspace */ 194 sk = tunnel->sock; 195 } 196 197 out: 198 return sk; 199 } 200 EXPORT_SYMBOL_GPL(l2tp_tunnel_sock_lookup); 201 202 /* Drop a reference to a tunnel socket obtained via. l2tp_tunnel_sock_put */ 203 void l2tp_tunnel_sock_put(struct sock *sk) 204 { 205 struct l2tp_tunnel *tunnel = l2tp_sock_to_tunnel(sk); 206 if (tunnel) { 207 if (tunnel->fd >= 0) { 208 /* Socket is owned by userspace */ 209 sockfd_put(sk->sk_socket); 210 } 211 sock_put(sk); 212 } 213 } 214 EXPORT_SYMBOL_GPL(l2tp_tunnel_sock_put); 215 216 /* Lookup a session by id in the global session list 217 */ 218 static struct l2tp_session *l2tp_session_find_2(struct net *net, u32 session_id) 219 { 220 struct l2tp_net *pn = l2tp_pernet(net); 221 struct hlist_head *session_list = 222 l2tp_session_id_hash_2(pn, session_id); 223 struct l2tp_session *session; 224 225 rcu_read_lock_bh(); 226 hlist_for_each_entry_rcu(session, session_list, global_hlist) { 227 if (session->session_id == session_id) { 228 rcu_read_unlock_bh(); 229 return session; 230 } 231 } 232 rcu_read_unlock_bh(); 233 234 return NULL; 235 } 236 237 /* Session hash list. 238 * The session_id SHOULD be random according to RFC2661, but several 239 * L2TP implementations (Cisco and Microsoft) use incrementing 240 * session_ids. So we do a real hash on the session_id, rather than a 241 * simple bitmask. 
 */
static inline struct hlist_head *
l2tp_session_id_hash(struct l2tp_tunnel *tunnel, u32 session_id)
{
	return &tunnel->session_hlist[hash_32(session_id, L2TP_HASH_BITS)];
}

/* Lookup a session by id.
 *
 * If @tunnel is NULL the lookup goes through the per-net global L2TPv3
 * hash; otherwise the tunnel's own per-tunnel hash is searched under
 * tunnel->hlist_lock.
 *
 * NOTE(review): the returned session pointer is not reference counted
 * here; it appears callers rely on other means to keep it alive after
 * the lock/RCU section ends — confirm against callers.
 */
struct l2tp_session *l2tp_session_find(struct net *net, struct l2tp_tunnel *tunnel, u32 session_id)
{
	struct hlist_head *session_list;
	struct l2tp_session *session;

	/* In L2TPv3, session_ids are unique over all tunnels and we
	 * sometimes need to look them up before we know the
	 * tunnel.
	 */
	if (tunnel == NULL)
		return l2tp_session_find_2(net, session_id);

	session_list = l2tp_session_id_hash(tunnel, session_id);
	read_lock_bh(&tunnel->hlist_lock);
	hlist_for_each_entry(session, session_list, hlist) {
		if (session->session_id == session_id) {
			read_unlock_bh(&tunnel->hlist_lock);
			return session;
		}
	}
	read_unlock_bh(&tunnel->hlist_lock);

	return NULL;
}
EXPORT_SYMBOL_GPL(l2tp_session_find);

/* Return the nth session of a tunnel (0-based walk over all hash
 * buckets), or NULL if there are fewer than nth+1 sessions. Used by
 * management code to enumerate sessions.
 */
struct l2tp_session *l2tp_session_find_nth(struct l2tp_tunnel *tunnel, int nth)
{
	int hash;
	struct l2tp_session *session;
	int count = 0;

	read_lock_bh(&tunnel->hlist_lock);
	for (hash = 0; hash < L2TP_HASH_SIZE; hash++) {
		hlist_for_each_entry(session, &tunnel->session_hlist[hash], hlist) {
			if (++count > nth) {
				read_unlock_bh(&tunnel->hlist_lock);
				return session;
			}
		}
	}

	read_unlock_bh(&tunnel->hlist_lock);

	return NULL;
}
EXPORT_SYMBOL_GPL(l2tp_session_find_nth);

/* Lookup a session by interface name.
 * This is very inefficient but is only used by management interfaces.
 */
struct l2tp_session *l2tp_session_find_by_ifname(struct net *net, char *ifname)
{
	struct l2tp_net *pn = l2tp_pernet(net);
	int hash;
	struct l2tp_session *session;

	rcu_read_lock_bh();
	/* Linear scan of every global hash bucket. */
	for (hash = 0; hash < L2TP_HASH_SIZE_2; hash++) {
		hlist_for_each_entry_rcu(session, &pn->l2tp_session_hlist[hash], global_hlist) {
			if (!strcmp(session->ifname, ifname)) {
				rcu_read_unlock_bh();
				return session;
			}
		}
	}

	rcu_read_unlock_bh();

	return NULL;
}
EXPORT_SYMBOL_GPL(l2tp_session_find_by_ifname);

/* Lookup a tunnel by id
 */
struct l2tp_tunnel *l2tp_tunnel_find(struct net *net, u32 tunnel_id)
{
	struct l2tp_tunnel *tunnel;
	struct l2tp_net *pn = l2tp_pernet(net);

	rcu_read_lock_bh();
	list_for_each_entry_rcu(tunnel, &pn->l2tp_tunnel_list, list) {
		if (tunnel->tunnel_id == tunnel_id) {
			rcu_read_unlock_bh();
			return tunnel;
		}
	}
	rcu_read_unlock_bh();

	return NULL;
}
EXPORT_SYMBOL_GPL(l2tp_tunnel_find);

/* Return the nth tunnel in this netns (0-based), or NULL. */
struct l2tp_tunnel *l2tp_tunnel_find_nth(struct net *net, int nth)
{
	struct l2tp_net *pn = l2tp_pernet(net);
	struct l2tp_tunnel *tunnel;
	int count = 0;

	rcu_read_lock_bh();
	list_for_each_entry_rcu(tunnel, &pn->l2tp_tunnel_list, list) {
		if (++count > nth) {
			rcu_read_unlock_bh();
			return tunnel;
		}
	}

	rcu_read_unlock_bh();

	return NULL;
}
EXPORT_SYMBOL_GPL(l2tp_tunnel_find_nth);

/*****************************************************************************
 * Receive data handling
 *****************************************************************************/

/* Queue a skb in order. We come here only if the skb has an L2TP sequence
 * number.
370 */ 371 static void l2tp_recv_queue_skb(struct l2tp_session *session, struct sk_buff *skb) 372 { 373 struct sk_buff *skbp; 374 struct sk_buff *tmp; 375 u32 ns = L2TP_SKB_CB(skb)->ns; 376 struct l2tp_stats *sstats; 377 378 spin_lock_bh(&session->reorder_q.lock); 379 sstats = &session->stats; 380 skb_queue_walk_safe(&session->reorder_q, skbp, tmp) { 381 if (L2TP_SKB_CB(skbp)->ns > ns) { 382 __skb_queue_before(&session->reorder_q, skbp, skb); 383 l2tp_dbg(session, L2TP_MSG_SEQ, 384 "%s: pkt %hu, inserted before %hu, reorder_q len=%d\n", 385 session->name, ns, L2TP_SKB_CB(skbp)->ns, 386 skb_queue_len(&session->reorder_q)); 387 u64_stats_update_begin(&sstats->syncp); 388 sstats->rx_oos_packets++; 389 u64_stats_update_end(&sstats->syncp); 390 goto out; 391 } 392 } 393 394 __skb_queue_tail(&session->reorder_q, skb); 395 396 out: 397 spin_unlock_bh(&session->reorder_q.lock); 398 } 399 400 /* Dequeue a single skb. 401 */ 402 static void l2tp_recv_dequeue_skb(struct l2tp_session *session, struct sk_buff *skb) 403 { 404 struct l2tp_tunnel *tunnel = session->tunnel; 405 int length = L2TP_SKB_CB(skb)->length; 406 struct l2tp_stats *tstats, *sstats; 407 408 /* We're about to requeue the skb, so return resources 409 * to its current owner (a socket receive buffer). 
410 */ 411 skb_orphan(skb); 412 413 tstats = &tunnel->stats; 414 u64_stats_update_begin(&tstats->syncp); 415 sstats = &session->stats; 416 u64_stats_update_begin(&sstats->syncp); 417 tstats->rx_packets++; 418 tstats->rx_bytes += length; 419 sstats->rx_packets++; 420 sstats->rx_bytes += length; 421 u64_stats_update_end(&tstats->syncp); 422 u64_stats_update_end(&sstats->syncp); 423 424 if (L2TP_SKB_CB(skb)->has_seq) { 425 /* Bump our Nr */ 426 session->nr++; 427 if (tunnel->version == L2TP_HDR_VER_2) 428 session->nr &= 0xffff; 429 else 430 session->nr &= 0xffffff; 431 432 l2tp_dbg(session, L2TP_MSG_SEQ, "%s: updated nr to %hu\n", 433 session->name, session->nr); 434 } 435 436 /* call private receive handler */ 437 if (session->recv_skb != NULL) 438 (*session->recv_skb)(session, skb, L2TP_SKB_CB(skb)->length); 439 else 440 kfree_skb(skb); 441 442 if (session->deref) 443 (*session->deref)(session); 444 } 445 446 /* Dequeue skbs from the session's reorder_q, subject to packet order. 447 * Skbs that have been in the queue for too long are simply discarded. 448 */ 449 static void l2tp_recv_dequeue(struct l2tp_session *session) 450 { 451 struct sk_buff *skb; 452 struct sk_buff *tmp; 453 struct l2tp_stats *sstats; 454 455 /* If the pkt at the head of the queue has the nr that we 456 * expect to send up next, dequeue it and any other 457 * in-sequence packets behind it. 
458 */ 459 start: 460 spin_lock_bh(&session->reorder_q.lock); 461 sstats = &session->stats; 462 skb_queue_walk_safe(&session->reorder_q, skb, tmp) { 463 if (time_after(jiffies, L2TP_SKB_CB(skb)->expires)) { 464 u64_stats_update_begin(&sstats->syncp); 465 sstats->rx_seq_discards++; 466 sstats->rx_errors++; 467 u64_stats_update_end(&sstats->syncp); 468 l2tp_dbg(session, L2TP_MSG_SEQ, 469 "%s: oos pkt %u len %d discarded (too old), waiting for %u, reorder_q_len=%d\n", 470 session->name, L2TP_SKB_CB(skb)->ns, 471 L2TP_SKB_CB(skb)->length, session->nr, 472 skb_queue_len(&session->reorder_q)); 473 session->reorder_skip = 1; 474 __skb_unlink(skb, &session->reorder_q); 475 kfree_skb(skb); 476 if (session->deref) 477 (*session->deref)(session); 478 continue; 479 } 480 481 if (L2TP_SKB_CB(skb)->has_seq) { 482 if (session->reorder_skip) { 483 l2tp_dbg(session, L2TP_MSG_SEQ, 484 "%s: advancing nr to next pkt: %u -> %u", 485 session->name, session->nr, 486 L2TP_SKB_CB(skb)->ns); 487 session->reorder_skip = 0; 488 session->nr = L2TP_SKB_CB(skb)->ns; 489 } 490 if (L2TP_SKB_CB(skb)->ns != session->nr) { 491 l2tp_dbg(session, L2TP_MSG_SEQ, 492 "%s: holding oos pkt %u len %d, waiting for %u, reorder_q_len=%d\n", 493 session->name, L2TP_SKB_CB(skb)->ns, 494 L2TP_SKB_CB(skb)->length, session->nr, 495 skb_queue_len(&session->reorder_q)); 496 goto out; 497 } 498 } 499 __skb_unlink(skb, &session->reorder_q); 500 501 /* Process the skb. We release the queue lock while we 502 * do so to let other contexts process the queue. 
503 */ 504 spin_unlock_bh(&session->reorder_q.lock); 505 l2tp_recv_dequeue_skb(session, skb); 506 goto start; 507 } 508 509 out: 510 spin_unlock_bh(&session->reorder_q.lock); 511 } 512 513 static inline int l2tp_verify_udp_checksum(struct sock *sk, 514 struct sk_buff *skb) 515 { 516 struct udphdr *uh = udp_hdr(skb); 517 u16 ulen = ntohs(uh->len); 518 __wsum psum; 519 520 if (sk->sk_no_check || skb_csum_unnecessary(skb)) 521 return 0; 522 523 #if IS_ENABLED(CONFIG_IPV6) 524 if (sk->sk_family == PF_INET6) { 525 if (!uh->check) { 526 LIMIT_NETDEBUG(KERN_INFO "L2TP: IPv6: checksum is 0\n"); 527 return 1; 528 } 529 if ((skb->ip_summed == CHECKSUM_COMPLETE) && 530 !csum_ipv6_magic(&ipv6_hdr(skb)->saddr, 531 &ipv6_hdr(skb)->daddr, ulen, 532 IPPROTO_UDP, skb->csum)) { 533 skb->ip_summed = CHECKSUM_UNNECESSARY; 534 return 0; 535 } 536 skb->csum = ~csum_unfold(csum_ipv6_magic(&ipv6_hdr(skb)->saddr, 537 &ipv6_hdr(skb)->daddr, 538 skb->len, IPPROTO_UDP, 539 0)); 540 } else 541 #endif 542 { 543 struct inet_sock *inet; 544 if (!uh->check) 545 return 0; 546 inet = inet_sk(sk); 547 psum = csum_tcpudp_nofold(inet->inet_saddr, inet->inet_daddr, 548 ulen, IPPROTO_UDP, 0); 549 550 if ((skb->ip_summed == CHECKSUM_COMPLETE) && 551 !csum_fold(csum_add(psum, skb->csum))) 552 return 0; 553 skb->csum = psum; 554 } 555 556 return __skb_checksum_complete(skb); 557 } 558 559 /* Do receive processing of L2TP data frames. We handle both L2TPv2 560 * and L2TPv3 data frames here. 
561 * 562 * L2TPv2 Data Message Header 563 * 564 * 0 1 2 3 565 * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 566 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ 567 * |T|L|x|x|S|x|O|P|x|x|x|x| Ver | Length (opt) | 568 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ 569 * | Tunnel ID | Session ID | 570 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ 571 * | Ns (opt) | Nr (opt) | 572 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ 573 * | Offset Size (opt) | Offset pad... (opt) 574 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ 575 * 576 * Data frames are marked by T=0. All other fields are the same as 577 * those in L2TP control frames. 578 * 579 * L2TPv3 Data Message Header 580 * 581 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ 582 * | L2TP Session Header | 583 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ 584 * | L2-Specific Sublayer | 585 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ 586 * | Tunnel Payload ... 587 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ 588 * 589 * L2TPv3 Session Header Over IP 590 * 591 * 0 1 2 3 592 * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 593 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ 594 * | Session ID | 595 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ 596 * | Cookie (optional, maximum 64 bits)... 
597 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ 598 * | 599 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ 600 * 601 * L2TPv3 L2-Specific Sublayer Format 602 * 603 * 0 1 2 3 604 * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 605 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ 606 * |x|S|x|x|x|x|x|x| Sequence Number | 607 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ 608 * 609 * Cookie value, sublayer format and offset (pad) are negotiated with 610 * the peer when the session is set up. Unlike L2TPv2, we do not need 611 * to parse the packet header to determine if optional fields are 612 * present. 613 * 614 * Caller must already have parsed the frame and determined that it is 615 * a data (not control) frame before coming here. Fields up to the 616 * session-id have already been parsed and ptr points to the data 617 * after the session-id. 618 */ 619 void l2tp_recv_common(struct l2tp_session *session, struct sk_buff *skb, 620 unsigned char *ptr, unsigned char *optr, u16 hdrflags, 621 int length, int (*payload_hook)(struct sk_buff *skb)) 622 { 623 struct l2tp_tunnel *tunnel = session->tunnel; 624 int offset; 625 u32 ns, nr; 626 struct l2tp_stats *sstats = &session->stats; 627 628 /* The ref count is increased since we now hold a pointer to 629 * the session. Take care to decrement the refcnt when exiting 630 * this function from now on... 631 */ 632 l2tp_session_inc_refcount(session); 633 if (session->ref) 634 (*session->ref)(session); 635 636 /* Parse and check optional cookie */ 637 if (session->peer_cookie_len > 0) { 638 if (memcmp(ptr, &session->peer_cookie[0], session->peer_cookie_len)) { 639 l2tp_info(tunnel, L2TP_MSG_DATA, 640 "%s: cookie mismatch (%u/%u). 
Discarding.\n", 641 tunnel->name, tunnel->tunnel_id, 642 session->session_id); 643 u64_stats_update_begin(&sstats->syncp); 644 sstats->rx_cookie_discards++; 645 u64_stats_update_end(&sstats->syncp); 646 goto discard; 647 } 648 ptr += session->peer_cookie_len; 649 } 650 651 /* Handle the optional sequence numbers. Sequence numbers are 652 * in different places for L2TPv2 and L2TPv3. 653 * 654 * If we are the LAC, enable/disable sequence numbers under 655 * the control of the LNS. If no sequence numbers present but 656 * we were expecting them, discard frame. 657 */ 658 ns = nr = 0; 659 L2TP_SKB_CB(skb)->has_seq = 0; 660 if (tunnel->version == L2TP_HDR_VER_2) { 661 if (hdrflags & L2TP_HDRFLAG_S) { 662 ns = ntohs(*(__be16 *) ptr); 663 ptr += 2; 664 nr = ntohs(*(__be16 *) ptr); 665 ptr += 2; 666 667 /* Store L2TP info in the skb */ 668 L2TP_SKB_CB(skb)->ns = ns; 669 L2TP_SKB_CB(skb)->has_seq = 1; 670 671 l2tp_dbg(session, L2TP_MSG_SEQ, 672 "%s: recv data ns=%u, nr=%u, session nr=%u\n", 673 session->name, ns, nr, session->nr); 674 } 675 } else if (session->l2specific_type == L2TP_L2SPECTYPE_DEFAULT) { 676 u32 l2h = ntohl(*(__be32 *) ptr); 677 678 if (l2h & 0x40000000) { 679 ns = l2h & 0x00ffffff; 680 681 /* Store L2TP info in the skb */ 682 L2TP_SKB_CB(skb)->ns = ns; 683 L2TP_SKB_CB(skb)->has_seq = 1; 684 685 l2tp_dbg(session, L2TP_MSG_SEQ, 686 "%s: recv data ns=%u, session nr=%u\n", 687 session->name, ns, session->nr); 688 } 689 } 690 691 /* Advance past L2-specific header, if present */ 692 ptr += session->l2specific_len; 693 694 if (L2TP_SKB_CB(skb)->has_seq) { 695 /* Received a packet with sequence numbers. If we're the LNS, 696 * check if we sre sending sequence numbers and if not, 697 * configure it so. 
698 */ 699 if ((!session->lns_mode) && (!session->send_seq)) { 700 l2tp_info(session, L2TP_MSG_SEQ, 701 "%s: requested to enable seq numbers by LNS\n", 702 session->name); 703 session->send_seq = -1; 704 l2tp_session_set_header_len(session, tunnel->version); 705 } 706 } else { 707 /* No sequence numbers. 708 * If user has configured mandatory sequence numbers, discard. 709 */ 710 if (session->recv_seq) { 711 l2tp_warn(session, L2TP_MSG_SEQ, 712 "%s: recv data has no seq numbers when required. Discarding.\n", 713 session->name); 714 u64_stats_update_begin(&sstats->syncp); 715 sstats->rx_seq_discards++; 716 u64_stats_update_end(&sstats->syncp); 717 goto discard; 718 } 719 720 /* If we're the LAC and we're sending sequence numbers, the 721 * LNS has requested that we no longer send sequence numbers. 722 * If we're the LNS and we're sending sequence numbers, the 723 * LAC is broken. Discard the frame. 724 */ 725 if ((!session->lns_mode) && (session->send_seq)) { 726 l2tp_info(session, L2TP_MSG_SEQ, 727 "%s: requested to disable seq numbers by LNS\n", 728 session->name); 729 session->send_seq = 0; 730 l2tp_session_set_header_len(session, tunnel->version); 731 } else if (session->send_seq) { 732 l2tp_warn(session, L2TP_MSG_SEQ, 733 "%s: recv data has no seq numbers when required. Discarding.\n", 734 session->name); 735 u64_stats_update_begin(&sstats->syncp); 736 sstats->rx_seq_discards++; 737 u64_stats_update_end(&sstats->syncp); 738 goto discard; 739 } 740 } 741 742 /* Session data offset is handled differently for L2TPv2 and 743 * L2TPv3. For L2TPv2, there is an optional 16-bit value in 744 * the header. For L2TPv3, the offset is negotiated using AVPs 745 * in the session setup control protocol. 746 */ 747 if (tunnel->version == L2TP_HDR_VER_2) { 748 /* If offset bit set, skip it. 
*/ 749 if (hdrflags & L2TP_HDRFLAG_O) { 750 offset = ntohs(*(__be16 *)ptr); 751 ptr += 2 + offset; 752 } 753 } else 754 ptr += session->offset; 755 756 offset = ptr - optr; 757 if (!pskb_may_pull(skb, offset)) 758 goto discard; 759 760 __skb_pull(skb, offset); 761 762 /* If caller wants to process the payload before we queue the 763 * packet, do so now. 764 */ 765 if (payload_hook) 766 if ((*payload_hook)(skb)) 767 goto discard; 768 769 /* Prepare skb for adding to the session's reorder_q. Hold 770 * packets for max reorder_timeout or 1 second if not 771 * reordering. 772 */ 773 L2TP_SKB_CB(skb)->length = length; 774 L2TP_SKB_CB(skb)->expires = jiffies + 775 (session->reorder_timeout ? session->reorder_timeout : HZ); 776 777 /* Add packet to the session's receive queue. Reordering is done here, if 778 * enabled. Saved L2TP protocol info is stored in skb->sb[]. 779 */ 780 if (L2TP_SKB_CB(skb)->has_seq) { 781 if (session->reorder_timeout != 0) { 782 /* Packet reordering enabled. Add skb to session's 783 * reorder queue, in order of ns. 784 */ 785 l2tp_recv_queue_skb(session, skb); 786 } else { 787 /* Packet reordering disabled. Discard out-of-sequence 788 * packets 789 */ 790 if (L2TP_SKB_CB(skb)->ns != session->nr) { 791 u64_stats_update_begin(&sstats->syncp); 792 sstats->rx_seq_discards++; 793 u64_stats_update_end(&sstats->syncp); 794 l2tp_dbg(session, L2TP_MSG_SEQ, 795 "%s: oos pkt %u len %d discarded, waiting for %u, reorder_q_len=%d\n", 796 session->name, L2TP_SKB_CB(skb)->ns, 797 L2TP_SKB_CB(skb)->length, session->nr, 798 skb_queue_len(&session->reorder_q)); 799 goto discard; 800 } 801 skb_queue_tail(&session->reorder_q, skb); 802 } 803 } else { 804 /* No sequence numbers. Add the skb to the tail of the 805 * reorder queue. This ensures that it will be 806 * delivered after all previous sequenced skbs. 807 */ 808 skb_queue_tail(&session->reorder_q, skb); 809 } 810 811 /* Try to dequeue as many skbs from reorder_q as we can. 
*/ 812 l2tp_recv_dequeue(session); 813 814 l2tp_session_dec_refcount(session); 815 816 return; 817 818 discard: 819 u64_stats_update_begin(&sstats->syncp); 820 sstats->rx_errors++; 821 u64_stats_update_end(&sstats->syncp); 822 kfree_skb(skb); 823 824 if (session->deref) 825 (*session->deref)(session); 826 827 l2tp_session_dec_refcount(session); 828 } 829 EXPORT_SYMBOL(l2tp_recv_common); 830 831 /* Internal UDP receive frame. Do the real work of receiving an L2TP data frame 832 * here. The skb is not on a list when we get here. 833 * Returns 0 if the packet was a data packet and was successfully passed on. 834 * Returns 1 if the packet was not a good data packet and could not be 835 * forwarded. All such packets are passed up to userspace to deal with. 836 */ 837 static int l2tp_udp_recv_core(struct l2tp_tunnel *tunnel, struct sk_buff *skb, 838 int (*payload_hook)(struct sk_buff *skb)) 839 { 840 struct l2tp_session *session = NULL; 841 unsigned char *ptr, *optr; 842 u16 hdrflags; 843 u32 tunnel_id, session_id; 844 u16 version; 845 int length; 846 struct l2tp_stats *tstats; 847 848 if (tunnel->sock && l2tp_verify_udp_checksum(tunnel->sock, skb)) 849 goto discard_bad_csum; 850 851 /* UDP always verifies the packet length. */ 852 __skb_pull(skb, sizeof(struct udphdr)); 853 854 /* Short packet? 
*/ 855 if (!pskb_may_pull(skb, L2TP_HDR_SIZE_SEQ)) { 856 l2tp_info(tunnel, L2TP_MSG_DATA, 857 "%s: recv short packet (len=%d)\n", 858 tunnel->name, skb->len); 859 goto error; 860 } 861 862 /* Trace packet contents, if enabled */ 863 if (tunnel->debug & L2TP_MSG_DATA) { 864 length = min(32u, skb->len); 865 if (!pskb_may_pull(skb, length)) 866 goto error; 867 868 pr_debug("%s: recv\n", tunnel->name); 869 print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, skb->data, length); 870 } 871 872 /* Point to L2TP header */ 873 optr = ptr = skb->data; 874 875 /* Get L2TP header flags */ 876 hdrflags = ntohs(*(__be16 *) ptr); 877 878 /* Check protocol version */ 879 version = hdrflags & L2TP_HDR_VER_MASK; 880 if (version != tunnel->version) { 881 l2tp_info(tunnel, L2TP_MSG_DATA, 882 "%s: recv protocol version mismatch: got %d expected %d\n", 883 tunnel->name, version, tunnel->version); 884 goto error; 885 } 886 887 /* Get length of L2TP packet */ 888 length = skb->len; 889 890 /* If type is control packet, it is handled by userspace. */ 891 if (hdrflags & L2TP_HDRFLAG_T) { 892 l2tp_dbg(tunnel, L2TP_MSG_DATA, 893 "%s: recv control packet, len=%d\n", 894 tunnel->name, length); 895 goto error; 896 } 897 898 /* Skip flags */ 899 ptr += 2; 900 901 if (tunnel->version == L2TP_HDR_VER_2) { 902 /* If length is present, skip it */ 903 if (hdrflags & L2TP_HDRFLAG_L) 904 ptr += 2; 905 906 /* Extract tunnel and session ID */ 907 tunnel_id = ntohs(*(__be16 *) ptr); 908 ptr += 2; 909 session_id = ntohs(*(__be16 *) ptr); 910 ptr += 2; 911 } else { 912 ptr += 2; /* skip reserved bits */ 913 tunnel_id = tunnel->tunnel_id; 914 session_id = ntohl(*(__be32 *) ptr); 915 ptr += 4; 916 } 917 918 /* Find the session context */ 919 session = l2tp_session_find(tunnel->l2tp_net, tunnel, session_id); 920 if (!session || !session->recv_skb) { 921 /* Not found? Pass to userspace to deal with */ 922 l2tp_info(tunnel, L2TP_MSG_DATA, 923 "%s: no session found (%u/%u). 
Passing up.\n", 924 tunnel->name, tunnel_id, session_id); 925 goto error; 926 } 927 928 l2tp_recv_common(session, skb, ptr, optr, hdrflags, length, payload_hook); 929 930 return 0; 931 932 discard_bad_csum: 933 LIMIT_NETDEBUG("%s: UDP: bad checksum\n", tunnel->name); 934 UDP_INC_STATS_USER(tunnel->l2tp_net, UDP_MIB_INERRORS, 0); 935 tstats = &tunnel->stats; 936 u64_stats_update_begin(&tstats->syncp); 937 tstats->rx_errors++; 938 u64_stats_update_end(&tstats->syncp); 939 kfree_skb(skb); 940 941 return 0; 942 943 error: 944 /* Put UDP header back */ 945 __skb_push(skb, sizeof(struct udphdr)); 946 947 return 1; 948 } 949 950 /* UDP encapsulation receive handler. See net/ipv4/udp.c. 951 * Return codes: 952 * 0 : success. 953 * <0: error 954 * >0: skb should be passed up to userspace as UDP. 955 */ 956 int l2tp_udp_encap_recv(struct sock *sk, struct sk_buff *skb) 957 { 958 struct l2tp_tunnel *tunnel; 959 960 tunnel = l2tp_sock_to_tunnel(sk); 961 if (tunnel == NULL) 962 goto pass_up; 963 964 l2tp_dbg(tunnel, L2TP_MSG_DATA, "%s: received %d bytes\n", 965 tunnel->name, skb->len); 966 967 if (l2tp_udp_recv_core(tunnel, skb, tunnel->recv_payload_hook)) 968 goto pass_up_put; 969 970 sock_put(sk); 971 return 0; 972 973 pass_up_put: 974 sock_put(sk); 975 pass_up: 976 return 1; 977 } 978 EXPORT_SYMBOL_GPL(l2tp_udp_encap_recv); 979 980 /************************************************************************ 981 * Transmit handling 982 ***********************************************************************/ 983 984 /* Build an L2TP header for the session into the buffer provided. 985 */ 986 static int l2tp_build_l2tpv2_header(struct l2tp_session *session, void *buf) 987 { 988 struct l2tp_tunnel *tunnel = session->tunnel; 989 __be16 *bufp = buf; 990 __be16 *optr = buf; 991 u16 flags = L2TP_HDR_VER_2; 992 u32 tunnel_id = tunnel->peer_tunnel_id; 993 u32 session_id = session->peer_session_id; 994 995 if (session->send_seq) 996 flags |= L2TP_HDRFLAG_S; 997 998 /* Setup L2TP header. 
 */
	*bufp++ = htons(flags);
	*bufp++ = htons(tunnel_id);
	*bufp++ = htons(session_id);
	if (session->send_seq) {
		/* ns/nr pair; nr is always sent as zero on the data path */
		*bufp++ = htons(session->ns);
		*bufp++ = 0;
		session->ns++;
		session->ns &= 0xffff;	/* L2TPv2 ns is a 16-bit counter */
		l2tp_dbg(session, L2TP_MSG_SEQ, "%s: updated ns to %u\n",
			 session->name, session->ns);
	}

	return bufp - optr;
}

/* Build an L2TPv3 header: optional UDP flags word, session id, optional
 * cookie, optional L2-specific sublayer and optional pad/offset.
 * Returns the number of header bytes written.
 */
static int l2tp_build_l2tpv3_header(struct l2tp_session *session, void *buf)
{
	struct l2tp_tunnel *tunnel = session->tunnel;
	char *bufp = buf;
	char *optr = bufp;

	/* Setup L2TP header. The header differs slightly for UDP and
	 * IP encapsulations. For UDP, there is 4 bytes of flags.
	 */
	if (tunnel->encap == L2TP_ENCAPTYPE_UDP) {
		u16 flags = L2TP_HDR_VER_3;
		*((__be16 *) bufp) = htons(flags);
		bufp += 2;
		*((__be16 *) bufp) = 0;
		bufp += 2;
	}

	*((__be32 *) bufp) = htonl(session->peer_session_id);
	bufp += 4;
	if (session->cookie_len) {
		memcpy(bufp, &session->cookie[0], session->cookie_len);
		bufp += session->cookie_len;
	}
	if (session->l2specific_len) {
		if (session->l2specific_type == L2TP_L2SPECTYPE_DEFAULT) {
			u32 l2h = 0;
			if (session->send_seq) {
				/* S-bit plus 24-bit ns counter */
				l2h = 0x40000000 | session->ns;
				session->ns++;
				session->ns &= 0xffffff;
				l2tp_dbg(session, L2TP_MSG_SEQ,
					 "%s: updated ns to %u\n",
					 session->name, session->ns);
			}

			*((__be32 *) bufp) = htonl(l2h);
		}
		bufp += session->l2specific_len;
	}
	if (session->offset)
		bufp += session->offset;

	return bufp - optr;
}

/* Push a fully-built data packet out through the tunnel socket's
 * network layer and update tunnel/session tx stats.
 */
static int l2tp_xmit_core(struct l2tp_session *session, struct sk_buff *skb,
			  struct flowi *fl, size_t data_len)
{
	struct l2tp_tunnel *tunnel = session->tunnel;
	unsigned int len = skb->len;
	int error;
	struct l2tp_stats *tstats, *sstats;

	/* Debug */
	if (session->send_seq)
		l2tp_dbg(session, L2TP_MSG_DATA, "%s: send %Zd bytes, ns=%u\n",
			 session->name, data_len, session->ns - 1);
	else
		l2tp_dbg(session, L2TP_MSG_DATA, "%s: send %Zd bytes\n",
			 session->name, data_len);

	if (session->debug & L2TP_MSG_DATA) {
		int uhlen = (tunnel->encap == L2TP_ENCAPTYPE_UDP) ? sizeof(struct udphdr) : 0;
		unsigned char *datap = skb->data + uhlen;

		pr_debug("%s: xmit\n", session->name);
		print_hex_dump_bytes("", DUMP_PREFIX_OFFSET,
				     datap, min_t(size_t, 32, len - uhlen));
	}

	/* Queue the packet to IP for output */
	skb->local_df = 1;
#if IS_ENABLED(CONFIG_IPV6)
	if (skb->sk->sk_family == PF_INET6)
		error = inet6_csk_xmit(skb, NULL);
	else
#endif
		error = ip_queue_xmit(skb, fl);

	/* Update stats */
	tstats = &tunnel->stats;
	u64_stats_update_begin(&tstats->syncp);
	sstats = &session->stats;
	u64_stats_update_begin(&sstats->syncp);
	if (error >= 0) {
		tstats->tx_packets++;
		tstats->tx_bytes += len;
		sstats->tx_packets++;
		sstats->tx_bytes += len;
	} else {
		tstats->tx_errors++;
		sstats->tx_errors++;
	}
	u64_stats_update_end(&tstats->syncp);
	u64_stats_update_end(&sstats->syncp);

	return 0;
}

/* Automatically called when the skb is freed; drops the socket
 * reference taken in l2tp_skb_set_owner_w().
 */
static void l2tp_sock_wfree(struct sk_buff *skb)
{
	sock_put(skb->sk);
}

/* For data skbs that we transmit, we associate with the tunnel socket
 * but don't do accounting.
1122 */ 1123 static inline void l2tp_skb_set_owner_w(struct sk_buff *skb, struct sock *sk) 1124 { 1125 sock_hold(sk); 1126 skb->sk = sk; 1127 skb->destructor = l2tp_sock_wfree; 1128 } 1129 1130 #if IS_ENABLED(CONFIG_IPV6) 1131 static void l2tp_xmit_ipv6_csum(struct sock *sk, struct sk_buff *skb, 1132 int udp_len) 1133 { 1134 struct ipv6_pinfo *np = inet6_sk(sk); 1135 struct udphdr *uh = udp_hdr(skb); 1136 1137 if (!skb_dst(skb) || !skb_dst(skb)->dev || 1138 !(skb_dst(skb)->dev->features & NETIF_F_IPV6_CSUM)) { 1139 __wsum csum = skb_checksum(skb, 0, udp_len, 0); 1140 skb->ip_summed = CHECKSUM_UNNECESSARY; 1141 uh->check = csum_ipv6_magic(&np->saddr, &np->daddr, udp_len, 1142 IPPROTO_UDP, csum); 1143 if (uh->check == 0) 1144 uh->check = CSUM_MANGLED_0; 1145 } else { 1146 skb->ip_summed = CHECKSUM_PARTIAL; 1147 skb->csum_start = skb_transport_header(skb) - skb->head; 1148 skb->csum_offset = offsetof(struct udphdr, check); 1149 uh->check = ~csum_ipv6_magic(&np->saddr, &np->daddr, 1150 udp_len, IPPROTO_UDP, 0); 1151 } 1152 } 1153 #endif 1154 1155 /* If caller requires the skb to have a ppp header, the header must be 1156 * inserted in the skb data before calling this function. 1157 */ 1158 int l2tp_xmit_skb(struct l2tp_session *session, struct sk_buff *skb, int hdr_len) 1159 { 1160 int data_len = skb->len; 1161 struct l2tp_tunnel *tunnel = session->tunnel; 1162 struct sock *sk = tunnel->sock; 1163 struct flowi *fl; 1164 struct udphdr *uh; 1165 struct inet_sock *inet; 1166 __wsum csum; 1167 int headroom; 1168 int uhlen = (tunnel->encap == L2TP_ENCAPTYPE_UDP) ? sizeof(struct udphdr) : 0; 1169 int udp_len; 1170 int ret = NET_XMIT_SUCCESS; 1171 1172 /* Check that there's enough headroom in the skb to insert IP, 1173 * UDP and L2TP headers. If not enough, expand it to 1174 * make room. Adjust truesize. 
1175 */ 1176 headroom = NET_SKB_PAD + sizeof(struct iphdr) + 1177 uhlen + hdr_len; 1178 if (skb_cow_head(skb, headroom)) { 1179 kfree_skb(skb); 1180 return NET_XMIT_DROP; 1181 } 1182 1183 skb_orphan(skb); 1184 /* Setup L2TP header */ 1185 session->build_header(session, __skb_push(skb, hdr_len)); 1186 1187 /* Reset skb netfilter state */ 1188 memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt)); 1189 IPCB(skb)->flags &= ~(IPSKB_XFRM_TUNNEL_SIZE | IPSKB_XFRM_TRANSFORMED | 1190 IPSKB_REROUTED); 1191 nf_reset(skb); 1192 1193 bh_lock_sock(sk); 1194 if (sock_owned_by_user(sk)) { 1195 kfree_skb(skb); 1196 ret = NET_XMIT_DROP; 1197 goto out_unlock; 1198 } 1199 1200 /* Get routing info from the tunnel socket */ 1201 skb_dst_drop(skb); 1202 skb_dst_set(skb, dst_clone(__sk_dst_check(sk, 0))); 1203 1204 inet = inet_sk(sk); 1205 fl = &inet->cork.fl; 1206 switch (tunnel->encap) { 1207 case L2TP_ENCAPTYPE_UDP: 1208 /* Setup UDP header */ 1209 __skb_push(skb, sizeof(*uh)); 1210 skb_reset_transport_header(skb); 1211 uh = udp_hdr(skb); 1212 uh->source = inet->inet_sport; 1213 uh->dest = inet->inet_dport; 1214 udp_len = uhlen + hdr_len + data_len; 1215 uh->len = htons(udp_len); 1216 uh->check = 0; 1217 1218 /* Calculate UDP checksum if configured to do so */ 1219 #if IS_ENABLED(CONFIG_IPV6) 1220 if (sk->sk_family == PF_INET6) 1221 l2tp_xmit_ipv6_csum(sk, skb, udp_len); 1222 else 1223 #endif 1224 if (sk->sk_no_check == UDP_CSUM_NOXMIT) 1225 skb->ip_summed = CHECKSUM_NONE; 1226 else if ((skb_dst(skb) && skb_dst(skb)->dev) && 1227 (!(skb_dst(skb)->dev->features & NETIF_F_V4_CSUM))) { 1228 skb->ip_summed = CHECKSUM_COMPLETE; 1229 csum = skb_checksum(skb, 0, udp_len, 0); 1230 uh->check = csum_tcpudp_magic(inet->inet_saddr, 1231 inet->inet_daddr, 1232 udp_len, IPPROTO_UDP, csum); 1233 if (uh->check == 0) 1234 uh->check = CSUM_MANGLED_0; 1235 } else { 1236 skb->ip_summed = CHECKSUM_PARTIAL; 1237 skb->csum_start = skb_transport_header(skb) - skb->head; 1238 skb->csum_offset = offsetof(struct 
udphdr, check); 1239 uh->check = ~csum_tcpudp_magic(inet->inet_saddr, 1240 inet->inet_daddr, 1241 udp_len, IPPROTO_UDP, 0); 1242 } 1243 break; 1244 1245 case L2TP_ENCAPTYPE_IP: 1246 break; 1247 } 1248 1249 l2tp_skb_set_owner_w(skb, sk); 1250 1251 l2tp_xmit_core(session, skb, fl, data_len); 1252 out_unlock: 1253 bh_unlock_sock(sk); 1254 1255 return ret; 1256 } 1257 EXPORT_SYMBOL_GPL(l2tp_xmit_skb); 1258 1259 /***************************************************************************** 1260 * Tinnel and session create/destroy. 1261 *****************************************************************************/ 1262 1263 /* Tunnel socket destruct hook. 1264 * The tunnel context is deleted only when all session sockets have been 1265 * closed. 1266 */ 1267 static void l2tp_tunnel_destruct(struct sock *sk) 1268 { 1269 struct l2tp_tunnel *tunnel; 1270 struct l2tp_net *pn; 1271 1272 tunnel = sk->sk_user_data; 1273 if (tunnel == NULL) 1274 goto end; 1275 1276 l2tp_info(tunnel, L2TP_MSG_CONTROL, "%s: closing...\n", tunnel->name); 1277 1278 1279 /* Disable udp encapsulation */ 1280 switch (tunnel->encap) { 1281 case L2TP_ENCAPTYPE_UDP: 1282 /* No longer an encapsulation socket. 
See net/ipv4/udp.c */
		(udp_sk(sk))->encap_type = 0;
		(udp_sk(sk))->encap_rcv = NULL;
		break;
	case L2TP_ENCAPTYPE_IP:
		break;
	}

	/* Remove hooks into tunnel socket */
	sk->sk_destruct = tunnel->old_sk_destruct;
	sk->sk_user_data = NULL;
	tunnel->sock = NULL;

	/* Remove the tunnel struct from the tunnel list */
	pn = l2tp_pernet(tunnel->l2tp_net);
	spin_lock_bh(&pn->l2tp_tunnel_list_lock);
	list_del_rcu(&tunnel->list);
	spin_unlock_bh(&pn->l2tp_tunnel_list_lock);
	atomic_dec(&l2tp_tunnel_count);

	/* Tear down all sessions, then drop the tunnel's own refcount;
	 * the tunnel struct is freed when the count reaches zero.
	 */
	l2tp_tunnel_closeall(tunnel);
	l2tp_tunnel_dec_refcount(tunnel);

	/* Call the original destructor */
	if (sk->sk_destruct)
		(*sk->sk_destruct)(sk);
end:
	return;
}

/* When the tunnel is closed, all the attached sessions need to go too.
 */
static void l2tp_tunnel_closeall(struct l2tp_tunnel *tunnel)
{
	int hash;
	struct hlist_node *walk;
	struct hlist_node *tmp;
	struct l2tp_session *session;

	BUG_ON(tunnel == NULL);

	l2tp_info(tunnel, L2TP_MSG_CONTROL, "%s: closing all sessions...\n",
		  tunnel->name);

	write_lock_bh(&tunnel->hlist_lock);
	for (hash = 0; hash < L2TP_HASH_SIZE; hash++) {
again:
		hlist_for_each_safe(walk, tmp, &tunnel->session_hlist[hash]) {
			session = hlist_entry(walk, struct l2tp_session, hlist);

			l2tp_info(session, L2TP_MSG_CONTROL,
				  "%s: closing session\n", session->name);

			hlist_del_init(&session->hlist);

			/* Since we should hold the sock lock while
			 * doing any unbinding, we need to release the
			 * lock we're holding before taking that lock.
			 * Hold a reference to the sock so it doesn't
			 * disappear as we're jumping between locks.
 */
			if (session->ref != NULL)
				(*session->ref)(session);

			write_unlock_bh(&tunnel->hlist_lock);

			/* L2TPv3 sessions also live on the per-net global
			 * hash; unlink under its own lock and wait for
			 * RCU readers before closing the session.
			 */
			if (tunnel->version != L2TP_HDR_VER_2) {
				struct l2tp_net *pn = l2tp_pernet(tunnel->l2tp_net);

				spin_lock_bh(&pn->l2tp_session_hlist_lock);
				hlist_del_init_rcu(&session->global_hlist);
				spin_unlock_bh(&pn->l2tp_session_hlist_lock);
				synchronize_rcu();
			}

			if (session->session_close != NULL)
				(*session->session_close)(session);

			if (session->deref != NULL)
				(*session->deref)(session);

			write_lock_bh(&tunnel->hlist_lock);

			/* Now restart from the beginning of this hash
			 * chain. We always remove a session from the
			 * list so we are guaranteed to make forward
			 * progress.
			 */
			goto again;
		}
	}
	write_unlock_bh(&tunnel->hlist_lock);
}

/* Really kill the tunnel.
 * Come here only when all sessions have been cleared from the tunnel.
 */
static void l2tp_tunnel_free(struct l2tp_tunnel *tunnel)
{
	BUG_ON(atomic_read(&tunnel->ref_count) != 0);
	BUG_ON(tunnel->sock != NULL);
	l2tp_info(tunnel, L2TP_MSG_CONTROL, "%s: free...\n", tunnel->name);
	/* Defer the actual free until after an RCU grace period */
	kfree_rcu(tunnel, rcu);
}

/* Workqueue tunnel deletion function */
static void l2tp_tunnel_del_work(struct work_struct *work)
{
	struct l2tp_tunnel *tunnel = NULL;
	struct socket *sock = NULL;
	struct sock *sk = NULL;

	tunnel = container_of(work, struct l2tp_tunnel, del_work);
	/* Takes a reference on the tunnel socket, released via
	 * l2tp_tunnel_sock_put() below.
	 */
	sk = l2tp_tunnel_sock_lookup(tunnel);
	if (!sk)
		return;

	sock = sk->sk_socket;
	BUG_ON(!sock);

	/* If the tunnel socket was created directly by the kernel, use the
	 * sk_* API to release the socket now. Otherwise go through the
	 * inet_* layer to shut the socket down, and let userspace close it.
 * In either case the tunnel resources are freed in the socket
 * destructor when the tunnel socket goes away.
 */
	if (sock->file == NULL) {
		kernel_sock_shutdown(sock, SHUT_RDWR);
		sk_release_kernel(sk);
	} else {
		inet_shutdown(sock, 2);
	}

	l2tp_tunnel_sock_put(sk);
}

/* Create a socket for the tunnel, if one isn't set up by
 * userspace. This is used for static tunnels where there is no
 * managing L2TP daemon.
 *
 * Since we don't want these sockets to keep a namespace alive by
 * themselves, we drop the socket's namespace refcount after creation.
 * These sockets are freed when the namespace exits using the pernet
 * exit hook.
 */
static int l2tp_tunnel_sock_create(struct net *net,
				   u32 tunnel_id,
				   u32 peer_tunnel_id,
				   struct l2tp_tunnel_cfg *cfg,
				   struct socket **sockp)
{
	int err = -EINVAL;
	struct socket *sock = NULL;
	struct sockaddr_in udp_addr = {0};
	struct sockaddr_l2tpip ip_addr = {0};
#if IS_ENABLED(CONFIG_IPV6)
	struct sockaddr_in6 udp6_addr = {0};
	struct sockaddr_l2tpip6 ip6_addr = {0};
#endif

	switch (cfg->encap) {
	case L2TP_ENCAPTYPE_UDP:
#if IS_ENABLED(CONFIG_IPV6)
		if (cfg->local_ip6 && cfg->peer_ip6) {
			/* UDP over IPv6: bind local, connect to peer */
			err = sock_create_kern(AF_INET6, SOCK_DGRAM, 0, &sock);
			if (err < 0)
				goto out;

			/* Move into the target netns and drop the
			 * namespace refcount (see comment above).
			 */
			sk_change_net(sock->sk, net);

			udp6_addr.sin6_family = AF_INET6;
			memcpy(&udp6_addr.sin6_addr, cfg->local_ip6,
			       sizeof(udp6_addr.sin6_addr));
			udp6_addr.sin6_port = htons(cfg->local_udp_port);
			err = kernel_bind(sock, (struct sockaddr *) &udp6_addr,
					  sizeof(udp6_addr));
			if (err < 0)
				goto out;

			udp6_addr.sin6_family = AF_INET6;
			memcpy(&udp6_addr.sin6_addr, cfg->peer_ip6,
			       sizeof(udp6_addr.sin6_addr));
			udp6_addr.sin6_port = htons(cfg->peer_udp_port);
			err = kernel_connect(sock,
					     (struct sockaddr *) &udp6_addr,
					     sizeof(udp6_addr), 0);
			if (err < 0)
				goto out;
		} else
#endif
		{
			/* UDP over IPv4 */
			err = sock_create_kern(AF_INET, SOCK_DGRAM, 0, &sock);
			if (err < 0)
				goto out;

			sk_change_net(sock->sk, net);

			udp_addr.sin_family = AF_INET;
			udp_addr.sin_addr = cfg->local_ip;
			udp_addr.sin_port = htons(cfg->local_udp_port);
			err = kernel_bind(sock, (struct sockaddr *) &udp_addr,
					  sizeof(udp_addr));
			if (err < 0)
				goto out;

			udp_addr.sin_family = AF_INET;
			udp_addr.sin_addr = cfg->peer_ip;
			udp_addr.sin_port = htons(cfg->peer_udp_port);
			err = kernel_connect(sock,
					     (struct sockaddr *) &udp_addr,
					     sizeof(udp_addr), 0);
			if (err < 0)
				goto out;
		}

		if (!cfg->use_udp_checksums)
			sock->sk->sk_no_check = UDP_CSUM_NOXMIT;

		break;

	case L2TP_ENCAPTYPE_IP:
#if IS_ENABLED(CONFIG_IPV6)
		if (cfg->local_ip6 && cfg->peer_ip6) {
			/* L2TPIP over IPv6 */
			err = sock_create_kern(AF_INET6, SOCK_DGRAM,
					       IPPROTO_L2TP, &sock);
			if (err < 0)
				goto out;

			sk_change_net(sock->sk, net);

			ip6_addr.l2tp_family = AF_INET6;
			memcpy(&ip6_addr.l2tp_addr, cfg->local_ip6,
			       sizeof(ip6_addr.l2tp_addr));
			ip6_addr.l2tp_conn_id = tunnel_id;
			err = kernel_bind(sock, (struct sockaddr *) &ip6_addr,
					  sizeof(ip6_addr));
			if (err < 0)
				goto out;

			ip6_addr.l2tp_family = AF_INET6;
			memcpy(&ip6_addr.l2tp_addr, cfg->peer_ip6,
			       sizeof(ip6_addr.l2tp_addr));
			ip6_addr.l2tp_conn_id = peer_tunnel_id;
			err = kernel_connect(sock,
					     (struct sockaddr *) &ip6_addr,
					     sizeof(ip6_addr), 0);
			if (err < 0)
				goto out;
		} else
#endif
		{
			/* L2TPIP over IPv4 */
			err = sock_create_kern(AF_INET, SOCK_DGRAM,
					       IPPROTO_L2TP, &sock);
			if (err < 0)
				goto out;

			sk_change_net(sock->sk, net);

			ip_addr.l2tp_family = AF_INET;
			ip_addr.l2tp_addr = cfg->local_ip;
			ip_addr.l2tp_conn_id = tunnel_id;
			err = kernel_bind(sock, (struct sockaddr *) &ip_addr,
					  sizeof(ip_addr));
			if (err < 0)
				goto out;

			ip_addr.l2tp_family = AF_INET;
			ip_addr.l2tp_addr = cfg->peer_ip;
			ip_addr.l2tp_conn_id = peer_tunnel_id;
			err = kernel_connect(sock, (struct sockaddr *) &ip_addr,
					     sizeof(ip_addr), 0);
			if (err < 0)
				goto out;
		}
		break;

	default:
		goto out;
	}

out:
	*sockp = sock;
	/* On error, release any partially set-up socket */
	if ((err < 0) && sock) {
		kernel_sock_shutdown(sock, SHUT_RDWR);
		sk_release_kernel(sock->sk);
		*sockp = NULL;
	}

	return err;
}

static struct lock_class_key l2tp_socket_class;

int l2tp_tunnel_create(struct net *net, int fd, int version, u32 tunnel_id, u32 peer_tunnel_id, struct l2tp_tunnel_cfg *cfg, struct l2tp_tunnel **tunnelp)
{
	struct l2tp_tunnel *tunnel = NULL;
	int err;
	struct socket *sock = NULL;
	struct sock *sk = NULL;
	struct l2tp_net *pn;
	enum l2tp_encap_type encap = L2TP_ENCAPTYPE_UDP;

	/* Get the tunnel socket from the fd, which was opened by
	 * the userspace L2TP daemon. If not specified, create a
	 * kernel socket.
 */
	if (fd < 0) {
		err = l2tp_tunnel_sock_create(net, tunnel_id, peer_tunnel_id,
					      cfg, &sock);
		if (err < 0)
			goto err;
	} else {
		sock = sockfd_lookup(fd, &err);
		if (!sock) {
			pr_err("tunl %u: sockfd_lookup(fd=%d) returned %d\n",
			       tunnel_id, fd, err);
			err = -EBADF;
			goto err;
		}

		/* Reject namespace mismatches */
		if (!net_eq(sock_net(sock->sk), net)) {
			pr_err("tunl %u: netns mismatch\n", tunnel_id);
			err = -EINVAL;
			goto err;
		}
	}

	sk = sock->sk;

	if (cfg != NULL)
		encap = cfg->encap;

	/* Quick sanity checks */
	switch (encap) {
	case L2TP_ENCAPTYPE_UDP:
		err = -EPROTONOSUPPORT;
		if (sk->sk_protocol != IPPROTO_UDP) {
			pr_err("tunl %hu: fd %d wrong protocol, got %d, expected %d\n",
			       tunnel_id, fd, sk->sk_protocol, IPPROTO_UDP);
			goto err;
		}
		break;
	case L2TP_ENCAPTYPE_IP:
		err = -EPROTONOSUPPORT;
		if (sk->sk_protocol != IPPROTO_L2TP) {
			pr_err("tunl %hu: fd %d wrong protocol, got %d, expected %d\n",
			       tunnel_id, fd, sk->sk_protocol, IPPROTO_L2TP);
			goto err;
		}
		break;
	}

	/* Check if this socket has already been prepped */
	tunnel = (struct l2tp_tunnel *)sk->sk_user_data;
	if (tunnel != NULL) {
		/* This socket has already been prepped */
		err = -EBUSY;
		goto err;
	}

	tunnel = kzalloc(sizeof(struct l2tp_tunnel), GFP_KERNEL);
	if (tunnel == NULL) {
		err = -ENOMEM;
		goto err;
	}

	tunnel->version = version;
	tunnel->tunnel_id = tunnel_id;
	tunnel->peer_tunnel_id = peer_tunnel_id;
	tunnel->debug = L2TP_DEFAULT_DEBUG_FLAGS;

	tunnel->magic = L2TP_TUNNEL_MAGIC;
	sprintf(&tunnel->name[0], "tunl %u", tunnel_id);
	rwlock_init(&tunnel->hlist_lock);

	/* The net we belong to */
	tunnel->l2tp_net = net;
	pn = l2tp_pernet(net);

	if (cfg != NULL)
		tunnel->debug = cfg->debug;

	/* Mark socket as an encapsulation socket. See net/ipv4/udp.c */
	tunnel->encap = encap;
	if (encap == L2TP_ENCAPTYPE_UDP) {
		/* Mark socket as an encapsulation socket. See net/ipv4/udp.c */
		udp_sk(sk)->encap_type = UDP_ENCAP_L2TPINUDP;
		udp_sk(sk)->encap_rcv = l2tp_udp_encap_recv;
#if IS_ENABLED(CONFIG_IPV6)
		if (sk->sk_family == PF_INET6)
			udpv6_encap_enable();
		else
#endif
			udp_encap_enable();
	}

	sk->sk_user_data = tunnel;

	/* Hook on the tunnel socket destructor so that we can cleanup
	 * if the tunnel socket goes away.
	 */
	tunnel->old_sk_destruct = sk->sk_destruct;
	sk->sk_destruct = &l2tp_tunnel_destruct;
	tunnel->sock = sk;
	tunnel->fd = fd;
	lockdep_set_class_and_name(&sk->sk_lock.slock, &l2tp_socket_class, "l2tp_sock");

	sk->sk_allocation = GFP_ATOMIC;

	/* Init delete workqueue struct */
	INIT_WORK(&tunnel->del_work, l2tp_tunnel_del_work);

	/* Add tunnel to our list */
	INIT_LIST_HEAD(&tunnel->list);
	atomic_inc(&l2tp_tunnel_count);

	/* Bump the reference count. The tunnel context is deleted
	 * only when this drops to zero. Must be done before list insertion
	 */
	l2tp_tunnel_inc_refcount(tunnel);
	spin_lock_bh(&pn->l2tp_tunnel_list_lock);
	list_add_rcu(&tunnel->list, &pn->l2tp_tunnel_list);
	spin_unlock_bh(&pn->l2tp_tunnel_list_lock);

	err = 0;
err:
	if (tunnelp)
		*tunnelp = tunnel;

	/* If tunnel's socket was created by the kernel, it doesn't
	 * have a file.
	 */
	if (sock && sock->file)
		sockfd_put(sock);

	return err;
}
EXPORT_SYMBOL_GPL(l2tp_tunnel_create);

/* This function is used by the netlink TUNNEL_DELETE command.
 */
int l2tp_tunnel_delete(struct l2tp_tunnel *tunnel)
{
	/* Actual teardown is deferred to the workqueue; returns nonzero
	 * if the delete work was already pending.
	 */
	return (false == queue_work(l2tp_wq, &tunnel->del_work));
}
EXPORT_SYMBOL_GPL(l2tp_tunnel_delete);

/* Really kill the session.
 */
void l2tp_session_free(struct l2tp_session *session)
{
	struct l2tp_tunnel *tunnel;

	BUG_ON(atomic_read(&session->ref_count) != 0);

	tunnel = session->tunnel;
	if (tunnel != NULL) {
		BUG_ON(tunnel->magic != L2TP_TUNNEL_MAGIC);

		/* Delete the session from the hash */
		write_lock_bh(&tunnel->hlist_lock);
		hlist_del_init(&session->hlist);
		write_unlock_bh(&tunnel->hlist_lock);

		/* Unlink from the global hash if not L2TPv2 */
		if (tunnel->version != L2TP_HDR_VER_2) {
			struct l2tp_net *pn = l2tp_pernet(tunnel->l2tp_net);

			spin_lock_bh(&pn->l2tp_session_hlist_lock);
			hlist_del_init_rcu(&session->global_hlist);
			spin_unlock_bh(&pn->l2tp_session_hlist_lock);
			synchronize_rcu();
		}

		/* Management sessions (id 0) are not counted */
		if (session->session_id != 0)
			atomic_dec(&l2tp_session_count);

		/* Drop the tunnel socket reference taken at session create */
		sock_put(tunnel->sock);

		/* This will delete the tunnel context if this
		 * is the last session on the tunnel.
		 */
		session->tunnel = NULL;
		l2tp_tunnel_dec_refcount(tunnel);
	}

	kfree(session);

	return;
}
EXPORT_SYMBOL_GPL(l2tp_session_free);

/* This function is used by the netlink SESSION_DELETE command and by
   pseudowire modules.
 */
int l2tp_session_delete(struct l2tp_session *session)
{
	if (session->session_close != NULL)
		(*session->session_close)(session);

	l2tp_session_dec_refcount(session);

	return 0;
}
EXPORT_SYMBOL_GPL(l2tp_session_delete);


/* We come here whenever a session's send_seq, cookie_len or
 * l2specific_len parameters are set.
1792 */ 1793 static void l2tp_session_set_header_len(struct l2tp_session *session, int version) 1794 { 1795 if (version == L2TP_HDR_VER_2) { 1796 session->hdr_len = 6; 1797 if (session->send_seq) 1798 session->hdr_len += 4; 1799 } else { 1800 session->hdr_len = 4 + session->cookie_len + session->l2specific_len + session->offset; 1801 if (session->tunnel->encap == L2TP_ENCAPTYPE_UDP) 1802 session->hdr_len += 4; 1803 } 1804 1805 } 1806 1807 struct l2tp_session *l2tp_session_create(int priv_size, struct l2tp_tunnel *tunnel, u32 session_id, u32 peer_session_id, struct l2tp_session_cfg *cfg) 1808 { 1809 struct l2tp_session *session; 1810 1811 session = kzalloc(sizeof(struct l2tp_session) + priv_size, GFP_KERNEL); 1812 if (session != NULL) { 1813 session->magic = L2TP_SESSION_MAGIC; 1814 session->tunnel = tunnel; 1815 1816 session->session_id = session_id; 1817 session->peer_session_id = peer_session_id; 1818 session->nr = 0; 1819 1820 sprintf(&session->name[0], "sess %u/%u", 1821 tunnel->tunnel_id, session->session_id); 1822 1823 skb_queue_head_init(&session->reorder_q); 1824 1825 INIT_HLIST_NODE(&session->hlist); 1826 INIT_HLIST_NODE(&session->global_hlist); 1827 1828 /* Inherit debug options from tunnel */ 1829 session->debug = tunnel->debug; 1830 1831 if (cfg) { 1832 session->pwtype = cfg->pw_type; 1833 session->debug = cfg->debug; 1834 session->mtu = cfg->mtu; 1835 session->mru = cfg->mru; 1836 session->send_seq = cfg->send_seq; 1837 session->recv_seq = cfg->recv_seq; 1838 session->lns_mode = cfg->lns_mode; 1839 session->reorder_timeout = cfg->reorder_timeout; 1840 session->offset = cfg->offset; 1841 session->l2specific_type = cfg->l2specific_type; 1842 session->l2specific_len = cfg->l2specific_len; 1843 session->cookie_len = cfg->cookie_len; 1844 memcpy(&session->cookie[0], &cfg->cookie[0], cfg->cookie_len); 1845 session->peer_cookie_len = cfg->peer_cookie_len; 1846 memcpy(&session->peer_cookie[0], &cfg->peer_cookie[0], cfg->peer_cookie_len); 1847 } 1848 1849 if 
(tunnel->version == L2TP_HDR_VER_2) 1850 session->build_header = l2tp_build_l2tpv2_header; 1851 else 1852 session->build_header = l2tp_build_l2tpv3_header; 1853 1854 l2tp_session_set_header_len(session, tunnel->version); 1855 1856 /* Bump the reference count. The session context is deleted 1857 * only when this drops to zero. 1858 */ 1859 l2tp_session_inc_refcount(session); 1860 l2tp_tunnel_inc_refcount(tunnel); 1861 1862 /* Ensure tunnel socket isn't deleted */ 1863 sock_hold(tunnel->sock); 1864 1865 /* Add session to the tunnel's hash list */ 1866 write_lock_bh(&tunnel->hlist_lock); 1867 hlist_add_head(&session->hlist, 1868 l2tp_session_id_hash(tunnel, session_id)); 1869 write_unlock_bh(&tunnel->hlist_lock); 1870 1871 /* And to the global session list if L2TPv3 */ 1872 if (tunnel->version != L2TP_HDR_VER_2) { 1873 struct l2tp_net *pn = l2tp_pernet(tunnel->l2tp_net); 1874 1875 spin_lock_bh(&pn->l2tp_session_hlist_lock); 1876 hlist_add_head_rcu(&session->global_hlist, 1877 l2tp_session_id_hash_2(pn, session_id)); 1878 spin_unlock_bh(&pn->l2tp_session_hlist_lock); 1879 } 1880 1881 /* Ignore management session in session count value */ 1882 if (session->session_id != 0) 1883 atomic_inc(&l2tp_session_count); 1884 } 1885 1886 return session; 1887 } 1888 EXPORT_SYMBOL_GPL(l2tp_session_create); 1889 1890 /***************************************************************************** 1891 * Init and cleanup 1892 *****************************************************************************/ 1893 1894 static __net_init int l2tp_init_net(struct net *net) 1895 { 1896 struct l2tp_net *pn = net_generic(net, l2tp_net_id); 1897 int hash; 1898 1899 INIT_LIST_HEAD(&pn->l2tp_tunnel_list); 1900 spin_lock_init(&pn->l2tp_tunnel_list_lock); 1901 1902 for (hash = 0; hash < L2TP_HASH_SIZE_2; hash++) 1903 INIT_HLIST_HEAD(&pn->l2tp_session_hlist[hash]); 1904 1905 spin_lock_init(&pn->l2tp_session_hlist_lock); 1906 1907 return 0; 1908 } 1909 1910 static __net_exit void l2tp_exit_net(struct net 
*net) 1911 { 1912 struct l2tp_net *pn = l2tp_pernet(net); 1913 struct l2tp_tunnel *tunnel = NULL; 1914 1915 rcu_read_lock_bh(); 1916 list_for_each_entry_rcu(tunnel, &pn->l2tp_tunnel_list, list) { 1917 (void)l2tp_tunnel_delete(tunnel); 1918 } 1919 rcu_read_unlock_bh(); 1920 } 1921 1922 static struct pernet_operations l2tp_net_ops = { 1923 .init = l2tp_init_net, 1924 .exit = l2tp_exit_net, 1925 .id = &l2tp_net_id, 1926 .size = sizeof(struct l2tp_net), 1927 }; 1928 1929 static int __init l2tp_init(void) 1930 { 1931 int rc = 0; 1932 1933 rc = register_pernet_device(&l2tp_net_ops); 1934 if (rc) 1935 goto out; 1936 1937 l2tp_wq = alloc_workqueue("l2tp", WQ_NON_REENTRANT | WQ_UNBOUND, 0); 1938 if (!l2tp_wq) { 1939 pr_err("alloc_workqueue failed\n"); 1940 rc = -ENOMEM; 1941 goto out; 1942 } 1943 1944 pr_info("L2TP core driver, %s\n", L2TP_DRV_VERSION); 1945 1946 out: 1947 return rc; 1948 } 1949 1950 static void __exit l2tp_exit(void) 1951 { 1952 unregister_pernet_device(&l2tp_net_ops); 1953 if (l2tp_wq) { 1954 destroy_workqueue(l2tp_wq); 1955 l2tp_wq = NULL; 1956 } 1957 } 1958 1959 module_init(l2tp_init); 1960 module_exit(l2tp_exit); 1961 1962 MODULE_AUTHOR("James Chapman <jchapman@katalix.com>"); 1963 MODULE_DESCRIPTION("L2TP core"); 1964 MODULE_LICENSE("GPL"); 1965 MODULE_VERSION(L2TP_DRV_VERSION); 1966 1967