/*
 * L2TPv3 IP encapsulation support
 *
 * Copyright (c) 2008,2009,2010 Katalix Systems Ltd
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <asm/ioctls.h>
#include <linux/icmp.h>
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/random.h>
#include <linux/socket.h>
#include <linux/l2tp.h>
#include <linux/in.h>
#include <net/sock.h>
#include <net/ip.h>
#include <net/icmp.h>
#include <net/udp.h>
#include <net/inet_common.h>
#include <net/inet_hashtables.h>
#include <net/tcp_states.h>
#include <net/protocol.h>
#include <net/xfrm.h>

#include "l2tp_core.h"

struct l2tp_ip_sock {
	/* inet_sock has to be the first member of l2tp_ip_sock */
	struct inet_sock	inet;

	u32			conn_id;
	u32			peer_conn_id;
};

static DEFINE_RWLOCK(l2tp_ip_lock);
static struct hlist_head l2tp_ip_table;
static struct hlist_head l2tp_ip_bind_table;

static inline struct l2tp_ip_sock *l2tp_ip_sk(const struct sock *sk)
{
	return (struct l2tp_ip_sock *)sk;
}

static struct sock *__l2tp_ip_bind_lookup(const struct net *net, __be32 laddr,
					  __be32 raddr, int dif, u32 tunnel_id)
{
	struct sock *sk;

	sk_for_each_bound(sk, &l2tp_ip_bind_table) {
		const struct l2tp_ip_sock *l2tp = l2tp_ip_sk(sk);
		const struct inet_sock *inet = inet_sk(sk);

		if (!net_eq(sock_net(sk), net))
			continue;

		if (sk->sk_bound_dev_if && dif && sk->sk_bound_dev_if != dif)
			continue;

		if (inet->inet_rcv_saddr && laddr &&
		    inet->inet_rcv_saddr != laddr)
			continue;

		if (inet->inet_daddr && raddr && inet->inet_daddr != raddr)
			continue;

		if (l2tp->conn_id != tunnel_id)
			continue;

		goto found;
	}

	sk = NULL;
found:
	return sk;
}

/* When processing receive frames, there are two cases to
 * consider. Data frames consist of a non-zero session-id and an
 * optional cookie. Control frames consist of a regular L2TP header
 * preceded by 32-bits of zeros.
 *
 * L2TPv3 Session Header Over IP
 *
 *  0                   1                   2                   3
 *  0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 * |                          Session ID                           |
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 * |               Cookie (optional, maximum 64 bits)...
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 *                                                                 |
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 *
 * L2TPv3 Control Message Header Over IP
 *
 *  0                   1                   2                   3
 *  0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 * |                      (32 bits of zeros)                       |
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 * |T|L|x|x|S|x|x|x|x|x|x|x|  Ver  |             Length            |
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 * |                     Control Connection ID                     |
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 * |               Ns              |               Nr              |
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 *
 * All control frames are passed to userspace.
 */
static int l2tp_ip_recv(struct sk_buff *skb)
{
	struct net *net = dev_net(skb->dev);
	struct sock *sk;
	u32 session_id;
	u32 tunnel_id;
	unsigned char *ptr, *optr;
	struct l2tp_session *session;
	struct l2tp_tunnel *tunnel = NULL;
	int length;

	if (!pskb_may_pull(skb, 4))
		goto discard;

	/* Point to L2TP header */
	optr = ptr = skb->data;
	session_id = ntohl(*((__be32 *) ptr));
	ptr += 4;

	/* RFC3931: L2TP/IP packets have the first 4 bytes containing
	 * the session_id. If it is 0, the packet is a L2TP control
	 * frame and the session_id value can be discarded.
	 */
	if (session_id == 0) {
		__skb_pull(skb, 4);
		goto pass_up;
	}

	/* Ok, this is a data packet. Lookup the session. */
	session = l2tp_session_get(net, NULL, session_id, true);
	if (!session)
		goto discard;

	tunnel = session->tunnel;
	if (!tunnel)
		goto discard_sess;

	/* Trace packet contents, if enabled */
	if (tunnel->debug & L2TP_MSG_DATA) {
		length = min(32u, skb->len);
		if (!pskb_may_pull(skb, length))
			goto discard_sess;

		/* Point to L2TP header */
		optr = ptr = skb->data;
		ptr += 4;
		pr_debug("%s: ip recv\n", tunnel->name);
		print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, ptr, length);
	}

	l2tp_recv_common(session, skb, ptr, optr, 0, skb->len,
			 tunnel->recv_payload_hook);
	l2tp_session_dec_refcount(session);

	return 0;

pass_up:
	/* Get the tunnel_id from the L2TP header */
	if (!pskb_may_pull(skb, 12))
		goto discard;

	if ((skb->data[0] & 0xc0) != 0xc0)
		goto discard;

	tunnel_id = ntohl(*(__be32 *) &skb->data[4]);
	tunnel = l2tp_tunnel_find(net, tunnel_id);
	if (tunnel) {
		sk = tunnel->sock;
		sock_hold(sk);
	} else {
		struct iphdr *iph = (struct iphdr *) skb_network_header(skb);

		read_lock_bh(&l2tp_ip_lock);
		sk = __l2tp_ip_bind_lookup(net, iph->daddr, iph->saddr,
					   inet_iif(skb), tunnel_id);
		if (!sk) {
			read_unlock_bh(&l2tp_ip_lock);
			goto discard;
		}

		sock_hold(sk);
		read_unlock_bh(&l2tp_ip_lock);
	}

	if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
		goto discard_put;

	nf_reset(skb);

	return sk_receive_skb(sk, skb, 1);

discard_sess:
	if (session->deref)
		session->deref(session);
	l2tp_session_dec_refcount(session);
	goto discard;

discard_put:
	sock_put(sk);

discard:
	kfree_skb(skb);
	return 0;
}
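
/* Illustrative only: userspace exchanges the control frames described above
 * through an L2TP/IP socket. A minimal sketch (placeholder address and
 * connection ID, error handling omitted, userspace code using linux/l2tp.h):
 *
 *	int fd = socket(AF_INET, SOCK_DGRAM, IPPROTO_L2TP);
 *	struct sockaddr_l2tpip sa = {
 *		.l2tp_family  = AF_INET,
 *		.l2tp_addr    = { .s_addr = htonl(INADDR_ANY) },
 *		.l2tp_conn_id = 1,
 *	};
 *	bind(fd, (struct sockaddr *)&sa, sizeof(sa));
 *
 * connect() takes the same structure for the peer address and peer
 * connection ID. recv() then returns whole control messages with the
 * leading zero session ID already stripped (the __skb_pull() in the
 * pass_up path above), and l2tp_ip_sendmsg() below prepends it again
 * on transmit.
 */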

static int l2tp_ip_open(struct sock *sk)
{
	/* Prevent autobind. We don't have ports. */
	inet_sk(sk)->inet_num = IPPROTO_L2TP;

	write_lock_bh(&l2tp_ip_lock);
	sk_add_node(sk, &l2tp_ip_table);
	write_unlock_bh(&l2tp_ip_lock);

	return 0;
}

static void l2tp_ip_close(struct sock *sk, long timeout)
{
	write_lock_bh(&l2tp_ip_lock);
	hlist_del_init(&sk->sk_bind_node);
	sk_del_node_init(sk);
	write_unlock_bh(&l2tp_ip_lock);
	sk_common_release(sk);
}

static void l2tp_ip_destroy_sock(struct sock *sk)
{
	struct sk_buff *skb;
	struct l2tp_tunnel *tunnel = l2tp_sock_to_tunnel(sk);

	while ((skb = __skb_dequeue_tail(&sk->sk_write_queue)) != NULL)
		kfree_skb(skb);

	if (tunnel) {
		l2tp_tunnel_closeall(tunnel);
		sock_put(sk);
	}

	sk_refcnt_debug_dec(sk);
}

static int l2tp_ip_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len)
{
	struct inet_sock *inet = inet_sk(sk);
	struct sockaddr_l2tpip *addr = (struct sockaddr_l2tpip *) uaddr;
	struct net *net = sock_net(sk);
	int ret;
	int chk_addr_ret;

	if (addr_len < sizeof(struct sockaddr_l2tpip))
		return -EINVAL;
	if (addr->l2tp_family != AF_INET)
		return -EINVAL;

	lock_sock(sk);

	ret = -EINVAL;
	if (!sock_flag(sk, SOCK_ZAPPED))
		goto out;

	if (sk->sk_state != TCP_CLOSE)
		goto out;

	chk_addr_ret = inet_addr_type(net, addr->l2tp_addr.s_addr);
	ret = -EADDRNOTAVAIL;
	if (addr->l2tp_addr.s_addr && chk_addr_ret != RTN_LOCAL &&
	    chk_addr_ret != RTN_MULTICAST && chk_addr_ret != RTN_BROADCAST)
		goto out;

	if (addr->l2tp_addr.s_addr)
		inet->inet_rcv_saddr = inet->inet_saddr = addr->l2tp_addr.s_addr;
	if (chk_addr_ret == RTN_MULTICAST || chk_addr_ret == RTN_BROADCAST)
		inet->inet_saddr = 0;  /* Use device */

	write_lock_bh(&l2tp_ip_lock);
	if (__l2tp_ip_bind_lookup(net, addr->l2tp_addr.s_addr, 0,
				  sk->sk_bound_dev_if, addr->l2tp_conn_id)) {
		write_unlock_bh(&l2tp_ip_lock);
		ret = -EADDRINUSE;
		goto out;
	}

	sk_dst_reset(sk);
	l2tp_ip_sk(sk)->conn_id = addr->l2tp_conn_id;

	sk_add_bind_node(sk, &l2tp_ip_bind_table);
	sk_del_node_init(sk);
	write_unlock_bh(&l2tp_ip_lock);

	ret = 0;
	sock_reset_flag(sk, SOCK_ZAPPED);

out:
	release_sock(sk);

	return ret;
}

static int l2tp_ip_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
{
	struct sockaddr_l2tpip *lsa = (struct sockaddr_l2tpip *) uaddr;
	int rc;

	if (addr_len < sizeof(*lsa))
		return -EINVAL;

	if (ipv4_is_multicast(lsa->l2tp_addr.s_addr))
		return -EINVAL;

	lock_sock(sk);

	/* Must bind first - autobinding does not work */
	if (sock_flag(sk, SOCK_ZAPPED)) {
		rc = -EINVAL;
		goto out_sk;
	}

	rc = __ip4_datagram_connect(sk, uaddr, addr_len);
	if (rc < 0)
		goto out_sk;

	l2tp_ip_sk(sk)->peer_conn_id = lsa->l2tp_conn_id;

	write_lock_bh(&l2tp_ip_lock);
	hlist_del_init(&sk->sk_bind_node);
	sk_add_bind_node(sk, &l2tp_ip_bind_table);
	write_unlock_bh(&l2tp_ip_lock);

out_sk:
	release_sock(sk);

	return rc;
}

static int l2tp_ip_disconnect(struct sock *sk, int flags)
{
	if (sock_flag(sk, SOCK_ZAPPED))
		return 0;

	return __udp_disconnect(sk, flags);
}
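
/* getsockname()/getpeername(): report the bound or connected address and the
 * local or peer connection ID as a struct sockaddr_l2tpip.
 */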

static int l2tp_ip_getname(struct socket *sock, struct sockaddr *uaddr,
			   int *uaddr_len, int peer)
{
	struct sock *sk = sock->sk;
	struct inet_sock *inet = inet_sk(sk);
	struct l2tp_ip_sock *lsk = l2tp_ip_sk(sk);
	struct sockaddr_l2tpip *lsa = (struct sockaddr_l2tpip *)uaddr;

	memset(lsa, 0, sizeof(*lsa));
	lsa->l2tp_family = AF_INET;
	if (peer) {
		if (!inet->inet_dport)
			return -ENOTCONN;
		lsa->l2tp_conn_id = lsk->peer_conn_id;
		lsa->l2tp_addr.s_addr = inet->inet_daddr;
	} else {
		__be32 addr = inet->inet_rcv_saddr;

		if (!addr)
			addr = inet->inet_saddr;
		lsa->l2tp_conn_id = lsk->conn_id;
		lsa->l2tp_addr.s_addr = addr;
	}
	*uaddr_len = sizeof(*lsa);
	return 0;
}

static int l2tp_ip_backlog_recv(struct sock *sk, struct sk_buff *skb)
{
	int rc;

	/* Charge it to the socket, dropping if the queue is full. */
	rc = sock_queue_rcv_skb(sk, skb);
	if (rc < 0)
		goto drop;

	return 0;

drop:
	IP_INC_STATS(sock_net(sk), IPSTATS_MIB_INDISCARDS);
	kfree_skb(skb);
	return 0;
}

/* Userspace will call sendmsg() on the tunnel socket to send L2TP
 * control frames.
 */
static int l2tp_ip_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
{
	struct sk_buff *skb;
	int rc;
	struct inet_sock *inet = inet_sk(sk);
	struct rtable *rt = NULL;
	struct flowi4 *fl4;
	int connected = 0;
	__be32 daddr;

	lock_sock(sk);

	rc = -ENOTCONN;
	if (sock_flag(sk, SOCK_DEAD))
		goto out;

	/* Get and verify the address. */
	if (msg->msg_name) {
		DECLARE_SOCKADDR(struct sockaddr_l2tpip *, lip, msg->msg_name);

		rc = -EINVAL;
		if (msg->msg_namelen < sizeof(*lip))
			goto out;

		if (lip->l2tp_family != AF_INET) {
			rc = -EAFNOSUPPORT;
			if (lip->l2tp_family != AF_UNSPEC)
				goto out;
		}

		daddr = lip->l2tp_addr.s_addr;
	} else {
		rc = -EDESTADDRREQ;
		if (sk->sk_state != TCP_ESTABLISHED)
			goto out;

		daddr = inet->inet_daddr;
		connected = 1;
	}

	/* Allocate a socket buffer */
	rc = -ENOMEM;
	skb = sock_wmalloc(sk, 2 + NET_SKB_PAD + sizeof(struct iphdr) +
			   4 + len, 0, GFP_KERNEL);
	if (!skb)
		goto error;

	/* Reserve space for headers, putting IP header on 4-byte boundary. */
	skb_reserve(skb, 2 + NET_SKB_PAD);
	skb_reset_network_header(skb);
	skb_reserve(skb, sizeof(struct iphdr));
	skb_reset_transport_header(skb);

	/* Insert 0 session_id */
	*((__be32 *) skb_put(skb, 4)) = 0;

	/* Copy user data into skb */
	rc = memcpy_from_msg(skb_put(skb, len), msg, len);
	if (rc < 0) {
		kfree_skb(skb);
		goto error;
	}

	fl4 = &inet->cork.fl.u.ip4;
	if (connected)
		rt = (struct rtable *) __sk_dst_check(sk, 0);

	rcu_read_lock();
	if (rt == NULL) {
		const struct ip_options_rcu *inet_opt;

		inet_opt = rcu_dereference(inet->inet_opt);

		/* Use correct destination address if we have options. */
		if (inet_opt && inet_opt->opt.srr)
			daddr = inet_opt->opt.faddr;

		/* If this fails, the retransmit mechanism of the transport
		 * layer will keep trying until the route appears or the
		 * connection times out.
		 */
		rt = ip_route_output_ports(sock_net(sk), fl4, sk,
					   daddr, inet->inet_saddr,
					   inet->inet_dport, inet->inet_sport,
					   sk->sk_protocol, RT_CONN_FLAGS(sk),
					   sk->sk_bound_dev_if);
		if (IS_ERR(rt))
			goto no_route;
		if (connected) {
			sk_setup_caps(sk, &rt->dst);
		} else {
			skb_dst_set(skb, &rt->dst);
			goto xmit;
		}
	}

	/* We don't need to clone dst here, it is guaranteed to not disappear.
	 * __dev_xmit_skb() might force a refcount if needed.
	 */
	skb_dst_set_noref(skb, &rt->dst);

xmit:
	/* Queue the packet to IP for output */
	rc = ip_queue_xmit(sk, skb, &inet->cork.fl);
	rcu_read_unlock();

error:
	if (rc >= 0)
		rc = len;

out:
	release_sock(sk);
	return rc;

no_route:
	rcu_read_unlock();
	IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTNOROUTES);
	kfree_skb(skb);
	rc = -EHOSTUNREACH;
	goto out;
}

static int l2tp_ip_recvmsg(struct sock *sk, struct msghdr *msg,
			   size_t len, int noblock, int flags, int *addr_len)
{
	struct inet_sock *inet = inet_sk(sk);
	size_t copied = 0;
	int err = -EOPNOTSUPP;
	DECLARE_SOCKADDR(struct sockaddr_in *, sin, msg->msg_name);
	struct sk_buff *skb;

	if (flags & MSG_OOB)
		goto out;

	skb = skb_recv_datagram(sk, flags, noblock, &err);
	if (!skb)
		goto out;

	copied = skb->len;
	if (len < copied) {
		msg->msg_flags |= MSG_TRUNC;
		copied = len;
	}

	err = skb_copy_datagram_msg(skb, 0, msg, copied);
	if (err)
		goto done;

	sock_recv_timestamp(msg, sk, skb);

	/* Copy the address. */
	if (sin) {
		sin->sin_family = AF_INET;
		sin->sin_addr.s_addr = ip_hdr(skb)->saddr;
		sin->sin_port = 0;
		memset(&sin->sin_zero, 0, sizeof(sin->sin_zero));
		*addr_len = sizeof(*sin);
	}
	if (inet->cmsg_flags)
		ip_cmsg_recv(msg, skb);
	if (flags & MSG_TRUNC)
		copied = skb->len;
done:
	skb_free_datagram(sk, skb);
out:
	return err ? err : copied;
}
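
/* SIOCOUTQ reports the bytes queued for transmit on the socket; SIOCINQ
 * reports the size of the datagram at the head of the receive queue.
 * Exported so the same handler can be reused by other L2TP socket code
 * (e.g. the IPv6 flavour, l2tp_ip6).
 */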

int l2tp_ioctl(struct sock *sk, int cmd, unsigned long arg)
{
	struct sk_buff *skb;
	int amount;

	switch (cmd) {
	case SIOCOUTQ:
		amount = sk_wmem_alloc_get(sk);
		break;
	case SIOCINQ:
		spin_lock_bh(&sk->sk_receive_queue.lock);
		skb = skb_peek(&sk->sk_receive_queue);
		amount = skb ? skb->len : 0;
		spin_unlock_bh(&sk->sk_receive_queue.lock);
		break;

	default:
		return -ENOIOCTLCMD;
	}

	return put_user(amount, (int __user *)arg);
}
EXPORT_SYMBOL(l2tp_ioctl);

static struct proto l2tp_ip_prot = {
	.name		   = "L2TP/IP",
	.owner		   = THIS_MODULE,
	.init		   = l2tp_ip_open,
	.close		   = l2tp_ip_close,
	.bind		   = l2tp_ip_bind,
	.connect	   = l2tp_ip_connect,
	.disconnect	   = l2tp_ip_disconnect,
	.ioctl		   = l2tp_ioctl,
	.destroy	   = l2tp_ip_destroy_sock,
	.setsockopt	   = ip_setsockopt,
	.getsockopt	   = ip_getsockopt,
	.sendmsg	   = l2tp_ip_sendmsg,
	.recvmsg	   = l2tp_ip_recvmsg,
	.backlog_rcv	   = l2tp_ip_backlog_recv,
	.hash		   = inet_hash,
	.unhash		   = inet_unhash,
	.obj_size	   = sizeof(struct l2tp_ip_sock),
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_ip_setsockopt,
	.compat_getsockopt = compat_ip_getsockopt,
#endif
};

static const struct proto_ops l2tp_ip_ops = {
	.family		   = PF_INET,
	.owner		   = THIS_MODULE,
	.release	   = inet_release,
	.bind		   = inet_bind,
	.connect	   = inet_dgram_connect,
	.socketpair	   = sock_no_socketpair,
	.accept		   = sock_no_accept,
	.getname	   = l2tp_ip_getname,
	.poll		   = datagram_poll,
	.ioctl		   = inet_ioctl,
	.listen		   = sock_no_listen,
	.shutdown	   = inet_shutdown,
	.setsockopt	   = sock_common_setsockopt,
	.getsockopt	   = sock_common_getsockopt,
	.sendmsg	   = inet_sendmsg,
	.recvmsg	   = sock_common_recvmsg,
	.mmap		   = sock_no_mmap,
	.sendpage	   = sock_no_sendpage,
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_sock_common_setsockopt,
	.compat_getsockopt = compat_sock_common_getsockopt,
#endif
};

static struct inet_protosw l2tp_ip_protosw = {
	.type		= SOCK_DGRAM,
	.protocol	= IPPROTO_L2TP,
	.prot		= &l2tp_ip_prot,
	.ops		= &l2tp_ip_ops,
};

static struct net_protocol l2tp_ip_protocol __read_mostly = {
	.handler	= l2tp_ip_recv,
	.netns_ok	= 1,
};

static int __init l2tp_ip_init(void)
{
	int err;

	pr_info("L2TP IP encapsulation support (L2TPv3)\n");

	err = proto_register(&l2tp_ip_prot, 1);
	if (err != 0)
		goto out;

	err = inet_add_protocol(&l2tp_ip_protocol, IPPROTO_L2TP);
	if (err)
		goto out1;

	inet_register_protosw(&l2tp_ip_protosw);
	return 0;

out1:
	proto_unregister(&l2tp_ip_prot);
out:
	return err;
}

static void __exit l2tp_ip_exit(void)
{
	inet_unregister_protosw(&l2tp_ip_protosw);
	inet_del_protocol(&l2tp_ip_protocol, IPPROTO_L2TP);
	proto_unregister(&l2tp_ip_prot);
}

module_init(l2tp_ip_init);
module_exit(l2tp_ip_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("James Chapman <jchapman@katalix.com>");
MODULE_DESCRIPTION("L2TP over IP");
MODULE_VERSION("1.0");

/* Use the value of SOCK_DGRAM (2) directly, because __stringify doesn't like
 * enums
 */
MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_INET, 2, IPPROTO_L2TP);
MODULE_ALIAS_NET_PF_PROTO(PF_INET, IPPROTO_L2TP);
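
/* The module aliases above let this module be loaded on demand the first
 * time a socket(AF_INET, SOCK_DGRAM, IPPROTO_L2TP) socket is created.
 */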