1 // SPDX-License-Identifier: GPL-2.0-or-later 2 /* L2TPv3 IP encapsulation support 3 * 4 * Copyright (c) 2008,2009,2010 Katalix Systems Ltd 5 */ 6 7 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 8 9 #include <asm/ioctls.h> 10 #include <linux/icmp.h> 11 #include <linux/module.h> 12 #include <linux/skbuff.h> 13 #include <linux/random.h> 14 #include <linux/socket.h> 15 #include <linux/l2tp.h> 16 #include <linux/in.h> 17 #include <net/sock.h> 18 #include <net/ip.h> 19 #include <net/icmp.h> 20 #include <net/udp.h> 21 #include <net/inet_common.h> 22 #include <net/tcp_states.h> 23 #include <net/protocol.h> 24 #include <net/xfrm.h> 25 26 #include "l2tp_core.h" 27 28 struct l2tp_ip_sock { 29 /* inet_sock has to be the first member of l2tp_ip_sock */ 30 struct inet_sock inet; 31 32 u32 conn_id; 33 u32 peer_conn_id; 34 }; 35 36 static DEFINE_RWLOCK(l2tp_ip_lock); 37 static struct hlist_head l2tp_ip_table; 38 static struct hlist_head l2tp_ip_bind_table; 39 40 static inline struct l2tp_ip_sock *l2tp_ip_sk(const struct sock *sk) 41 { 42 return (struct l2tp_ip_sock *)sk; 43 } 44 45 static struct sock *__l2tp_ip_bind_lookup(const struct net *net, __be32 laddr, 46 __be32 raddr, int dif, u32 tunnel_id) 47 { 48 struct sock *sk; 49 50 sk_for_each_bound(sk, &l2tp_ip_bind_table) { 51 const struct l2tp_ip_sock *l2tp = l2tp_ip_sk(sk); 52 const struct inet_sock *inet = inet_sk(sk); 53 54 if (!net_eq(sock_net(sk), net)) 55 continue; 56 57 if (sk->sk_bound_dev_if && dif && sk->sk_bound_dev_if != dif) 58 continue; 59 60 if (inet->inet_rcv_saddr && laddr && 61 inet->inet_rcv_saddr != laddr) 62 continue; 63 64 if (inet->inet_daddr && raddr && inet->inet_daddr != raddr) 65 continue; 66 67 if (l2tp->conn_id != tunnel_id) 68 continue; 69 70 goto found; 71 } 72 73 sk = NULL; 74 found: 75 return sk; 76 } 77 78 /* When processing receive frames, there are two cases to 79 * consider. Data frames consist of a non-zero session-id and an 80 * optional cookie. Control frames consist of a regular L2TP header 81 * preceded by 32-bits of zeros. 82 * 83 * L2TPv3 Session Header Over IP 84 * 85 * 0 1 2 3 86 * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 87 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ 88 * | Session ID | 89 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ 90 * | Cookie (optional, maximum 64 bits)... 91 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ 92 * | 93 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ 94 * 95 * L2TPv3 Control Message Header Over IP 96 * 97 * 0 1 2 3 98 * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 99 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ 100 * | (32 bits of zeros) | 101 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ 102 * |T|L|x|x|S|x|x|x|x|x|x|x| Ver | Length | 103 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ 104 * | Control Connection ID | 105 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ 106 * | Ns | Nr | 107 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ 108 * 109 * All control frames are passed to userspace. 110 */ 111 static int l2tp_ip_recv(struct sk_buff *skb) 112 { 113 struct net *net = dev_net(skb->dev); 114 struct sock *sk; 115 u32 session_id; 116 u32 tunnel_id; 117 unsigned char *ptr, *optr; 118 struct l2tp_session *session; 119 struct l2tp_tunnel *tunnel = NULL; 120 struct iphdr *iph; 121 int length; 122 123 if (!pskb_may_pull(skb, 4)) 124 goto discard; 125 126 /* Point to L2TP header */ 127 optr = ptr = skb->data; 128 session_id = ntohl(*((__be32 *)ptr)); 129 ptr += 4; 130 131 /* RFC3931: L2TP/IP packets have the first 4 bytes containing 132 * the session_id. If it is 0, the packet is a L2TP control 133 * frame and the session_id value can be discarded. 134 */ 135 if (session_id == 0) { 136 __skb_pull(skb, 4); 137 goto pass_up; 138 } 139 140 /* Ok, this is a data packet. Lookup the session. */ 141 session = l2tp_session_get(net, session_id); 142 if (!session) 143 goto discard; 144 145 tunnel = session->tunnel; 146 if (!tunnel) 147 goto discard_sess; 148 149 /* Trace packet contents, if enabled */ 150 if (tunnel->debug & L2TP_MSG_DATA) { 151 length = min(32u, skb->len); 152 if (!pskb_may_pull(skb, length)) 153 goto discard_sess; 154 155 /* Point to L2TP header */ 156 optr = ptr = skb->data; 157 ptr += 4; 158 pr_debug("%s: ip recv\n", tunnel->name); 159 print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, ptr, length); 160 } 161 162 if (l2tp_v3_ensure_opt_in_linear(session, skb, &ptr, &optr)) 163 goto discard_sess; 164 165 l2tp_recv_common(session, skb, ptr, optr, 0, skb->len); 166 l2tp_session_dec_refcount(session); 167 168 return 0; 169 170 pass_up: 171 /* Get the tunnel_id from the L2TP header */ 172 if (!pskb_may_pull(skb, 12)) 173 goto discard; 174 175 if ((skb->data[0] & 0xc0) != 0xc0) 176 goto discard; 177 178 tunnel_id = ntohl(*(__be32 *)&skb->data[4]); 179 iph = (struct iphdr *)skb_network_header(skb); 180 181 read_lock_bh(&l2tp_ip_lock); 182 sk = __l2tp_ip_bind_lookup(net, iph->daddr, iph->saddr, inet_iif(skb), 183 tunnel_id); 184 if (!sk) { 185 read_unlock_bh(&l2tp_ip_lock); 186 goto discard; 187 } 188 sock_hold(sk); 189 read_unlock_bh(&l2tp_ip_lock); 190 191 if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb)) 192 goto discard_put; 193 194 nf_reset_ct(skb); 195 196 return sk_receive_skb(sk, skb, 1); 197 198 discard_sess: 199 l2tp_session_dec_refcount(session); 200 goto discard; 201 202 discard_put: 203 sock_put(sk); 204 205 discard: 206 kfree_skb(skb); 207 return 0; 208 } 209 210 static int l2tp_ip_hash(struct sock *sk) 211 { 212 if (sk_unhashed(sk)) { 213 write_lock_bh(&l2tp_ip_lock); 214 sk_add_node(sk, &l2tp_ip_table); 215 write_unlock_bh(&l2tp_ip_lock); 216 } 217 return 0; 218 } 219 220 static void l2tp_ip_unhash(struct sock *sk) 221 { 222 if (sk_unhashed(sk)) 223 return; 224 write_lock_bh(&l2tp_ip_lock); 225 sk_del_node_init(sk); 226 write_unlock_bh(&l2tp_ip_lock); 227 } 228 229 static int l2tp_ip_open(struct sock *sk) 230 { 231 /* Prevent autobind. We don't have ports. */ 232 inet_sk(sk)->inet_num = IPPROTO_L2TP; 233 234 l2tp_ip_hash(sk); 235 return 0; 236 } 237 238 static void l2tp_ip_close(struct sock *sk, long timeout) 239 { 240 write_lock_bh(&l2tp_ip_lock); 241 hlist_del_init(&sk->sk_bind_node); 242 sk_del_node_init(sk); 243 write_unlock_bh(&l2tp_ip_lock); 244 sk_common_release(sk); 245 } 246 247 static void l2tp_ip_destroy_sock(struct sock *sk) 248 { 249 struct sk_buff *skb; 250 struct l2tp_tunnel *tunnel = sk->sk_user_data; 251 252 while ((skb = __skb_dequeue_tail(&sk->sk_write_queue)) != NULL) 253 kfree_skb(skb); 254 255 if (tunnel) 256 l2tp_tunnel_delete(tunnel); 257 } 258 259 static int l2tp_ip_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len) 260 { 261 struct inet_sock *inet = inet_sk(sk); 262 struct sockaddr_l2tpip *addr = (struct sockaddr_l2tpip *)uaddr; 263 struct net *net = sock_net(sk); 264 int ret; 265 int chk_addr_ret; 266 267 if (addr_len < sizeof(struct sockaddr_l2tpip)) 268 return -EINVAL; 269 if (addr->l2tp_family != AF_INET) 270 return -EINVAL; 271 272 lock_sock(sk); 273 274 ret = -EINVAL; 275 if (!sock_flag(sk, SOCK_ZAPPED)) 276 goto out; 277 278 if (sk->sk_state != TCP_CLOSE) 279 goto out; 280 281 chk_addr_ret = inet_addr_type(net, addr->l2tp_addr.s_addr); 282 ret = -EADDRNOTAVAIL; 283 if (addr->l2tp_addr.s_addr && chk_addr_ret != RTN_LOCAL && 284 chk_addr_ret != RTN_MULTICAST && chk_addr_ret != RTN_BROADCAST) 285 goto out; 286 287 if (addr->l2tp_addr.s_addr) 288 inet->inet_rcv_saddr = inet->inet_saddr = addr->l2tp_addr.s_addr; 289 if (chk_addr_ret == RTN_MULTICAST || chk_addr_ret == RTN_BROADCAST) 290 inet->inet_saddr = 0; /* Use device */ 291 292 write_lock_bh(&l2tp_ip_lock); 293 if (__l2tp_ip_bind_lookup(net, addr->l2tp_addr.s_addr, 0, 294 sk->sk_bound_dev_if, addr->l2tp_conn_id)) { 295 write_unlock_bh(&l2tp_ip_lock); 296 ret = -EADDRINUSE; 297 goto out; 298 } 299 300 sk_dst_reset(sk); 301 l2tp_ip_sk(sk)->conn_id = addr->l2tp_conn_id; 302 303 sk_add_bind_node(sk, &l2tp_ip_bind_table); 304 sk_del_node_init(sk); 305 write_unlock_bh(&l2tp_ip_lock); 306 307 ret = 0; 308 sock_reset_flag(sk, SOCK_ZAPPED); 309 310 out: 311 release_sock(sk); 312 313 return ret; 314 } 315 316 static int l2tp_ip_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len) 317 { 318 struct sockaddr_l2tpip *lsa = (struct sockaddr_l2tpip *)uaddr; 319 int rc; 320 321 if (addr_len < sizeof(*lsa)) 322 return -EINVAL; 323 324 if (ipv4_is_multicast(lsa->l2tp_addr.s_addr)) 325 return -EINVAL; 326 327 lock_sock(sk); 328 329 /* Must bind first - autobinding does not work */ 330 if (sock_flag(sk, SOCK_ZAPPED)) { 331 rc = -EINVAL; 332 goto out_sk; 333 } 334 335 rc = __ip4_datagram_connect(sk, uaddr, addr_len); 336 if (rc < 0) 337 goto out_sk; 338 339 l2tp_ip_sk(sk)->peer_conn_id = lsa->l2tp_conn_id; 340 341 write_lock_bh(&l2tp_ip_lock); 342 hlist_del_init(&sk->sk_bind_node); 343 sk_add_bind_node(sk, &l2tp_ip_bind_table); 344 write_unlock_bh(&l2tp_ip_lock); 345 346 out_sk: 347 release_sock(sk); 348 349 return rc; 350 } 351 352 static int l2tp_ip_disconnect(struct sock *sk, int flags) 353 { 354 if (sock_flag(sk, SOCK_ZAPPED)) 355 return 0; 356 357 return __udp_disconnect(sk, flags); 358 } 359 360 static int l2tp_ip_getname(struct socket *sock, struct sockaddr *uaddr, 361 int peer) 362 { 363 struct sock *sk = sock->sk; 364 struct inet_sock *inet = inet_sk(sk); 365 struct l2tp_ip_sock *lsk = l2tp_ip_sk(sk); 366 struct sockaddr_l2tpip *lsa = (struct sockaddr_l2tpip *)uaddr; 367 368 memset(lsa, 0, sizeof(*lsa)); 369 lsa->l2tp_family = AF_INET; 370 if (peer) { 371 if (!inet->inet_dport) 372 return -ENOTCONN; 373 lsa->l2tp_conn_id = lsk->peer_conn_id; 374 lsa->l2tp_addr.s_addr = inet->inet_daddr; 375 } else { 376 __be32 addr = inet->inet_rcv_saddr; 377 378 if (!addr) 379 addr = inet->inet_saddr; 380 lsa->l2tp_conn_id = lsk->conn_id; 381 lsa->l2tp_addr.s_addr = addr; 382 } 383 return sizeof(*lsa); 384 } 385 386 static int l2tp_ip_backlog_recv(struct sock *sk, struct sk_buff *skb) 387 { 388 int rc; 389 390 /* Charge it to the socket, dropping if the queue is full. */ 391 rc = sock_queue_rcv_skb(sk, skb); 392 if (rc < 0) 393 goto drop; 394 395 return 0; 396 397 drop: 398 IP_INC_STATS(sock_net(sk), IPSTATS_MIB_INDISCARDS); 399 kfree_skb(skb); 400 return 0; 401 } 402 403 /* Userspace will call sendmsg() on the tunnel socket to send L2TP 404 * control frames. 405 */ 406 static int l2tp_ip_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) 407 { 408 struct sk_buff *skb; 409 int rc; 410 struct inet_sock *inet = inet_sk(sk); 411 struct rtable *rt = NULL; 412 struct flowi4 *fl4; 413 int connected = 0; 414 __be32 daddr; 415 416 lock_sock(sk); 417 418 rc = -ENOTCONN; 419 if (sock_flag(sk, SOCK_DEAD)) 420 goto out; 421 422 /* Get and verify the address. */ 423 if (msg->msg_name) { 424 DECLARE_SOCKADDR(struct sockaddr_l2tpip *, lip, msg->msg_name); 425 426 rc = -EINVAL; 427 if (msg->msg_namelen < sizeof(*lip)) 428 goto out; 429 430 if (lip->l2tp_family != AF_INET) { 431 rc = -EAFNOSUPPORT; 432 if (lip->l2tp_family != AF_UNSPEC) 433 goto out; 434 } 435 436 daddr = lip->l2tp_addr.s_addr; 437 } else { 438 rc = -EDESTADDRREQ; 439 if (sk->sk_state != TCP_ESTABLISHED) 440 goto out; 441 442 daddr = inet->inet_daddr; 443 connected = 1; 444 } 445 446 /* Allocate a socket buffer */ 447 rc = -ENOMEM; 448 skb = sock_wmalloc(sk, 2 + NET_SKB_PAD + sizeof(struct iphdr) + 449 4 + len, 0, GFP_KERNEL); 450 if (!skb) 451 goto error; 452 453 /* Reserve space for headers, putting IP header on 4-byte boundary. */ 454 skb_reserve(skb, 2 + NET_SKB_PAD); 455 skb_reset_network_header(skb); 456 skb_reserve(skb, sizeof(struct iphdr)); 457 skb_reset_transport_header(skb); 458 459 /* Insert 0 session_id */ 460 *((__be32 *)skb_put(skb, 4)) = 0; 461 462 /* Copy user data into skb */ 463 rc = memcpy_from_msg(skb_put(skb, len), msg, len); 464 if (rc < 0) { 465 kfree_skb(skb); 466 goto error; 467 } 468 469 fl4 = &inet->cork.fl.u.ip4; 470 if (connected) 471 rt = (struct rtable *)__sk_dst_check(sk, 0); 472 473 rcu_read_lock(); 474 if (!rt) { 475 const struct ip_options_rcu *inet_opt; 476 477 inet_opt = rcu_dereference(inet->inet_opt); 478 479 /* Use correct destination address if we have options. */ 480 if (inet_opt && inet_opt->opt.srr) 481 daddr = inet_opt->opt.faddr; 482 483 /* If this fails, retransmit mechanism of transport layer will 484 * keep trying until route appears or the connection times 485 * itself out. 486 */ 487 rt = ip_route_output_ports(sock_net(sk), fl4, sk, 488 daddr, inet->inet_saddr, 489 inet->inet_dport, inet->inet_sport, 490 sk->sk_protocol, RT_CONN_FLAGS(sk), 491 sk->sk_bound_dev_if); 492 if (IS_ERR(rt)) 493 goto no_route; 494 if (connected) { 495 sk_setup_caps(sk, &rt->dst); 496 } else { 497 skb_dst_set(skb, &rt->dst); 498 goto xmit; 499 } 500 } 501 502 /* We dont need to clone dst here, it is guaranteed to not disappear. 503 * __dev_xmit_skb() might force a refcount if needed. 504 */ 505 skb_dst_set_noref(skb, &rt->dst); 506 507 xmit: 508 /* Queue the packet to IP for output */ 509 rc = ip_queue_xmit(sk, skb, &inet->cork.fl); 510 rcu_read_unlock(); 511 512 error: 513 if (rc >= 0) 514 rc = len; 515 516 out: 517 release_sock(sk); 518 return rc; 519 520 no_route: 521 rcu_read_unlock(); 522 IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTNOROUTES); 523 kfree_skb(skb); 524 rc = -EHOSTUNREACH; 525 goto out; 526 } 527 528 static int l2tp_ip_recvmsg(struct sock *sk, struct msghdr *msg, 529 size_t len, int noblock, int flags, int *addr_len) 530 { 531 struct inet_sock *inet = inet_sk(sk); 532 size_t copied = 0; 533 int err = -EOPNOTSUPP; 534 DECLARE_SOCKADDR(struct sockaddr_in *, sin, msg->msg_name); 535 struct sk_buff *skb; 536 537 if (flags & MSG_OOB) 538 goto out; 539 540 skb = skb_recv_datagram(sk, flags, noblock, &err); 541 if (!skb) 542 goto out; 543 544 copied = skb->len; 545 if (len < copied) { 546 msg->msg_flags |= MSG_TRUNC; 547 copied = len; 548 } 549 550 err = skb_copy_datagram_msg(skb, 0, msg, copied); 551 if (err) 552 goto done; 553 554 sock_recv_timestamp(msg, sk, skb); 555 556 /* Copy the address. */ 557 if (sin) { 558 sin->sin_family = AF_INET; 559 sin->sin_addr.s_addr = ip_hdr(skb)->saddr; 560 sin->sin_port = 0; 561 memset(&sin->sin_zero, 0, sizeof(sin->sin_zero)); 562 *addr_len = sizeof(*sin); 563 } 564 if (inet->cmsg_flags) 565 ip_cmsg_recv(msg, skb); 566 if (flags & MSG_TRUNC) 567 copied = skb->len; 568 done: 569 skb_free_datagram(sk, skb); 570 out: 571 return err ? err : copied; 572 } 573 574 int l2tp_ioctl(struct sock *sk, int cmd, unsigned long arg) 575 { 576 struct sk_buff *skb; 577 int amount; 578 579 switch (cmd) { 580 case SIOCOUTQ: 581 amount = sk_wmem_alloc_get(sk); 582 break; 583 case SIOCINQ: 584 spin_lock_bh(&sk->sk_receive_queue.lock); 585 skb = skb_peek(&sk->sk_receive_queue); 586 amount = skb ? skb->len : 0; 587 spin_unlock_bh(&sk->sk_receive_queue.lock); 588 break; 589 590 default: 591 return -ENOIOCTLCMD; 592 } 593 594 return put_user(amount, (int __user *)arg); 595 } 596 EXPORT_SYMBOL(l2tp_ioctl); 597 598 static struct proto l2tp_ip_prot = { 599 .name = "L2TP/IP", 600 .owner = THIS_MODULE, 601 .init = l2tp_ip_open, 602 .close = l2tp_ip_close, 603 .bind = l2tp_ip_bind, 604 .connect = l2tp_ip_connect, 605 .disconnect = l2tp_ip_disconnect, 606 .ioctl = l2tp_ioctl, 607 .destroy = l2tp_ip_destroy_sock, 608 .setsockopt = ip_setsockopt, 609 .getsockopt = ip_getsockopt, 610 .sendmsg = l2tp_ip_sendmsg, 611 .recvmsg = l2tp_ip_recvmsg, 612 .backlog_rcv = l2tp_ip_backlog_recv, 613 .hash = l2tp_ip_hash, 614 .unhash = l2tp_ip_unhash, 615 .obj_size = sizeof(struct l2tp_ip_sock), 616 }; 617 618 static const struct proto_ops l2tp_ip_ops = { 619 .family = PF_INET, 620 .owner = THIS_MODULE, 621 .release = inet_release, 622 .bind = inet_bind, 623 .connect = inet_dgram_connect, 624 .socketpair = sock_no_socketpair, 625 .accept = sock_no_accept, 626 .getname = l2tp_ip_getname, 627 .poll = datagram_poll, 628 .ioctl = inet_ioctl, 629 .gettstamp = sock_gettstamp, 630 .listen = sock_no_listen, 631 .shutdown = inet_shutdown, 632 .setsockopt = sock_common_setsockopt, 633 .getsockopt = sock_common_getsockopt, 634 .sendmsg = inet_sendmsg, 635 .recvmsg = sock_common_recvmsg, 636 .mmap = sock_no_mmap, 637 .sendpage = sock_no_sendpage, 638 }; 639 640 static struct inet_protosw l2tp_ip_protosw = { 641 .type = SOCK_DGRAM, 642 .protocol = IPPROTO_L2TP, 643 .prot = &l2tp_ip_prot, 644 .ops = &l2tp_ip_ops, 645 }; 646 647 static struct net_protocol l2tp_ip_protocol __read_mostly = { 648 .handler = l2tp_ip_recv, 649 .netns_ok = 1, 650 }; 651 652 static int __init l2tp_ip_init(void) 653 { 654 int err; 655 656 pr_info("L2TP IP encapsulation support (L2TPv3)\n"); 657 658 err = proto_register(&l2tp_ip_prot, 1); 659 if (err != 0) 660 goto out; 661 662 err = inet_add_protocol(&l2tp_ip_protocol, IPPROTO_L2TP); 663 if (err) 664 goto out1; 665 666 inet_register_protosw(&l2tp_ip_protosw); 667 return 0; 668 669 out1: 670 proto_unregister(&l2tp_ip_prot); 671 out: 672 return err; 673 } 674 675 static void __exit l2tp_ip_exit(void) 676 { 677 inet_unregister_protosw(&l2tp_ip_protosw); 678 inet_del_protocol(&l2tp_ip_protocol, IPPROTO_L2TP); 679 proto_unregister(&l2tp_ip_prot); 680 } 681 682 module_init(l2tp_ip_init); 683 module_exit(l2tp_ip_exit); 684 685 MODULE_LICENSE("GPL"); 686 MODULE_AUTHOR("James Chapman <jchapman@katalix.com>"); 687 MODULE_DESCRIPTION("L2TP over IP"); 688 MODULE_VERSION("1.0"); 689 690 /* Use the value of SOCK_DGRAM (2) directory, because __stringify doesn't like 691 * enums 692 */ 693 MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_INET, 2, IPPROTO_L2TP); 694 MODULE_ALIAS_NET_PF_PROTO(PF_INET, IPPROTO_L2TP); 695