// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Point-to-Point Tunneling Protocol for Linux
 *
 * Authors: Dmitry Kozlov <xeb@mail.ru>
 */

#include <linux/string.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/netdevice.h>
#include <linux/net.h>
#include <linux/skbuff.h>
#include <linux/vmalloc.h>
#include <linux/init.h>
#include <linux/ppp_channel.h>
#include <linux/ppp_defs.h>
#include <linux/if_pppox.h>
#include <linux/ppp-ioctl.h>
#include <linux/notifier.h>
#include <linux/file.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/rcupdate.h>
#include <linux/spinlock.h>

#include <net/sock.h>
#include <net/protocol.h>
#include <net/ip.h>
#include <net/icmp.h>
#include <net/route.h>
#include <net/gre.h>
#include <net/pptp.h>

#include <linux/uaccess.h>

#define PPTP_DRIVER_VERSION "0.8.5"

#define MAX_CALLID 65535

static DECLARE_BITMAP(callid_bitmap, MAX_CALLID + 1);
static struct pppox_sock __rcu **callid_sock;

static DEFINE_SPINLOCK(chan_lock);

static struct proto pptp_sk_proto __read_mostly;
static const struct ppp_channel_ops pptp_chan_ops;
static const struct proto_ops pptp_ops;

/* Find the socket bound to @call_id and check that the packet really came
 * from its configured peer; takes a reference on the socket when it matches.
 */
static struct pppox_sock *lookup_chan(u16 call_id, __be32 s_addr)
{
	struct pppox_sock *sock;
	struct pptp_opt *opt;

	rcu_read_lock();
	sock = rcu_dereference(callid_sock[call_id]);
	if (sock) {
		opt = &sock->proto.pptp;
		if (opt->dst_addr.sin_addr.s_addr != s_addr)
			sock = NULL;
		else
			sock_hold(sk_pppox(sock));
	}
	rcu_read_unlock();

	return sock;
}

/* Return true if some socket already uses @call_id/@d_addr as its peer. */
static int lookup_chan_dst(u16 call_id, __be32 d_addr)
{
	struct pppox_sock *sock;
	struct pptp_opt *opt;
	int i;

	rcu_read_lock();
	i = 1;
	for_each_set_bit_from(i, callid_bitmap, MAX_CALLID) {
		sock = rcu_dereference(callid_sock[i]);
		if (!sock)
			continue;
		opt = &sock->proto.pptp;
		if (opt->dst_addr.call_id == call_id &&
		    opt->dst_addr.sin_addr.s_addr == d_addr)
			break;
	}
	rcu_read_unlock();

	return i < MAX_CALLID;
}

/* Reserve a local call id (allocating one when sa->call_id is zero) and
 * publish the socket so pptp_rcv() can find it.
 */
static int add_chan(struct pppox_sock *sock,
		    struct pptp_addr *sa)
{
	static int call_id;

	spin_lock(&chan_lock);
	if (!sa->call_id) {
		call_id = find_next_zero_bit(callid_bitmap, MAX_CALLID, call_id + 1);
		if (call_id == MAX_CALLID) {
			call_id = find_next_zero_bit(callid_bitmap, MAX_CALLID, 1);
			if (call_id == MAX_CALLID)
				goto out_err;
		}
		sa->call_id = call_id;
	} else if (test_bit(sa->call_id, callid_bitmap)) {
		goto out_err;
	}

	sock->proto.pptp.src_addr = *sa;
	set_bit(sa->call_id, callid_bitmap);
	rcu_assign_pointer(callid_sock[sa->call_id], sock);
	spin_unlock(&chan_lock);

	return 0;

out_err:
	spin_unlock(&chan_lock);
	return -1;
}

static void del_chan(struct pppox_sock *sock)
{
	spin_lock(&chan_lock);
	clear_bit(sock->proto.pptp.src_addr.call_id, callid_bitmap);
	RCU_INIT_POINTER(callid_sock[sock->proto.pptp.src_addr.call_id], NULL);
	spin_unlock(&chan_lock);
}

static int pptp_xmit(struct ppp_channel *chan, struct sk_buff *skb)
{
	struct sock *sk = (struct sock *) chan->private;
	struct pppox_sock *po = pppox_sk(sk);
	struct net *net = sock_net(sk);
	struct pptp_opt *opt = &po->proto.pptp;
	struct pptp_gre_header *hdr;
	unsigned int header_len = sizeof(*hdr);
	struct flowi4 fl4;
	int islcp;
	int len;
	unsigned char *data;
	__u32 seq_recv;


	struct rtable *rt;
	struct net_device *tdev;
	struct iphdr *iph;
	int max_headroom;

	if (sk_pppox(po)->sk_state & PPPOX_DEAD)
		goto tx_error;

	rt = ip_route_output_ports(net, &fl4, NULL,
				   opt->dst_addr.sin_addr.s_addr,
				   opt->src_addr.sin_addr.s_addr,
				   0, 0, IPPROTO_GRE,
				   RT_TOS(0), 0);
	if (IS_ERR(rt))
		goto tx_error;

	tdev = rt->dst.dev;

	max_headroom = LL_RESERVED_SPACE(tdev) + sizeof(*iph) + sizeof(*hdr) + 2;

	if (skb_headroom(skb) < max_headroom || skb_cloned(skb) || skb_shared(skb)) {
		struct sk_buff *new_skb = skb_realloc_headroom(skb, max_headroom);
		if (!new_skb) {
			ip_rt_put(rt);
			goto tx_error;
		}
		if (skb->sk)
			skb_set_owner_w(new_skb, skb->sk);
		consume_skb(skb);
		skb = new_skb;
	}

	data = skb->data;
	islcp = ((data[0] << 8) + data[1]) == PPP_LCP && 1 <= data[2] && data[2] <= 7;

	/* compress protocol field */
	if ((opt->ppp_flags & SC_COMP_PROT) && data[0] == 0 && !islcp)
		skb_pull(skb, 1);

	/* Put in the address/control bytes if necessary */
	if ((opt->ppp_flags & SC_COMP_AC) == 0 || islcp) {
		data = skb_push(skb, 2);
		data[0] = PPP_ALLSTATIONS;
		data[1] = PPP_UI;
	}

	len = skb->len;

	seq_recv = opt->seq_recv;

	if (opt->ack_sent == seq_recv)
		header_len -= sizeof(hdr->ack);

	/* Push down and install GRE header */
	skb_push(skb, header_len);
	hdr = (struct pptp_gre_header *)(skb->data);

	hdr->gre_hd.flags = GRE_KEY | GRE_VERSION_1 | GRE_SEQ;
	hdr->gre_hd.protocol = GRE_PROTO_PPP;
	hdr->call_id = htons(opt->dst_addr.call_id);

	hdr->seq = htonl(++opt->seq_sent);
	if (opt->ack_sent != seq_recv) {
		/* send ack with this message */
		hdr->gre_hd.flags |= GRE_ACK;
		hdr->ack = htonl(seq_recv);
		opt->ack_sent = seq_recv;
	}
	hdr->payload_len = htons(len);

	/* Push down and install the IP header. */

	skb_reset_transport_header(skb);
	skb_push(skb, sizeof(*iph));
	skb_reset_network_header(skb);
	memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
	IPCB(skb)->flags &= ~(IPSKB_XFRM_TUNNEL_SIZE | IPSKB_XFRM_TRANSFORMED | IPSKB_REROUTED);

	iph = ip_hdr(skb);
	iph->version = 4;
	iph->ihl = sizeof(struct iphdr) >> 2;
	if (ip_dont_fragment(sk, &rt->dst))
		iph->frag_off = htons(IP_DF);
	else
		iph->frag_off = 0;
	iph->protocol = IPPROTO_GRE;
	iph->tos = 0;
	iph->daddr = fl4.daddr;
	iph->saddr = fl4.saddr;
	iph->ttl = ip4_dst_hoplimit(&rt->dst);
	iph->tot_len = htons(skb->len);

	skb_dst_drop(skb);
	skb_dst_set(skb, &rt->dst);

	nf_reset(skb);

	skb->ip_summed = CHECKSUM_NONE;
	ip_select_ident(net, skb, NULL);
	ip_send_check(iph);

	ip_local_out(net, skb->sk, skb);
	return 1;

tx_error:
	kfree_skb(skb);
	return 1;
}

static int pptp_rcv_core(struct sock *sk, struct sk_buff *skb)
{
	struct pppox_sock *po = pppox_sk(sk);
	struct pptp_opt *opt = &po->proto.pptp;
	int headersize, payload_len, seq;
	__u8 *payload;
	struct pptp_gre_header *header;

	if (!(sk->sk_state & PPPOX_CONNECTED)) {
		if (sock_queue_rcv_skb(sk, skb))
			goto drop;
		return NET_RX_SUCCESS;
	}

	header = (struct pptp_gre_header *)(skb->data);
	headersize = sizeof(*header);

	/* test if acknowledgement present */
	if (GRE_IS_ACK(header->gre_hd.flags)) {
		__u32 ack;

		if (!pskb_may_pull(skb, headersize))
			goto drop;
		header = (struct pptp_gre_header *)(skb->data);

		/* ack in different place if S = 0 */
		ack = GRE_IS_SEQ(header->gre_hd.flags) ? header->ack : header->seq;

		ack = ntohl(ack);

		if (ack > opt->ack_recv)
			opt->ack_recv = ack;
		/* also handle sequence number wrap-around */
		if (WRAPPED(ack, opt->ack_recv))
			opt->ack_recv = ack;
	} else {
		headersize -= sizeof(header->ack);
	}
	/* test if payload present */
	if (!GRE_IS_SEQ(header->gre_hd.flags))
		goto drop;

	payload_len = ntohs(header->payload_len);
	seq = ntohl(header->seq);

	/* check for incomplete packet (length smaller than expected) */
	if (!pskb_may_pull(skb, headersize + payload_len))
		goto drop;

	payload = skb->data + headersize;
	/* check for expected sequence number */
	if (seq < opt->seq_recv + 1 || WRAPPED(opt->seq_recv, seq)) {
		if ((payload[0] == PPP_ALLSTATIONS) && (payload[1] == PPP_UI) &&
		    (PPP_PROTOCOL(payload) == PPP_LCP) &&
		    ((payload[4] == PPP_LCP_ECHOREQ) || (payload[4] == PPP_LCP_ECHOREP)))
			goto allow_packet;
	} else {
		opt->seq_recv = seq;
allow_packet:
		skb_pull(skb, headersize);

		if (payload[0] == PPP_ALLSTATIONS && payload[1] == PPP_UI) {
			/* chop off address/control */
			if (skb->len < 3)
				goto drop;
			skb_pull(skb, 2);
		}

		skb->ip_summed = CHECKSUM_NONE;
		skb_set_network_header(skb, skb->head - skb->data);
		ppp_input(&po->chan, skb);

		return NET_RX_SUCCESS;
	}
drop:
	kfree_skb(skb);
	return NET_RX_DROP;
}

static int pptp_rcv(struct sk_buff *skb)
{
	struct pppox_sock *po;
	struct pptp_gre_header *header;
	struct iphdr *iph;

	if (skb->pkt_type != PACKET_HOST)
		goto drop;

	if (!pskb_may_pull(skb, 12))
		goto drop;

	iph = ip_hdr(skb);

	header = (struct pptp_gre_header *)skb->data;

	if (header->gre_hd.protocol != GRE_PROTO_PPP || /* PPTP-GRE protocol for PPTP */
	    GRE_IS_CSUM(header->gre_hd.flags) ||    /* flag CSUM should be clear */
	    GRE_IS_ROUTING(header->gre_hd.flags) || /* flag ROUTING should be clear */
	    !GRE_IS_KEY(header->gre_hd.flags) ||    /* flag KEY should be set */
	    (header->gre_hd.flags & GRE_FLAGS))     /* flag Recursion Ctrl should be clear */
		/* if invalid, discard this packet */
		goto drop;

	po = lookup_chan(ntohs(header->call_id), iph->saddr);
	if (po) {
		skb_dst_drop(skb);
		nf_reset(skb);
		return sk_receive_skb(sk_pppox(po), skb, 0);
	}
drop:
	kfree_skb(skb);
	return NET_RX_DROP;
}

static int pptp_bind(struct socket *sock, struct sockaddr *uservaddr,
	int sockaddr_len)
{
	struct sock *sk = sock->sk;
	struct sockaddr_pppox *sp = (struct sockaddr_pppox *) uservaddr;
	struct pppox_sock *po = pppox_sk(sk);
	int error = 0;

	if (sockaddr_len < sizeof(struct sockaddr_pppox))
		return -EINVAL;

	lock_sock(sk);

	if (sk->sk_state & PPPOX_DEAD) {
		error = -EALREADY;
		goto out;
	}

	if (sk->sk_state & PPPOX_BOUND) {
		error = -EBUSY;
		goto out;
	}

	if (add_chan(po, &sp->sa_addr.pptp))
		error = -EBUSY;
	else
		sk->sk_state |= PPPOX_BOUND;

out:
	release_sock(sk);
	return error;
}

static int pptp_connect(struct socket *sock, struct sockaddr *uservaddr,
	int sockaddr_len, int flags)
{
	struct sock *sk = sock->sk;
	struct sockaddr_pppox *sp = (struct sockaddr_pppox *) uservaddr;
	struct pppox_sock *po = pppox_sk(sk);
	struct pptp_opt *opt = &po->proto.pptp;
	struct rtable *rt;
	struct flowi4 fl4;
	int error = 0;

	if (sockaddr_len < sizeof(struct sockaddr_pppox))
		return -EINVAL;

	if (sp->sa_protocol != PX_PROTO_PPTP)
		return -EINVAL;

	if (lookup_chan_dst(sp->sa_addr.pptp.call_id, sp->sa_addr.pptp.sin_addr.s_addr))
		return -EALREADY;

	lock_sock(sk);
	/* Check for already bound sockets */
	if (sk->sk_state & PPPOX_CONNECTED) {
		error = -EBUSY;
		goto end;
	}

	/* Check for already disconnected sockets, on attempts to disconnect */
	if (sk->sk_state & PPPOX_DEAD) {
		error = -EALREADY;
		goto end;
	}

	if (!opt->src_addr.sin_addr.s_addr || !sp->sa_addr.pptp.sin_addr.s_addr) {
		error = -EINVAL;
		goto end;
	}

	po->chan.private = sk;
	po->chan.ops = &pptp_chan_ops;

	rt = ip_route_output_ports(sock_net(sk), &fl4, sk,
				   opt->dst_addr.sin_addr.s_addr,
				   opt->src_addr.sin_addr.s_addr,
				   0, 0,
				   IPPROTO_GRE, RT_CONN_FLAGS(sk), 0);
	if (IS_ERR(rt)) {
		error = -EHOSTUNREACH;
		goto end;
	}
	sk_setup_caps(sk, &rt->dst);

	po->chan.mtu = dst_mtu(&rt->dst);
	if (!po->chan.mtu)
		po->chan.mtu = PPP_MRU;
	po->chan.mtu -= PPTP_HEADER_OVERHEAD;

	po->chan.hdrlen = 2 + sizeof(struct pptp_gre_header);
	error = ppp_register_channel(&po->chan);
	if (error) {
		pr_err("PPTP: failed to register PPP channel (%d)\n", error);
		goto end;
	}

	opt->dst_addr = sp->sa_addr.pptp;
	sk->sk_state |= PPPOX_CONNECTED;

end:
	release_sock(sk);
	return error;
}

static int pptp_getname(struct socket *sock, struct sockaddr *uaddr,
	int peer)
{
	int len = sizeof(struct sockaddr_pppox);
	struct sockaddr_pppox sp;

	memset(&sp.sa_addr, 0, sizeof(sp.sa_addr));

	sp.sa_family = AF_PPPOX;
	sp.sa_protocol = PX_PROTO_PPTP;
	sp.sa_addr.pptp = pppox_sk(sock->sk)->proto.pptp.src_addr;

	memcpy(uaddr, &sp, len);

	return len;
}

static int pptp_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct pppox_sock *po;
	int error = 0;

	if (!sk)
		return 0;

	lock_sock(sk);

	if (sock_flag(sk, SOCK_DEAD)) {
		release_sock(sk);
		return -EBADF;
	}

	po = pppox_sk(sk);
	del_chan(po);
	synchronize_rcu();

	pppox_unbind_sock(sk);
	sk->sk_state = PPPOX_DEAD;

	sock_orphan(sk);
	sock->sk = NULL;

	release_sock(sk);
	sock_put(sk);

	return error;
}

static void pptp_sock_destruct(struct sock *sk)
{
	if (!(sk->sk_state & PPPOX_DEAD)) {
		del_chan(pppox_sk(sk));
		pppox_unbind_sock(sk);
	}
	skb_queue_purge(&sk->sk_receive_queue);
	dst_release(rcu_dereference_protected(sk->sk_dst_cache, 1));
}

static int pptp_create(struct net *net, struct socket *sock, int kern)
{
	int error = -ENOMEM;
	struct sock *sk;
	struct pppox_sock *po;
	struct pptp_opt *opt;

	sk = sk_alloc(net, PF_PPPOX, GFP_KERNEL, &pptp_sk_proto, kern);
	if (!sk)
		goto out;

	sock_init_data(sock, sk);

	sock->state = SS_UNCONNECTED;
	sock->ops = &pptp_ops;

	sk->sk_backlog_rcv = pptp_rcv_core;
	sk->sk_state = PPPOX_NONE;
	sk->sk_type = SOCK_STREAM;
	sk->sk_family = PF_PPPOX;
	sk->sk_protocol = PX_PROTO_PPTP;
	sk->sk_destruct = pptp_sock_destruct;

	po = pppox_sk(sk);
	opt = &po->proto.pptp;

	opt->seq_sent = 0; opt->seq_recv = 0xffffffff;
	opt->ack_recv = 0; opt->ack_sent = 0xffffffff;

	error = 0;
out:
	return error;
}

static int pptp_ppp_ioctl(struct ppp_channel *chan, unsigned int cmd,
	unsigned long arg)
{
	struct sock *sk = (struct sock *) chan->private;
	struct pppox_sock *po = pppox_sk(sk);
	struct pptp_opt *opt = &po->proto.pptp;
	void __user *argp = (void __user *)arg;
	int __user *p = argp;
	int err, val;

	err = -EFAULT;
	switch (cmd) {
	case PPPIOCGFLAGS:
		val = opt->ppp_flags;
		if (put_user(val, p))
			break;
		err = 0;
		break;
	case PPPIOCSFLAGS:
		if (get_user(val, p))
			break;
		opt->ppp_flags = val & ~SC_RCV_BITS;
		err = 0;
		break;
	default:
		err = -ENOTTY;
	}

	return err;
}

static const struct ppp_channel_ops pptp_chan_ops = {
	.start_xmit = pptp_xmit,
	.ioctl      = pptp_ppp_ioctl,
};

static struct proto pptp_sk_proto __read_mostly = {
	.name     = "PPTP",
	.owner    = THIS_MODULE,
	.obj_size = sizeof(struct pppox_sock),
};

static const struct proto_ops pptp_ops = {
	.family     = AF_PPPOX,
	.owner      = THIS_MODULE,
	.release    = pptp_release,
	.bind       = pptp_bind,
	.connect    = pptp_connect,
	.socketpair = sock_no_socketpair,
	.accept     = sock_no_accept,
	.getname    = pptp_getname,
	.listen     = sock_no_listen,
	.shutdown   = sock_no_shutdown,
	.setsockopt = sock_no_setsockopt,
	.getsockopt = sock_no_getsockopt,
	.sendmsg    = sock_no_sendmsg,
	.recvmsg    = sock_no_recvmsg,
	.mmap       = sock_no_mmap,
	.ioctl      = pppox_ioctl,
};

static const struct pppox_proto pppox_pptp_proto = {
	.create = pptp_create,
	.owner  = THIS_MODULE,
};

static const struct gre_protocol gre_pptp_protocol = {
	.handler = pptp_rcv,
};

static int __init pptp_init_module(void)
{
	int err = 0;
	pr_info("PPTP driver version " PPTP_DRIVER_VERSION "\n");

	callid_sock = vzalloc(array_size(sizeof(void *), (MAX_CALLID + 1)));
	if (!callid_sock)
		return -ENOMEM;

	err = gre_add_protocol(&gre_pptp_protocol, GREPROTO_PPTP);
	if (err) {
		pr_err("PPTP: can't add gre protocol\n");
		goto out_mem_free;
	}

	err = proto_register(&pptp_sk_proto, 0);
	if (err) {
		pr_err("PPTP: can't register sk_proto\n");
		goto out_gre_del_protocol;
	}

	err = register_pppox_proto(PX_PROTO_PPTP, &pppox_pptp_proto);
	if (err) {
		pr_err("PPTP: can't register pppox_proto\n");
		goto out_unregister_sk_proto;
	}

	return 0;

out_unregister_sk_proto:
	proto_unregister(&pptp_sk_proto);
out_gre_del_protocol:
	gre_del_protocol(&gre_pptp_protocol, GREPROTO_PPTP);
out_mem_free:
	vfree(callid_sock);

	return err;
}

static void __exit pptp_exit_module(void)
{
	unregister_pppox_proto(PX_PROTO_PPTP);
	proto_unregister(&pptp_sk_proto);
	gre_del_protocol(&gre_pptp_protocol, GREPROTO_PPTP);
	vfree(callid_sock);
}

module_init(pptp_init_module);
module_exit(pptp_exit_module);

MODULE_DESCRIPTION("Point-to-Point Tunneling Protocol");
MODULE_AUTHOR("D. Kozlov (xeb@mail.ru)");
MODULE_LICENSE("GPL");
MODULE_ALIAS_NET_PF_PROTO(PF_PPPOX, PX_PROTO_PPTP);
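
/*
 * Usage sketch (illustrative only, not part of the driver): roughly how a
 * userspace PPTP client such as pppd's pptp plugin is expected to drive this
 * module once the TCP/1723 control connection has negotiated the call ids.
 * The variables local_ip, server_ip and peer_call_id below are assumptions
 * for the example, not values defined anywhere in this file.
 *
 *	int fd = socket(AF_PPPOX, SOCK_STREAM, PX_PROTO_PPTP);
 *
 *	struct sockaddr_pppox src = {
 *		.sa_family   = AF_PPPOX,
 *		.sa_protocol = PX_PROTO_PPTP,
 *	};
 *	src.sa_addr.pptp.call_id = 0;		// 0: let add_chan() pick a free id
 *	src.sa_addr.pptp.sin_addr.s_addr = local_ip;
 *	bind(fd, (struct sockaddr *)&src, sizeof(src));
 *
 *	struct sockaddr_pppox dst = src;
 *	dst.sa_addr.pptp.call_id = peer_call_id;	// learned on the control channel
 *	dst.sa_addr.pptp.sin_addr.s_addr = server_ip;
 *	connect(fd, (struct sockaddr *)&dst, sizeof(dst));
 *
 *	// Hand the registered ppp_channel over to the generic PPP layer:
 *	int chindex;
 *	ioctl(fd, PPPIOCGCHAN, &chindex);
 *	int ppp = open("/dev/ppp", O_RDWR);
 *	ioctl(ppp, PPPIOCATTCHAN, &chindex);
 */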