// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Generic PPP layer for Linux.
 *
 * Copyright 1999-2002 Paul Mackerras.
 *
 * The generic PPP layer handles the PPP network interfaces, the
 * /dev/ppp device, packet and VJ compression, and multilink.
 * It talks to PPP `channels' via the interface defined in
 * include/linux/ppp_channel.h.  Channels provide the basic means for
 * sending and receiving PPP frames on some kind of communications
 * channel.
 *
 * Part of the code in this driver was inspired by the old async-only
 * PPP driver, written by Michael Callahan and Al Longyear, and
 * subsequently hacked by Paul Mackerras.
 *
 * ==FILEVERSION 20041108==
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched/signal.h>
#include <linux/kmod.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/idr.h>
#include <linux/netdevice.h>
#include <linux/poll.h>
#include <linux/ppp_defs.h>
#include <linux/filter.h>
#include <linux/ppp-ioctl.h>
#include <linux/ppp_channel.h>
#include <linux/ppp-comp.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/if_arp.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/spinlock.h>
#include <linux/rwsem.h>
#include <linux/stddef.h>
#include <linux/device.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/file.h>
#include <asm/unaligned.h>
#include <net/slhc_vj.h>
#include <linux/atomic.h>
#include <linux/refcount.h>

#include <linux/nsproxy.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>

#define PPP_VERSION	"2.4.2"

/*
 * Network protocols we support.
 */
#define NP_IP	0		/* Internet Protocol V4 */
#define NP_IPV6	1		/* Internet Protocol V6 */
#define NP_IPX	2		/* IPX protocol */
#define NP_AT	3		/* Appletalk protocol */
#define NP_MPLS_UC 4		/* MPLS unicast */
#define NP_MPLS_MC 5		/* MPLS multicast */
#define NUM_NP	6		/* Number of NPs. */

#define MPHDRLEN	6	/* multilink protocol header length */
#define MPHDRLEN_SSN	4	/* ditto with short sequence numbers */

/*
 * An instance of /dev/ppp can be associated with either a ppp
 * interface unit or a ppp channel.  In both cases, file->private_data
 * points to one of these.
 */
struct ppp_file {
	enum {
		INTERFACE=1, CHANNEL
	}		kind;
	struct sk_buff_head xq;		/* pppd transmit queue */
	struct sk_buff_head rq;		/* receive queue for pppd */
	wait_queue_head_t rwait;	/* for poll on reading /dev/ppp */
	refcount_t	refcnt;		/* # refs (incl /dev/ppp attached) */
	int		hdrlen;		/* space to leave for headers */
	int		index;		/* interface unit / channel number */
	int		dead;		/* unit/channel has been shut down */
};

#define PF_TO_X(pf, X)		container_of(pf, X, file)

#define PF_TO_PPP(pf)		PF_TO_X(pf, struct ppp)
#define PF_TO_CHANNEL(pf)	PF_TO_X(pf, struct channel)

/*
 * Data structure to hold primary network stats for which
 * we want to use 64 bit storage.  Other network stats
 * are stored in dev->stats of the ppp structure.
 */
struct ppp_link_stats {
	u64 rx_packets;
	u64 tx_packets;
	u64 rx_bytes;
	u64 tx_bytes;
};

/*
 * Data structure describing one ppp unit.
 * A ppp unit corresponds to a ppp network interface device
 * and represents a multilink bundle.
 * It can have 0 or more ppp channels connected to it.
 */
struct ppp {
	struct ppp_file	file;		/* stuff for read/write/poll 0 */
	struct file	*owner;		/* file that owns this unit 48 */
	struct list_head channels;	/* list of attached channels 4c */
	int		n_channels;	/* how many channels are attached 54 */
	spinlock_t	rlock;		/* lock for receive side 58 */
	spinlock_t	wlock;		/* lock for transmit side 5c */
	int __percpu	*xmit_recursion; /* xmit recursion detect */
	int		mru;		/* max receive unit 60 */
	unsigned int	flags;		/* control bits 64 */
	unsigned int	xstate;		/* transmit state bits 68 */
	unsigned int	rstate;		/* receive state bits 6c */
	int		debug;		/* debug flags 70 */
	struct slcompress *vj;		/* state for VJ header compression */
	enum NPmode	npmode[NUM_NP];	/* what to do with each net proto 78 */
	struct sk_buff	*xmit_pending;	/* a packet ready to go out 88 */
	struct compressor *xcomp;	/* transmit packet compressor 8c */
	void		*xc_state;	/* its internal state 90 */
	struct compressor *rcomp;	/* receive decompressor 94 */
	void		*rc_state;	/* its internal state 98 */
	unsigned long	last_xmit;	/* jiffies when last pkt sent 9c */
	unsigned long	last_recv;	/* jiffies when last pkt rcvd a0 */
	struct net_device *dev;		/* network interface device a4 */
	int		closing;	/* is device closing down? a8 */
#ifdef CONFIG_PPP_MULTILINK
	int		nxchan;		/* next channel to send something on */
	u32		nxseq;		/* next sequence number to send */
	int		mrru;		/* MP: max reconst. receive unit */
	u32		nextseq;	/* MP: seq no of next packet */
	u32		minseq;		/* MP: min of most recent seqnos */
	struct sk_buff_head mrq;	/* MP: receive reconstruction queue */
#endif /* CONFIG_PPP_MULTILINK */
#ifdef CONFIG_PPP_FILTER
	struct bpf_prog *pass_filter;	/* filter for packets to pass */
	struct bpf_prog *active_filter; /* filter for pkts to reset idle */
#endif /* CONFIG_PPP_FILTER */
	struct net	*ppp_net;	/* the net we belong to */
	struct ppp_link_stats stats64;	/* 64 bit network stats */
};

/*
 * Bits in flags: SC_NO_TCP_CCID, SC_CCP_OPEN, SC_CCP_UP, SC_LOOP_TRAFFIC,
 * SC_MULTILINK, SC_MP_SHORTSEQ, SC_MP_XSHORTSEQ, SC_COMP_TCP, SC_REJ_COMP_TCP,
 * SC_MUST_COMP
 * Bits in rstate: SC_DECOMP_RUN, SC_DC_ERROR, SC_DC_FERROR.
 * Bits in xstate: SC_COMP_RUN
 */
#define SC_FLAG_BITS	(SC_NO_TCP_CCID|SC_CCP_OPEN|SC_CCP_UP|SC_LOOP_TRAFFIC \
			 |SC_MULTILINK|SC_MP_SHORTSEQ|SC_MP_XSHORTSEQ \
			 |SC_COMP_TCP|SC_REJ_COMP_TCP|SC_MUST_COMP)

/*
 * Private data structure for each channel.
 * This includes the data structure used for multilink.
 */
struct channel {
	struct ppp_file	file;		/* stuff for read/write/poll */
	struct list_head list;		/* link in all/new_channels list */
	struct ppp_channel *chan;	/* public channel data structure */
	struct rw_semaphore chan_sem;	/* protects `chan' during chan ioctl */
	spinlock_t	downl;		/* protects `chan', file.xq dequeue */
	struct ppp	*ppp;		/* ppp unit we're connected to */
	struct net	*chan_net;	/* the net channel belongs to */
	struct list_head clist;		/* link in list of channels per unit */
	rwlock_t	upl;		/* protects `ppp' */
#ifdef CONFIG_PPP_MULTILINK
	u8		avail;		/* flag used in multilink stuff */
	u8		had_frag;	/* >= 1 fragments have been sent */
	u32		lastseq;	/* MP: last sequence # received */
	int		speed;		/* speed of the corresponding ppp channel*/
#endif /* CONFIG_PPP_MULTILINK */
};

struct ppp_config {
	struct file *file;
	s32	unit;
	bool ifname_is_set;
};

/*
 * SMP locking issues:
 * Both the ppp.rlock and ppp.wlock locks protect the ppp.channels
 * list and the ppp.n_channels field; you must hold both locks
 * before modifying them.
 * The lock ordering is: channel.upl -> ppp.wlock -> ppp.rlock ->
 * channel.downl.
 */

static DEFINE_MUTEX(ppp_mutex);
static atomic_t ppp_unit_count = ATOMIC_INIT(0);
static atomic_t channel_count = ATOMIC_INIT(0);

/* per-net private data for this module */
static unsigned int ppp_net_id __read_mostly;
struct ppp_net {
	/* units to ppp mapping */
	struct idr units_idr;

	/*
	 * all_ppp_mutex protects the units_idr mapping.
	 * It also ensures that finding a ppp unit in the units_idr
	 * map and updating its file.refcnt field is atomic.
	 */
	struct mutex all_ppp_mutex;

	/* channels */
	struct list_head all_channels;
	struct list_head new_channels;
	int last_channel_index;

	/*
	 * all_channels_lock protects all_channels and
	 * last_channel_index, and makes finding a channel and
	 * updating its file.refcnt field atomic.
	 */
	spinlock_t all_channels_lock;
};

/* Get the PPP protocol number from a skb */
#define PPP_PROTO(skb)	get_unaligned_be16((skb)->data)

/* We limit the length of ppp->file.rq to this (arbitrary) value */
#define PPP_MAX_RQLEN	32

/*
 * Maximum number of multilink fragments queued up.
 * This has to be large enough to cope with the maximum latency of
 * the slowest channel relative to the others.  Strictly it should
 * depend on the number of channels and their characteristics.
 */
#define PPP_MP_MAX_QLEN	128

/* Multilink header bits. */
#define B	0x80		/* this fragment begins a packet */
#define E	0x40		/* this fragment ends a packet */

/* Compare multilink sequence numbers (assumed to be 32 bits wide) */
#define seq_before(a, b)	((s32)((a) - (b)) < 0)
#define seq_after(a, b)		((s32)((a) - (b)) > 0)
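
/*
 * Example of the wraparound behaviour: seq_before(0xfffffffe, 1) is
 * true, because (s32)(0xfffffffe - 1) is negative; a plain unsigned
 * comparison would get this wrong just after the sequence space wraps.
 */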

/* Prototypes. */
static int ppp_unattached_ioctl(struct net *net, struct ppp_file *pf,
			struct file *file, unsigned int cmd, unsigned long arg);
static void ppp_xmit_process(struct ppp *ppp, struct sk_buff *skb);
static void ppp_send_frame(struct ppp *ppp, struct sk_buff *skb);
static void ppp_push(struct ppp *ppp);
static void ppp_channel_push(struct channel *pch);
static void ppp_receive_frame(struct ppp *ppp, struct sk_buff *skb,
			      struct channel *pch);
static void ppp_receive_error(struct ppp *ppp);
static void ppp_receive_nonmp_frame(struct ppp *ppp, struct sk_buff *skb);
static struct sk_buff *ppp_decompress_frame(struct ppp *ppp,
					    struct sk_buff *skb);
#ifdef CONFIG_PPP_MULTILINK
static void ppp_receive_mp_frame(struct ppp *ppp, struct sk_buff *skb,
				 struct channel *pch);
static void ppp_mp_insert(struct ppp *ppp, struct sk_buff *skb);
static struct sk_buff *ppp_mp_reconstruct(struct ppp *ppp);
static int ppp_mp_explode(struct ppp *ppp, struct sk_buff *skb);
#endif /* CONFIG_PPP_MULTILINK */
static int ppp_set_compress(struct ppp *ppp, struct ppp_option_data *data);
static void ppp_ccp_peek(struct ppp *ppp, struct sk_buff *skb, int inbound);
static void ppp_ccp_closed(struct ppp *ppp);
static struct compressor *find_compressor(int type);
static void ppp_get_stats(struct ppp *ppp, struct ppp_stats *st);
static int ppp_create_interface(struct net *net, struct file *file, int *unit);
static void init_ppp_file(struct ppp_file *pf, int kind);
static void ppp_destroy_interface(struct ppp *ppp);
static struct ppp *ppp_find_unit(struct ppp_net *pn, int unit);
static struct channel *ppp_find_channel(struct ppp_net *pn, int unit);
static int ppp_connect_channel(struct channel *pch, int unit);
static int ppp_disconnect_channel(struct channel *pch);
static void ppp_destroy_channel(struct channel *pch);
static int unit_get(struct idr *p, void *ptr);
static int unit_set(struct idr *p, void *ptr, int n);
static void unit_put(struct idr *p, int n);
static void *unit_find(struct idr *p, int n);
static void ppp_setup(struct net_device *dev);

static const struct net_device_ops ppp_netdev_ops;

static struct class *ppp_class;

/* per net-namespace data */
static inline struct ppp_net *ppp_pernet(struct net *net)
{
	return net_generic(net, ppp_net_id);
}

/* Translates a PPP protocol number to an NP index (NP == network protocol) */
static inline int proto_to_npindex(int proto)
{
	switch (proto) {
	case PPP_IP:
		return NP_IP;
	case PPP_IPV6:
		return NP_IPV6;
	case PPP_IPX:
		return NP_IPX;
	case PPP_AT:
		return NP_AT;
	case PPP_MPLS_UC:
		return NP_MPLS_UC;
	case PPP_MPLS_MC:
		return NP_MPLS_MC;
	}
	return -EINVAL;
}

/* Translates an NP index into a PPP protocol number */
static const int npindex_to_proto[NUM_NP] = {
	PPP_IP,
	PPP_IPV6,
	PPP_IPX,
	PPP_AT,
	PPP_MPLS_UC,
	PPP_MPLS_MC,
};

/* Translates an ethertype into an NP index */
static inline int ethertype_to_npindex(int ethertype)
{
	switch (ethertype) {
	case ETH_P_IP:
		return NP_IP;
	case ETH_P_IPV6:
		return NP_IPV6;
	case ETH_P_IPX:
		return NP_IPX;
	case ETH_P_PPPTALK:
	case ETH_P_ATALK:
		return NP_AT;
	case ETH_P_MPLS_UC:
		return NP_MPLS_UC;
	case ETH_P_MPLS_MC:
		return NP_MPLS_MC;
	}
	return -1;
}
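
/*
 * Example round trip: proto_to_npindex(PPP_IP) yields NP_IP, and
 * npindex_to_proto[NP_IP] is PPP_IP again.  The table below provides
 * the analogous NP-index-to-ethertype mapping for skb->protocol.
 */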
/* Translates an NP index into an ethertype */
static const int npindex_to_ethertype[NUM_NP] = {
	ETH_P_IP,
	ETH_P_IPV6,
	ETH_P_IPX,
	ETH_P_PPPTALK,
	ETH_P_MPLS_UC,
	ETH_P_MPLS_MC,
};

/*
 * Locking shorthand.
 */
#define ppp_xmit_lock(ppp)	spin_lock_bh(&(ppp)->wlock)
#define ppp_xmit_unlock(ppp)	spin_unlock_bh(&(ppp)->wlock)
#define ppp_recv_lock(ppp)	spin_lock_bh(&(ppp)->rlock)
#define ppp_recv_unlock(ppp)	spin_unlock_bh(&(ppp)->rlock)
#define ppp_lock(ppp)		do { ppp_xmit_lock(ppp); \
				     ppp_recv_lock(ppp); } while (0)
#define ppp_unlock(ppp)		do { ppp_recv_unlock(ppp); \
				     ppp_xmit_unlock(ppp); } while (0)

/*
 * /dev/ppp device routines.
 * The /dev/ppp device is used by pppd to control the ppp unit.
 * It supports the read, write, ioctl and poll functions.
 * Open instances of /dev/ppp can be in one of three states:
 * unattached, attached to a ppp unit, or attached to a ppp channel.
 */
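
/*
 * A minimal sketch of how pppd-style userspace drives this device
 * (illustrative only; error handling omitted and fd names made up):
 *
 *	int fd = open("/dev/ppp", O_RDWR);
 *	int unit = -1;				// let the driver pick
 *	ioctl(fd, PPPIOCNEWUNIT, &unit);	// fd now owns unit "pppN"
 *	// read()/write() on fd now carry whole PPP frames, starting
 *	// with the 2-byte protocol field (no address/control bytes).
 */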
static int ppp_open(struct inode *inode, struct file *file)
{
	/*
	 * This could (should?) be enforced by the permissions on /dev/ppp.
	 */
	if (!ns_capable(file->f_cred->user_ns, CAP_NET_ADMIN))
		return -EPERM;
	return 0;
}

static int ppp_release(struct inode *unused, struct file *file)
{
	struct ppp_file *pf = file->private_data;
	struct ppp *ppp;

	if (pf) {
		file->private_data = NULL;
		if (pf->kind == INTERFACE) {
			ppp = PF_TO_PPP(pf);
			rtnl_lock();
			if (file == ppp->owner)
				unregister_netdevice(ppp->dev);
			rtnl_unlock();
		}
		if (refcount_dec_and_test(&pf->refcnt)) {
			switch (pf->kind) {
			case INTERFACE:
				ppp_destroy_interface(PF_TO_PPP(pf));
				break;
			case CHANNEL:
				ppp_destroy_channel(PF_TO_CHANNEL(pf));
				break;
			}
		}
	}
	return 0;
}

static ssize_t ppp_read(struct file *file, char __user *buf,
			size_t count, loff_t *ppos)
{
	struct ppp_file *pf = file->private_data;
	DECLARE_WAITQUEUE(wait, current);
	ssize_t ret;
	struct sk_buff *skb = NULL;
	struct iovec iov;
	struct iov_iter to;

	ret = count;

	if (!pf)
		return -ENXIO;
	add_wait_queue(&pf->rwait, &wait);
	for (;;) {
		set_current_state(TASK_INTERRUPTIBLE);
		skb = skb_dequeue(&pf->rq);
		if (skb)
			break;
		ret = 0;
		if (pf->dead)
			break;
		if (pf->kind == INTERFACE) {
			/*
			 * Return 0 (EOF) on an interface that has no
			 * channels connected, unless it is looping
			 * network traffic (demand mode).
			 */
			struct ppp *ppp = PF_TO_PPP(pf);

			ppp_recv_lock(ppp);
			if (ppp->n_channels == 0 &&
			    (ppp->flags & SC_LOOP_TRAFFIC) == 0) {
				ppp_recv_unlock(ppp);
				break;
			}
			ppp_recv_unlock(ppp);
		}
		ret = -EAGAIN;
		if (file->f_flags & O_NONBLOCK)
			break;
		ret = -ERESTARTSYS;
		if (signal_pending(current))
			break;
		schedule();
	}
	set_current_state(TASK_RUNNING);
	remove_wait_queue(&pf->rwait, &wait);

	if (!skb)
		goto out;

	ret = -EOVERFLOW;
	if (skb->len > count)
		goto outf;
	ret = -EFAULT;
	iov.iov_base = buf;
	iov.iov_len = count;
	iov_iter_init(&to, READ, &iov, 1, count);
	if (skb_copy_datagram_iter(skb, 0, &to, skb->len))
		goto outf;
	ret = skb->len;

 outf:
	kfree_skb(skb);
 out:
	return ret;
}

static ssize_t ppp_write(struct file *file, const char __user *buf,
			 size_t count, loff_t *ppos)
{
	struct ppp_file *pf = file->private_data;
	struct sk_buff *skb;
	ssize_t ret;

	if (!pf)
		return -ENXIO;
	ret = -ENOMEM;
	skb = alloc_skb(count + pf->hdrlen, GFP_KERNEL);
	if (!skb)
		goto out;
	skb_reserve(skb, pf->hdrlen);
	ret = -EFAULT;
	if (copy_from_user(skb_put(skb, count), buf, count)) {
		kfree_skb(skb);
		goto out;
	}

	switch (pf->kind) {
	case INTERFACE:
		ppp_xmit_process(PF_TO_PPP(pf), skb);
		break;
	case CHANNEL:
		skb_queue_tail(&pf->xq, skb);
		ppp_channel_push(PF_TO_CHANNEL(pf));
		break;
	}

	ret = count;

 out:
	return ret;
}

/* No kernel lock - fine */
static __poll_t ppp_poll(struct file *file, poll_table *wait)
{
	struct ppp_file *pf = file->private_data;
	__poll_t mask;

	if (!pf)
		return 0;
	poll_wait(file, &pf->rwait, wait);
	mask = EPOLLOUT | EPOLLWRNORM;
	if (skb_peek(&pf->rq))
		mask |= EPOLLIN | EPOLLRDNORM;
	if (pf->dead)
		mask |= EPOLLHUP;
	else if (pf->kind == INTERFACE) {
		/* see comment in ppp_read */
		struct ppp *ppp = PF_TO_PPP(pf);

		ppp_recv_lock(ppp);
		if (ppp->n_channels == 0 &&
		    (ppp->flags & SC_LOOP_TRAFFIC) == 0)
			mask |= EPOLLIN | EPOLLRDNORM;
		ppp_recv_unlock(ppp);
	}

	return mask;
}

#ifdef CONFIG_PPP_FILTER
static struct bpf_prog *get_filter(struct sock_fprog *uprog)
{
	struct sock_fprog_kern fprog;
	struct bpf_prog *res = NULL;
	int err;

	if (!uprog->len)
		return NULL;

	/* uprog->len is unsigned short, so no overflow here */
	fprog.len = uprog->len;
	fprog.filter = memdup_user(uprog->filter,
				   uprog->len * sizeof(struct sock_filter));
	if (IS_ERR(fprog.filter))
		return ERR_CAST(fprog.filter);

	err = bpf_prog_create(&res, &fprog);
	kfree(fprog.filter);

	return err ? ERR_PTR(err) : res;
}

static struct bpf_prog *ppp_get_filter(struct sock_fprog __user *p)
{
	struct sock_fprog uprog;

	if (copy_from_user(&uprog, p, sizeof(struct sock_fprog)))
		return ERR_PTR(-EFAULT);
	return get_filter(&uprog);
}

#ifdef CONFIG_COMPAT
struct sock_fprog32 {
	unsigned short len;
	compat_caddr_t filter;
};

#define PPPIOCSPASS32		_IOW('t', 71, struct sock_fprog32)
#define PPPIOCSACTIVE32		_IOW('t', 70, struct sock_fprog32)

static struct bpf_prog *compat_ppp_get_filter(struct sock_fprog32 __user *p)
{
	struct sock_fprog32 uprog32;
	struct sock_fprog uprog;

	if (copy_from_user(&uprog32, p, sizeof(struct sock_fprog32)))
		return ERR_PTR(-EFAULT);
	uprog.len = uprog32.len;
	uprog.filter = compat_ptr(uprog32.filter);
	return get_filter(&uprog);
}
#endif
#endif

static long ppp_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	struct ppp_file *pf;
	struct ppp *ppp;
	int err = -EFAULT, val, val2, i;
	struct ppp_idle32 idle32;
	struct ppp_idle64 idle64;
	struct npioctl npi;
	int unit, cflags;
	struct slcompress *vj;
	void __user *argp = (void __user *)arg;
	int __user *p = argp;

	mutex_lock(&ppp_mutex);

	pf = file->private_data;
	if (!pf) {
		err = ppp_unattached_ioctl(current->nsproxy->net_ns,
					   pf, file, cmd, arg);
		goto out;
	}

	if (cmd == PPPIOCDETACH) {
		/*
		 * PPPIOCDETACH is no longer supported as it was heavily broken,
		 * and is only known to have been used by pppd older than
		 * ppp-2.4.2 (released November 2003).
		 */
		pr_warn_once("%s (%d) used obsolete PPPIOCDETACH ioctl\n",
			     current->comm, current->pid);
		err = -EINVAL;
		goto out;
	}

	if (pf->kind == CHANNEL) {
		struct channel *pch;
		struct ppp_channel *chan;

		pch = PF_TO_CHANNEL(pf);

		switch (cmd) {
		case PPPIOCCONNECT:
			if (get_user(unit, p))
				break;
			err = ppp_connect_channel(pch, unit);
			break;

		case PPPIOCDISCONN:
			err = ppp_disconnect_channel(pch);
			break;

		default:
			down_read(&pch->chan_sem);
			chan = pch->chan;
			err = -ENOTTY;
			if (chan && chan->ops->ioctl)
				err = chan->ops->ioctl(chan, cmd, arg);
			up_read(&pch->chan_sem);
		}
		goto out;
	}

	if (pf->kind != INTERFACE) {
		/* can't happen */
		pr_err("PPP: not interface or channel??\n");
		err = -EINVAL;
		goto out;
	}

	ppp = PF_TO_PPP(pf);
	switch (cmd) {
	case PPPIOCSMRU:
		if (get_user(val, p))
			break;
		ppp->mru = val;
		err = 0;
		break;

	case PPPIOCSFLAGS:
		if (get_user(val, p))
			break;
		ppp_lock(ppp);
		cflags = ppp->flags & ~val;
#ifdef CONFIG_PPP_MULTILINK
		if (!(ppp->flags & SC_MULTILINK) && (val & SC_MULTILINK))
			ppp->nextseq = 0;
#endif
		ppp->flags = val & SC_FLAG_BITS;
		ppp_unlock(ppp);
		if (cflags & SC_CCP_OPEN)
			ppp_ccp_closed(ppp);
		err = 0;
		break;

	case PPPIOCGFLAGS:
		val = ppp->flags | ppp->xstate | ppp->rstate;
		if (put_user(val, p))
			break;
		err = 0;
		break;

	case PPPIOCSCOMPRESS:
	{
		struct ppp_option_data data;
		if (copy_from_user(&data, argp, sizeof(data)))
			err = -EFAULT;
		else
			err = ppp_set_compress(ppp, &data);
		break;
	}
	case PPPIOCGUNIT:
		if (put_user(ppp->file.index, p))
			break;
		err = 0;
		break;

	case PPPIOCSDEBUG:
		if (get_user(val, p))
			break;
		ppp->debug = val;
		err = 0;
		break;

	case PPPIOCGDEBUG:
		if (put_user(ppp->debug, p))
			break;
		err = 0;
		break;

	case PPPIOCGIDLE32:
		idle32.xmit_idle = (jiffies - ppp->last_xmit) / HZ;
		idle32.recv_idle = (jiffies - ppp->last_recv) / HZ;
		if (copy_to_user(argp, &idle32, sizeof(idle32)))
			break;
		err = 0;
		break;

	case PPPIOCGIDLE64:
		idle64.xmit_idle = (jiffies - ppp->last_xmit) / HZ;
		idle64.recv_idle = (jiffies - ppp->last_recv) / HZ;
		if (copy_to_user(argp, &idle64, sizeof(idle64)))
			break;
		err = 0;
		break;

	case PPPIOCSMAXCID:
		if (get_user(val, p))
			break;
		val2 = 15;
		if ((val >> 16) != 0) {
			val2 = val >> 16;
			val &= 0xffff;
		}
		vj = slhc_init(val2+1, val+1);
		if (IS_ERR(vj)) {
			err = PTR_ERR(vj);
			break;
		}
		ppp_lock(ppp);
		if (ppp->vj)
			slhc_free(ppp->vj);
		ppp->vj = vj;
		ppp_unlock(ppp);
		err = 0;
		break;

	case PPPIOCGNPMODE:
	case PPPIOCSNPMODE:
		if (copy_from_user(&npi, argp, sizeof(npi)))
			break;
		err = proto_to_npindex(npi.protocol);
		if (err < 0)
			break;
		i = err;
		if (cmd == PPPIOCGNPMODE) {
			err = -EFAULT;
			npi.mode = ppp->npmode[i];
			if (copy_to_user(argp, &npi, sizeof(npi)))
				break;
		} else {
			ppp->npmode[i] = npi.mode;
			/* we may be able to transmit more packets now (??) */
			netif_wake_queue(ppp->dev);
		}
		err = 0;
		break;

#ifdef CONFIG_PPP_FILTER
	case PPPIOCSPASS:
	case PPPIOCSACTIVE:
	{
		struct bpf_prog *filter = ppp_get_filter(argp);
		struct bpf_prog **which;

		if (IS_ERR(filter)) {
			err = PTR_ERR(filter);
			break;
		}
		if (cmd == PPPIOCSPASS)
			which = &ppp->pass_filter;
		else
			which = &ppp->active_filter;
		ppp_lock(ppp);
		if (*which)
			bpf_prog_destroy(*which);
		*which = filter;
		ppp_unlock(ppp);
		err = 0;
		break;
	}
#endif /* CONFIG_PPP_FILTER */

#ifdef CONFIG_PPP_MULTILINK
	case PPPIOCSMRRU:
		if (get_user(val, p))
			break;
		ppp_recv_lock(ppp);
		ppp->mrru = val;
		ppp_recv_unlock(ppp);
		err = 0;
		break;
#endif /* CONFIG_PPP_MULTILINK */

	default:
		err = -ENOTTY;
	}

out:
	mutex_unlock(&ppp_mutex);

	return err;
}
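
/*
 * 32-bit compat handling: sock_fprog and ppp_option_data embed user
 * pointers, so their 32-bit layouts differ from the native 64-bit ones
 * and must be translated here.  As noted below, every other PPP ioctl
 * takes layout-compatible arguments and falls through to ppp_ioctl().
 */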
#ifdef CONFIG_COMPAT
struct ppp_option_data32 {
	compat_uptr_t		ptr;
	u32			length;
	compat_int_t		transmit;
};
#define PPPIOCSCOMPRESS32	_IOW('t', 77, struct ppp_option_data32)

static long ppp_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	struct ppp_file *pf;
	int err = -ENOIOCTLCMD;
	void __user *argp = (void __user *)arg;

	mutex_lock(&ppp_mutex);

	pf = file->private_data;
	if (pf && pf->kind == INTERFACE) {
		struct ppp *ppp = PF_TO_PPP(pf);
		switch (cmd) {
#ifdef CONFIG_PPP_FILTER
		case PPPIOCSPASS32:
		case PPPIOCSACTIVE32:
		{
			struct bpf_prog *filter = compat_ppp_get_filter(argp);
			struct bpf_prog **which;

			if (IS_ERR(filter)) {
				err = PTR_ERR(filter);
				break;
			}
			if (cmd == PPPIOCSPASS32)
				which = &ppp->pass_filter;
			else
				which = &ppp->active_filter;
			ppp_lock(ppp);
			if (*which)
				bpf_prog_destroy(*which);
			*which = filter;
			ppp_unlock(ppp);
			err = 0;
			break;
		}
#endif /* CONFIG_PPP_FILTER */
		case PPPIOCSCOMPRESS32:
		{
			struct ppp_option_data32 data32;
			if (copy_from_user(&data32, argp, sizeof(data32))) {
				err = -EFAULT;
			} else {
				struct ppp_option_data data = {
					.ptr = compat_ptr(data32.ptr),
					.length = data32.length,
					.transmit = data32.transmit
				};
				err = ppp_set_compress(ppp, &data);
			}
			break;
		}
		}
	}
	mutex_unlock(&ppp_mutex);

	/* all other commands have compatible arguments */
	if (err == -ENOIOCTLCMD)
		err = ppp_ioctl(file, cmd, (unsigned long)compat_ptr(arg));

	return err;
}
#endif

static int ppp_unattached_ioctl(struct net *net, struct ppp_file *pf,
			struct file *file, unsigned int cmd, unsigned long arg)
{
	int unit, err = -EFAULT;
	struct ppp *ppp;
	struct channel *chan;
	struct ppp_net *pn;
	int __user *p = (int __user *)arg;

	switch (cmd) {
	case PPPIOCNEWUNIT:
		/* Create a new ppp unit */
		if (get_user(unit, p))
			break;
		err = ppp_create_interface(net, file, &unit);
		if (err < 0)
			break;

		err = -EFAULT;
		if (put_user(unit, p))
			break;
		err = 0;
		break;

	case PPPIOCATTACH:
		/* Attach to an existing ppp unit */
		if (get_user(unit, p))
			break;
		err = -ENXIO;
		pn = ppp_pernet(net);
		mutex_lock(&pn->all_ppp_mutex);
		ppp = ppp_find_unit(pn, unit);
		if (ppp) {
			refcount_inc(&ppp->file.refcnt);
			file->private_data = &ppp->file;
			err = 0;
		}
		mutex_unlock(&pn->all_ppp_mutex);
		break;

	case PPPIOCATTCHAN:
		if (get_user(unit, p))
			break;
		err = -ENXIO;
		pn = ppp_pernet(net);
		spin_lock_bh(&pn->all_channels_lock);
		chan = ppp_find_channel(pn, unit);
		if (chan) {
			refcount_inc(&chan->file.refcnt);
			file->private_data = &chan->file;
			err = 0;
		}
		spin_unlock_bh(&pn->all_channels_lock);
		break;

	default:
		err = -ENOTTY;
	}

	return err;
}

static const struct file_operations ppp_device_fops = {
	.owner		= THIS_MODULE,
	.read		= ppp_read,
	.write		= ppp_write,
	.poll		= ppp_poll,
	.unlocked_ioctl	= ppp_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= ppp_compat_ioctl,
#endif
	.open		= ppp_open,
	.release	= ppp_release,
	.llseek		= noop_llseek,
};

static __net_init int ppp_init_net(struct net *net)
{
	struct ppp_net *pn = net_generic(net, ppp_net_id);

	idr_init(&pn->units_idr);
	mutex_init(&pn->all_ppp_mutex);

	INIT_LIST_HEAD(&pn->all_channels);
	INIT_LIST_HEAD(&pn->new_channels);

	spin_lock_init(&pn->all_channels_lock);

	return 0;
}

static __net_exit void ppp_exit_net(struct net *net)
{
	struct ppp_net *pn = net_generic(net, ppp_net_id);
	struct net_device *dev;
	struct net_device *aux;
	struct ppp *ppp;
	LIST_HEAD(list);
	int id;

	rtnl_lock();
	for_each_netdev_safe(net, dev, aux) {
		if (dev->netdev_ops == &ppp_netdev_ops)
			unregister_netdevice_queue(dev, &list);
	}

	idr_for_each_entry(&pn->units_idr, ppp, id)
		/* Skip devices already unregistered by previous loop */
		if (!net_eq(dev_net(ppp->dev), net))
			unregister_netdevice_queue(ppp->dev, &list);

	unregister_netdevice_many(&list);
	rtnl_unlock();

	mutex_destroy(&pn->all_ppp_mutex);
	idr_destroy(&pn->units_idr);
	WARN_ON_ONCE(!list_empty(&pn->all_channels));
	WARN_ON_ONCE(!list_empty(&pn->new_channels));
}

static struct pernet_operations ppp_net_ops = {
	.init = ppp_init_net,
	.exit = ppp_exit_net,
	.id   = &ppp_net_id,
	.size = sizeof(struct ppp_net),
};
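
/*
 * Note: the unit number chosen below also becomes the default
 * interface name suffix; a unit registered with index N is named
 * "pppN" unless userspace supplied an explicit ifname.
 */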
static int ppp_unit_register(struct ppp *ppp, int unit, bool ifname_is_set)
{
	struct ppp_net *pn = ppp_pernet(ppp->ppp_net);
	int ret;

	mutex_lock(&pn->all_ppp_mutex);

	if (unit < 0) {
		ret = unit_get(&pn->units_idr, ppp);
		if (ret < 0)
			goto err;
	} else {
		/* Caller asked for a specific unit number. Fail with -EEXIST
		 * if unavailable. For backward compatibility, return -EEXIST
		 * too if idr allocation fails; this makes pppd retry without
		 * requesting a specific unit number.
		 */
		if (unit_find(&pn->units_idr, unit)) {
			ret = -EEXIST;
			goto err;
		}
		ret = unit_set(&pn->units_idr, ppp, unit);
		if (ret < 0) {
			/* Rewrite error for backward compatibility */
			ret = -EEXIST;
			goto err;
		}
	}
	ppp->file.index = ret;

	if (!ifname_is_set)
		snprintf(ppp->dev->name, IFNAMSIZ, "ppp%i", ppp->file.index);

	mutex_unlock(&pn->all_ppp_mutex);

	ret = register_netdevice(ppp->dev);
	if (ret < 0)
		goto err_unit;

	atomic_inc(&ppp_unit_count);

	return 0;

err_unit:
	mutex_lock(&pn->all_ppp_mutex);
	unit_put(&pn->units_idr, ppp->file.index);
err:
	mutex_unlock(&pn->all_ppp_mutex);

	return ret;
}

static int ppp_dev_configure(struct net *src_net, struct net_device *dev,
			     const struct ppp_config *conf)
{
	struct ppp *ppp = netdev_priv(dev);
	int indx;
	int err;
	int cpu;

	ppp->dev = dev;
	ppp->ppp_net = src_net;
	ppp->mru = PPP_MRU;
	ppp->owner = conf->file;

	init_ppp_file(&ppp->file, INTERFACE);
	ppp->file.hdrlen = PPP_HDRLEN - 2; /* don't count proto bytes */

	for (indx = 0; indx < NUM_NP; ++indx)
		ppp->npmode[indx] = NPMODE_PASS;
	INIT_LIST_HEAD(&ppp->channels);
	spin_lock_init(&ppp->rlock);
	spin_lock_init(&ppp->wlock);

	ppp->xmit_recursion = alloc_percpu(int);
	if (!ppp->xmit_recursion) {
		err = -ENOMEM;
		goto err1;
	}
	for_each_possible_cpu(cpu)
		(*per_cpu_ptr(ppp->xmit_recursion, cpu)) = 0;

#ifdef CONFIG_PPP_MULTILINK
	ppp->minseq = -1;
	skb_queue_head_init(&ppp->mrq);
#endif /* CONFIG_PPP_MULTILINK */
#ifdef CONFIG_PPP_FILTER
	ppp->pass_filter = NULL;
	ppp->active_filter = NULL;
#endif /* CONFIG_PPP_FILTER */

	err = ppp_unit_register(ppp, conf->unit, conf->ifname_is_set);
	if (err < 0)
		goto err2;

	conf->file->private_data = &ppp->file;

	return 0;
err2:
	free_percpu(ppp->xmit_recursion);
err1:
	return err;
}

static const struct nla_policy ppp_nl_policy[IFLA_PPP_MAX + 1] = {
	[IFLA_PPP_DEV_FD]	= { .type = NLA_S32 },
};

static int ppp_nl_validate(struct nlattr *tb[], struct nlattr *data[],
			   struct netlink_ext_ack *extack)
{
	if (!data)
		return -EINVAL;

	if (!data[IFLA_PPP_DEV_FD])
		return -EINVAL;
	if (nla_get_s32(data[IFLA_PPP_DEV_FD]) < 0)
		return -EBADF;

	return 0;
}

static int ppp_nl_newlink(struct net *src_net, struct net_device *dev,
			  struct nlattr *tb[], struct nlattr *data[],
			  struct netlink_ext_ack *extack)
{
	struct ppp_config conf = {
		.unit = -1,
		.ifname_is_set = true,
	};
	struct file *file;
	int err;

	file = fget(nla_get_s32(data[IFLA_PPP_DEV_FD]));
	if (!file)
		return -EBADF;

	/* rtnl_lock is already held here, but ppp_create_interface() locks
	 * ppp_mutex before holding rtnl_lock. Using mutex_trylock() avoids
	 * possible deadlock due to lock order inversion, at the cost of
	 * pushing the problem back to userspace.
	 */
	if (!mutex_trylock(&ppp_mutex)) {
		err = -EBUSY;
		goto out;
	}

	if (file->f_op != &ppp_device_fops || file->private_data) {
		err = -EBADF;
		goto out_unlock;
	}

	conf.file = file;

	/* Don't use device name generated by the rtnetlink layer when ifname
	 * isn't specified. Let ppp_dev_configure() set the device name using
	 * the PPP unit identifier as suffix (i.e. ppp<unit_id>). This allows
	 * userspace to infer the device name using the PPPIOCGUNIT ioctl.
	 */
	if (!tb[IFLA_IFNAME])
		conf.ifname_is_set = false;

	err = ppp_dev_configure(src_net, dev, &conf);

out_unlock:
	mutex_unlock(&ppp_mutex);
out:
	fput(file);

	return err;
}

static void ppp_nl_dellink(struct net_device *dev, struct list_head *head)
{
	unregister_netdevice_queue(dev, head);
}

static size_t ppp_nl_get_size(const struct net_device *dev)
{
	return 0;
}

static int ppp_nl_fill_info(struct sk_buff *skb, const struct net_device *dev)
{
	return 0;
}

static struct net *ppp_nl_get_link_net(const struct net_device *dev)
{
	struct ppp *ppp = netdev_priv(dev);

	return ppp->ppp_net;
}

static struct rtnl_link_ops ppp_link_ops __read_mostly = {
	.kind		= "ppp",
	.maxtype	= IFLA_PPP_MAX,
	.policy		= ppp_nl_policy,
	.priv_size	= sizeof(struct ppp),
	.setup		= ppp_setup,
	.validate	= ppp_nl_validate,
	.newlink	= ppp_nl_newlink,
	.dellink	= ppp_nl_dellink,
	.get_size	= ppp_nl_get_size,
	.fill_info	= ppp_nl_fill_info,
	.get_link_net	= ppp_nl_get_link_net,
};

#define PPP_MAJOR	108

/* Called at boot time if ppp is compiled into the kernel,
   or at module load time (from init_module) if compiled as a module. */
static int __init ppp_init(void)
{
	int err;

	pr_info("PPP generic driver version " PPP_VERSION "\n");

	err = register_pernet_device(&ppp_net_ops);
	if (err) {
		pr_err("failed to register PPP pernet device (%d)\n", err);
		goto out;
	}

	err = register_chrdev(PPP_MAJOR, "ppp", &ppp_device_fops);
	if (err) {
		pr_err("failed to register PPP device (%d)\n", err);
		goto out_net;
	}

	ppp_class = class_create(THIS_MODULE, "ppp");
	if (IS_ERR(ppp_class)) {
		err = PTR_ERR(ppp_class);
		goto out_chrdev;
	}

	err = rtnl_link_register(&ppp_link_ops);
	if (err) {
		pr_err("failed to register rtnetlink PPP handler\n");
		goto out_class;
	}

	/* not a big deal if we fail here :-) */
	device_create(ppp_class, NULL, MKDEV(PPP_MAJOR, 0), NULL, "ppp");

	return 0;

out_class:
	class_destroy(ppp_class);
out_chrdev:
	unregister_chrdev(PPP_MAJOR, "ppp");
out_net:
	unregister_pernet_device(&ppp_net_ops);
out:
	return err;
}
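
/*
 * With the driver loaded, the control device is character device
 * major 108 (PPP_MAJOR), minor 0; device_create() above lets udev
 * create /dev/ppp, and the classic "mknod /dev/ppp c 108 0" has the
 * same effect on static /dev setups.
 */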

/*
 * Network interface unit routines.
 */
static netdev_tx_t
ppp_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct ppp *ppp = netdev_priv(dev);
	int npi, proto;
	unsigned char *pp;

	npi = ethertype_to_npindex(ntohs(skb->protocol));
	if (npi < 0)
		goto outf;

	/* Drop, accept or reject the packet */
	switch (ppp->npmode[npi]) {
	case NPMODE_PASS:
		break;
	case NPMODE_QUEUE:
		/* it would be nice to have a way to tell the network
		   system to queue this one up for later. */
		goto outf;
	case NPMODE_DROP:
	case NPMODE_ERROR:
		goto outf;
	}

	/* Put the 2-byte PPP protocol number on the front,
	   making sure there is room for the address and control fields. */
	if (skb_cow_head(skb, PPP_HDRLEN))
		goto outf;

	pp = skb_push(skb, 2);
	proto = npindex_to_proto[npi];
	put_unaligned_be16(proto, pp);

	skb_scrub_packet(skb, !net_eq(ppp->ppp_net, dev_net(dev)));
	ppp_xmit_process(ppp, skb);

	return NETDEV_TX_OK;

 outf:
	kfree_skb(skb);
	++dev->stats.tx_dropped;
	return NETDEV_TX_OK;
}

static int
ppp_net_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct ppp *ppp = netdev_priv(dev);
	int err = -EFAULT;
	void __user *addr = (void __user *) ifr->ifr_ifru.ifru_data;
	struct ppp_stats stats;
	struct ppp_comp_stats cstats;
	char *vers;

	switch (cmd) {
	case SIOCGPPPSTATS:
		ppp_get_stats(ppp, &stats);
		if (copy_to_user(addr, &stats, sizeof(stats)))
			break;
		err = 0;
		break;

	case SIOCGPPPCSTATS:
		memset(&cstats, 0, sizeof(cstats));
		if (ppp->xc_state)
			ppp->xcomp->comp_stat(ppp->xc_state, &cstats.c);
		if (ppp->rc_state)
			ppp->rcomp->decomp_stat(ppp->rc_state, &cstats.d);
		if (copy_to_user(addr, &cstats, sizeof(cstats)))
			break;
		err = 0;
		break;

	case SIOCGPPPVER:
		vers = PPP_VERSION;
		if (copy_to_user(addr, vers, strlen(vers) + 1))
			break;
		err = 0;
		break;

	default:
		err = -EINVAL;
	}

	return err;
}

static void
ppp_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats64)
{
	struct ppp *ppp = netdev_priv(dev);

	ppp_recv_lock(ppp);
	stats64->rx_packets = ppp->stats64.rx_packets;
	stats64->rx_bytes   = ppp->stats64.rx_bytes;
	ppp_recv_unlock(ppp);

	ppp_xmit_lock(ppp);
	stats64->tx_packets = ppp->stats64.tx_packets;
	stats64->tx_bytes   = ppp->stats64.tx_bytes;
	ppp_xmit_unlock(ppp);

	stats64->rx_errors        = dev->stats.rx_errors;
	stats64->tx_errors        = dev->stats.tx_errors;
	stats64->rx_dropped       = dev->stats.rx_dropped;
	stats64->tx_dropped       = dev->stats.tx_dropped;
	stats64->rx_length_errors = dev->stats.rx_length_errors;
}
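
/*
 * The split locking above is deliberate: rx counters are updated in
 * ppp_receive_nonmp_frame() under the recv lock and tx counters in
 * ppp_send_frame() under the xmit lock, so sampling each pair under
 * its own lock yields tear-free 64-bit values even on 32-bit SMP.
 */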

static int ppp_dev_init(struct net_device *dev)
{
	struct ppp *ppp;

	ppp = netdev_priv(dev);
	/* Let the netdevice take a reference on the ppp file. This ensures
	 * that ppp_destroy_interface() won't run before the device gets
	 * unregistered.
	 */
	refcount_inc(&ppp->file.refcnt);

	return 0;
}

static void ppp_dev_uninit(struct net_device *dev)
{
	struct ppp *ppp = netdev_priv(dev);
	struct ppp_net *pn = ppp_pernet(ppp->ppp_net);

	ppp_lock(ppp);
	ppp->closing = 1;
	ppp_unlock(ppp);

	mutex_lock(&pn->all_ppp_mutex);
	unit_put(&pn->units_idr, ppp->file.index);
	mutex_unlock(&pn->all_ppp_mutex);

	ppp->owner = NULL;

	ppp->file.dead = 1;
	wake_up_interruptible(&ppp->file.rwait);
}

static void ppp_dev_priv_destructor(struct net_device *dev)
{
	struct ppp *ppp;

	ppp = netdev_priv(dev);
	if (refcount_dec_and_test(&ppp->file.refcnt))
		ppp_destroy_interface(ppp);
}

static const struct net_device_ops ppp_netdev_ops = {
	.ndo_init	 = ppp_dev_init,
	.ndo_uninit      = ppp_dev_uninit,
	.ndo_start_xmit  = ppp_start_xmit,
	.ndo_do_ioctl    = ppp_net_ioctl,
	.ndo_get_stats64 = ppp_get_stats64,
};

static struct device_type ppp_type = {
	.name = "ppp",
};

static void ppp_setup(struct net_device *dev)
{
	dev->netdev_ops = &ppp_netdev_ops;
	SET_NETDEV_DEVTYPE(dev, &ppp_type);

	dev->features |= NETIF_F_LLTX;

	dev->hard_header_len = PPP_HDRLEN;
	dev->mtu = PPP_MRU;
	dev->addr_len = 0;
	dev->tx_queue_len = 3;
	dev->type = ARPHRD_PPP;
	dev->flags = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST;
	dev->priv_destructor = ppp_dev_priv_destructor;
	netif_keep_dst(dev);
}

/*
 * Transmit-side routines.
 */
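
/*
 * A note on re-entrancy: a channel's start_xmit() may itself feed
 * frames back into a ppp unit's transmit path (for instance when one
 * ppp unit is carried over another).  The per-cpu xmit_recursion
 * counter in ppp_xmit_process() below detects such re-entry and drops
 * the packet instead of recursing without bound.
 */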

/* Called to do any work queued up on the transmit side that can now be done */
static void __ppp_xmit_process(struct ppp *ppp, struct sk_buff *skb)
{
	ppp_xmit_lock(ppp);
	if (!ppp->closing) {
		ppp_push(ppp);

		if (skb)
			skb_queue_tail(&ppp->file.xq, skb);
		while (!ppp->xmit_pending &&
		       (skb = skb_dequeue(&ppp->file.xq)))
			ppp_send_frame(ppp, skb);
		/* If there's no work left to do, tell the core net
		   code that we can accept some more. */
		if (!ppp->xmit_pending && !skb_peek(&ppp->file.xq))
			netif_wake_queue(ppp->dev);
		else
			netif_stop_queue(ppp->dev);
	} else {
		kfree_skb(skb);
	}
	ppp_xmit_unlock(ppp);
}

static void ppp_xmit_process(struct ppp *ppp, struct sk_buff *skb)
{
	local_bh_disable();

	if (unlikely(*this_cpu_ptr(ppp->xmit_recursion)))
		goto err;

	(*this_cpu_ptr(ppp->xmit_recursion))++;
	__ppp_xmit_process(ppp, skb);
	(*this_cpu_ptr(ppp->xmit_recursion))--;

	local_bh_enable();

	return;

err:
	local_bh_enable();

	kfree_skb(skb);

	if (net_ratelimit())
		netdev_err(ppp->dev, "recursion detected\n");
}

static inline struct sk_buff *
pad_compress_skb(struct ppp *ppp, struct sk_buff *skb)
{
	struct sk_buff *new_skb;
	int len;
	int new_skb_size = ppp->dev->mtu +
		ppp->xcomp->comp_extra + ppp->dev->hard_header_len;
	int compressor_skb_size = ppp->dev->mtu +
		ppp->xcomp->comp_extra + PPP_HDRLEN;

	new_skb = alloc_skb(new_skb_size, GFP_ATOMIC);
	if (!new_skb) {
		if (net_ratelimit())
			netdev_err(ppp->dev, "PPP: no memory (comp pkt)\n");
		return NULL;
	}
	if (ppp->dev->hard_header_len > PPP_HDRLEN)
		skb_reserve(new_skb,
			    ppp->dev->hard_header_len - PPP_HDRLEN);

	/* compressor still expects A/C bytes in hdr */
	len = ppp->xcomp->compress(ppp->xc_state, skb->data - 2,
				   new_skb->data, skb->len + 2,
				   compressor_skb_size);
	if (len > 0 && (ppp->flags & SC_CCP_UP)) {
		consume_skb(skb);
		skb = new_skb;
		skb_put(skb, len);
		skb_pull(skb, 2);	/* pull off A/C bytes */
	} else if (len == 0) {
		/* didn't compress, or CCP not up yet */
		consume_skb(new_skb);
		new_skb = skb;
	} else {
		/*
		 * (len < 0)
		 * MPPE requires that we do not send unencrypted
		 * frames.  The compressor will return -1 if we
		 * should drop the frame.  We cannot simply test
		 * the compress_proto because MPPE and MPPC share
		 * the same number.
		 */
		if (net_ratelimit())
			netdev_err(ppp->dev, "ppp: compressor dropped pkt\n");
		kfree_skb(skb);
		consume_skb(new_skb);
		new_skb = NULL;
	}
	return new_skb;
}

/*
 * Compress and send a frame.
 * The caller should have locked the xmit path,
 * and xmit_pending should be 0.
 */
static void
ppp_send_frame(struct ppp *ppp, struct sk_buff *skb)
{
	int proto = PPP_PROTO(skb);
	struct sk_buff *new_skb;
	int len;
	unsigned char *cp;

	if (proto < 0x8000) {
#ifdef CONFIG_PPP_FILTER
		/* check if we should pass this packet */
		/* the filter instructions are constructed assuming
		   a four-byte PPP header on each packet */
		*(u8 *)skb_push(skb, 2) = 1;
		if (ppp->pass_filter &&
		    BPF_PROG_RUN(ppp->pass_filter, skb) == 0) {
			if (ppp->debug & 1)
				netdev_printk(KERN_DEBUG, ppp->dev,
					      "PPP: outbound frame "
					      "not passed\n");
			kfree_skb(skb);
			return;
		}
		/* if this packet passes the active filter, record the time */
		if (!(ppp->active_filter &&
		      BPF_PROG_RUN(ppp->active_filter, skb) == 0))
			ppp->last_xmit = jiffies;
		skb_pull(skb, 2);
#else
		/* for data packets, record the time */
		ppp->last_xmit = jiffies;
#endif /* CONFIG_PPP_FILTER */
	}

	++ppp->stats64.tx_packets;
	ppp->stats64.tx_bytes += skb->len - 2;

	switch (proto) {
	case PPP_IP:
		if (!ppp->vj || (ppp->flags & SC_COMP_TCP) == 0)
			break;
		/* try to do VJ TCP header compression */
		new_skb = alloc_skb(skb->len + ppp->dev->hard_header_len - 2,
				    GFP_ATOMIC);
		if (!new_skb) {
			netdev_err(ppp->dev, "PPP: no memory (VJ comp pkt)\n");
			goto drop;
		}
		skb_reserve(new_skb, ppp->dev->hard_header_len - 2);
		cp = skb->data + 2;
		len = slhc_compress(ppp->vj, cp, skb->len - 2,
				    new_skb->data + 2, &cp,
				    !(ppp->flags & SC_NO_TCP_CCID));
		if (cp == skb->data + 2) {
			/* didn't compress */
			consume_skb(new_skb);
		} else {
			if (cp[0] & SL_TYPE_COMPRESSED_TCP) {
				proto = PPP_VJC_COMP;
				cp[0] &= ~SL_TYPE_COMPRESSED_TCP;
			} else {
				proto = PPP_VJC_UNCOMP;
				cp[0] = skb->data[2];
			}
			consume_skb(skb);
			skb = new_skb;
			cp = skb_put(skb, len + 2);
			cp[0] = 0;
			cp[1] = proto;
		}
		break;

	case PPP_CCP:
		/* peek at outbound CCP frames */
		ppp_ccp_peek(ppp, skb, 0);
		break;
	}

	/* try to do packet compression */
	if ((ppp->xstate & SC_COMP_RUN) && ppp->xc_state &&
	    proto != PPP_LCP && proto != PPP_CCP) {
		if (!(ppp->flags & SC_CCP_UP) && (ppp->flags & SC_MUST_COMP)) {
			if (net_ratelimit())
				netdev_err(ppp->dev,
					   "ppp: compression required but "
					   "down - pkt dropped.\n");
			goto drop;
		}
		skb = pad_compress_skb(ppp, skb);
		if (!skb)
			goto drop;
	}

	/*
	 * If we are waiting for traffic (demand dialling),
	 * queue it up for pppd to receive.
	 */
	if (ppp->flags & SC_LOOP_TRAFFIC) {
		if (ppp->file.rq.qlen > PPP_MAX_RQLEN)
			goto drop;
		skb_queue_tail(&ppp->file.rq, skb);
		wake_up_interruptible(&ppp->file.rwait);
		return;
	}

	ppp->xmit_pending = skb;
	ppp_push(ppp);
	return;

 drop:
	kfree_skb(skb);
	++ppp->dev->stats.tx_errors;
}
1704 */ 1705 static void 1706 ppp_push(struct ppp *ppp) 1707 { 1708 struct list_head *list; 1709 struct channel *pch; 1710 struct sk_buff *skb = ppp->xmit_pending; 1711 1712 if (!skb) 1713 return; 1714 1715 list = &ppp->channels; 1716 if (list_empty(list)) { 1717 /* nowhere to send the packet, just drop it */ 1718 ppp->xmit_pending = NULL; 1719 kfree_skb(skb); 1720 return; 1721 } 1722 1723 if ((ppp->flags & SC_MULTILINK) == 0) { 1724 /* not doing multilink: send it down the first channel */ 1725 list = list->next; 1726 pch = list_entry(list, struct channel, clist); 1727 1728 spin_lock(&pch->downl); 1729 if (pch->chan) { 1730 if (pch->chan->ops->start_xmit(pch->chan, skb)) 1731 ppp->xmit_pending = NULL; 1732 } else { 1733 /* channel got unregistered */ 1734 kfree_skb(skb); 1735 ppp->xmit_pending = NULL; 1736 } 1737 spin_unlock(&pch->downl); 1738 return; 1739 } 1740 1741 #ifdef CONFIG_PPP_MULTILINK 1742 /* Multilink: fragment the packet over as many links 1743 as can take the packet at the moment. */ 1744 if (!ppp_mp_explode(ppp, skb)) 1745 return; 1746 #endif /* CONFIG_PPP_MULTILINK */ 1747 1748 ppp->xmit_pending = NULL; 1749 kfree_skb(skb); 1750 } 1751 1752 #ifdef CONFIG_PPP_MULTILINK 1753 static bool mp_protocol_compress __read_mostly = true; 1754 module_param(mp_protocol_compress, bool, 0644); 1755 MODULE_PARM_DESC(mp_protocol_compress, 1756 "compress protocol id in multilink fragments"); 1757 1758 /* 1759 * Divide a packet to be transmitted into fragments and 1760 * send them out the individual links. 1761 */ 1762 static int ppp_mp_explode(struct ppp *ppp, struct sk_buff *skb) 1763 { 1764 int len, totlen; 1765 int i, bits, hdrlen, mtu; 1766 int flen; 1767 int navail, nfree, nzero; 1768 int nbigger; 1769 int totspeed; 1770 int totfree; 1771 unsigned char *p, *q; 1772 struct list_head *list; 1773 struct channel *pch; 1774 struct sk_buff *frag; 1775 struct ppp_channel *chan; 1776 1777 totspeed = 0; /*total bitrate of the bundle*/ 1778 nfree = 0; /* # channels which have no packet already queued */ 1779 navail = 0; /* total # of usable channels (not deregistered) */ 1780 nzero = 0; /* number of channels with zero speed associated*/ 1781 totfree = 0; /*total # of channels available and 1782 *having no queued packets before 1783 *starting the fragmentation*/ 1784 1785 hdrlen = (ppp->flags & SC_MP_XSHORTSEQ)? MPHDRLEN_SSN: MPHDRLEN; 1786 i = 0; 1787 list_for_each_entry(pch, &ppp->channels, clist) { 1788 if (pch->chan) { 1789 pch->avail = 1; 1790 navail++; 1791 pch->speed = pch->chan->speed; 1792 } else { 1793 pch->avail = 0; 1794 } 1795 if (pch->avail) { 1796 if (skb_queue_empty(&pch->file.xq) || 1797 !pch->had_frag) { 1798 if (pch->speed == 0) 1799 nzero++; 1800 else 1801 totspeed += pch->speed; 1802 1803 pch->avail = 2; 1804 ++nfree; 1805 ++totfree; 1806 } 1807 if (!pch->had_frag && i < ppp->nxchan) 1808 ppp->nxchan = i; 1809 } 1810 ++i; 1811 } 1812 /* 1813 * Don't start sending this packet unless at least half of 1814 * the channels are free. This gives much better TCP 1815 * performance if we have a lot of channels. 
	/*
	 * Don't start sending this packet unless at least half of
	 * the channels are free.  This gives much better TCP
	 * performance if we have a lot of channels.
	 */
	if (nfree == 0 || nfree < navail / 2)
		return 0;	/* can't take now, leave it in xmit_pending */

	/* Do protocol field compression */
	p = skb->data;
	len = skb->len;
	if (*p == 0 && mp_protocol_compress) {
		++p;
		--len;
	}

	totlen = len;
	nbigger = len % nfree;

	/* skip to the channel after the one we last used
	   and start at that one */
	list = &ppp->channels;
	for (i = 0; i < ppp->nxchan; ++i) {
		list = list->next;
		if (list == &ppp->channels) {
			i = 0;
			break;
		}
	}

	/* create a fragment for each channel */
	bits = B;
	while (len > 0) {
		list = list->next;
		if (list == &ppp->channels) {
			i = 0;
			continue;
		}
		pch = list_entry(list, struct channel, clist);
		++i;
		if (!pch->avail)
			continue;

		/*
		 * Skip this channel if it has a fragment pending already and
		 * we haven't given a fragment to all of the free channels.
		 */
		if (pch->avail == 1) {
			if (nfree > 0)
				continue;
		} else {
			pch->avail = 1;
		}

		/* check the channel's mtu and whether it is still attached. */
		spin_lock(&pch->downl);
		if (pch->chan == NULL) {
			/* can't use this channel, it's being deregistered */
			if (pch->speed == 0)
				nzero--;
			else
				totspeed -= pch->speed;

			spin_unlock(&pch->downl);
			pch->avail = 0;
			totlen = len;
			totfree--;
			nfree--;
			if (--navail == 0)
				break;
			continue;
		}

		/*
		 * If the channel speed is not set, divide the packet
		 * evenly among the free channels; otherwise divide it
		 * according to the speed of the channel we are going
		 * to transmit on.
		 */
		flen = len;
		if (nfree > 0) {
			if (pch->speed == 0) {
				flen = len/nfree;
				if (nbigger > 0) {
					flen++;
					nbigger--;
				}
			} else {
				flen = (((totfree - nzero)*(totlen + hdrlen*totfree)) /
					((totspeed*totfree)/pch->speed)) - hdrlen;
				if (nbigger > 0) {
					flen += ((totfree - nzero)*pch->speed)/totspeed;
					nbigger -= ((totfree - nzero)*pch->speed)/
						   totspeed;
				}
			}
			nfree--;
		}

		/*
		 * Check whether we are on the last channel or whether we
		 * exceeded the length of the data to fragment.
		 */
		if ((nfree <= 0) || (flen > len))
			flen = len;
		/*
		 * It is not worth transmitting on slow channels: in that
		 * case the flen resulting from the above formula will be
		 * less than or equal to zero.  Skip the channel in this
		 * case.
		 */
		if (flen <= 0) {
			pch->avail = 2;
			spin_unlock(&pch->downl);
			continue;
		}
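
		/*
		 * Reading the formula above with nzero == 0 and ignoring
		 * integer truncation: each free channel gets roughly
		 * (totlen + hdrlen*totfree) * pch->speed / totspeed bytes,
		 * i.e. a speed-proportional share of the payload plus
		 * per-fragment header overhead, minus its own hdrlen.
		 */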

		/*
		 * hdrlen includes the 2-byte PPP protocol field, but the
		 * MTU counts only the payload excluding the protocol field.
		 * (RFC1661 Section 2)
		 */
		mtu = pch->chan->mtu - (hdrlen - 2);
		if (mtu < 4)
			mtu = 4;
		if (flen > mtu)
			flen = mtu;
		if (flen == len)
			bits |= E;
		frag = alloc_skb(flen + hdrlen + (flen == 0), GFP_ATOMIC);
		if (!frag)
			goto noskb;
		q = skb_put(frag, flen + hdrlen);

		/* make the MP header */
		put_unaligned_be16(PPP_MP, q);
		if (ppp->flags & SC_MP_XSHORTSEQ) {
			q[2] = bits + ((ppp->nxseq >> 8) & 0xf);
			q[3] = ppp->nxseq;
		} else {
			q[2] = bits;
			q[3] = ppp->nxseq >> 16;
			q[4] = ppp->nxseq >> 8;
			q[5] = ppp->nxseq;
		}

		memcpy(q + hdrlen, p, flen);

		/* try to send it down the channel */
		chan = pch->chan;
		if (!skb_queue_empty(&pch->file.xq) ||
		    !chan->ops->start_xmit(chan, frag))
			skb_queue_tail(&pch->file.xq, frag);
		pch->had_frag = 1;
		p += flen;
		len -= flen;
		++ppp->nxseq;
		bits = 0;
		spin_unlock(&pch->downl);
	}
	ppp->nxchan = i;

	return 1;

 noskb:
	spin_unlock(&pch->downl);
	if (ppp->debug & 1)
		netdev_err(ppp->dev, "PPP: no memory (fragment)\n");
	++ppp->dev->stats.tx_errors;
	++ppp->nxseq;
	return 1;	/* abandon the frame */
}
#endif /* CONFIG_PPP_MULTILINK */

/* Try to send data out on a channel */
static void __ppp_channel_push(struct channel *pch)
{
	struct sk_buff *skb;
	struct ppp *ppp;

	spin_lock(&pch->downl);
	if (pch->chan) {
		while (!skb_queue_empty(&pch->file.xq)) {
			skb = skb_dequeue(&pch->file.xq);
			if (!pch->chan->ops->start_xmit(pch->chan, skb)) {
				/* put the packet back and try again later */
				skb_queue_head(&pch->file.xq, skb);
				break;
			}
		}
	} else {
		/* channel got deregistered */
		skb_queue_purge(&pch->file.xq);
	}
	spin_unlock(&pch->downl);
	/* see if there is anything from the attached unit to be sent */
	if (skb_queue_empty(&pch->file.xq)) {
		ppp = pch->ppp;
		if (ppp)
			__ppp_xmit_process(ppp, NULL);
	}
}

static void ppp_channel_push(struct channel *pch)
{
	read_lock_bh(&pch->upl);
	if (pch->ppp) {
		(*this_cpu_ptr(pch->ppp->xmit_recursion))++;
		__ppp_channel_push(pch);
		(*this_cpu_ptr(pch->ppp->xmit_recursion))--;
	} else {
		__ppp_channel_push(pch);
	}
	read_unlock_bh(&pch->upl);
}

/*
 * Receive-side routines.
 */

struct ppp_mp_skb_parm {
	u32		sequence;
	u8		BEbits;
};
#define PPP_MP_CB(skb)	((struct ppp_mp_skb_parm *)((skb)->cb))

static inline void
ppp_do_recv(struct ppp *ppp, struct sk_buff *skb, struct channel *pch)
{
	ppp_recv_lock(ppp);
	if (!ppp->closing)
		ppp_receive_frame(ppp, skb, pch);
	else
		kfree_skb(skb);
	ppp_recv_unlock(ppp);
}

/**
 * __ppp_decompress_proto - Decompress protocol field, slim version.
 * @skb: Socket buffer where protocol field should be decompressed. It must have
 *	 at least 1 byte of head room and 1 byte of linear data. First byte of
 *	 data must be a protocol field byte.
 *
 * Decompress protocol field in PPP header if it's compressed, e.g. when
 * Protocol-Field-Compression (PFC) was negotiated. No checks w.r.t. skb data
 * length are done in this function.
 */
static void __ppp_decompress_proto(struct sk_buff *skb)
{
	if (skb->data[0] & 0x01)
		*(u8 *)skb_push(skb, 1) = 0x00;
}
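
/*
 * Example: with PFC negotiated, an IPv4 frame can arrive carrying the
 * single protocol byte 0x21.  Since 0x21 has the low bit set, the
 * helper above prepends 0x00, restoring the canonical two-byte
 * 0x00 0x21 (PPP_IP) encoding the rest of the receive path expects.
 */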
2153  */
2154 static void
2155 ppp_receive_frame(struct ppp *ppp, struct sk_buff *skb, struct channel *pch)
2156 {
2157 	/* note: a 0-length skb is used as an error indication */
2158 	if (skb->len > 0) {
2159 		skb_checksum_complete_unset(skb);
2160 #ifdef CONFIG_PPP_MULTILINK
2161 		/* XXX do channel-level decompression here */
2162 		if (PPP_PROTO(skb) == PPP_MP)
2163 			ppp_receive_mp_frame(ppp, skb, pch);
2164 		else
2165 #endif /* CONFIG_PPP_MULTILINK */
2166 			ppp_receive_nonmp_frame(ppp, skb);
2167 	} else {
2168 		kfree_skb(skb);
2169 		ppp_receive_error(ppp);
2170 	}
2171 }
2172 
2173 static void
2174 ppp_receive_error(struct ppp *ppp)
2175 {
2176 	++ppp->dev->stats.rx_errors;
2177 	if (ppp->vj)
2178 		slhc_toss(ppp->vj);
2179 }
2180 
2181 static void
2182 ppp_receive_nonmp_frame(struct ppp *ppp, struct sk_buff *skb)
2183 {
2184 	struct sk_buff *ns;
2185 	int proto, len, npi;
2186 
2187 	/*
2188 	 * Decompress the frame, if compressed.
2189 	 * Note that some decompressors need to see uncompressed frames
2190 	 * that come in as well as compressed frames.
2191 	 */
2192 	if (ppp->rc_state && (ppp->rstate & SC_DECOMP_RUN) &&
2193 	    (ppp->rstate & (SC_DC_FERROR | SC_DC_ERROR)) == 0)
2194 		skb = ppp_decompress_frame(ppp, skb);
2195 
2196 	if (ppp->flags & SC_MUST_COMP && ppp->rstate & SC_DC_FERROR)
2197 		goto err;
2198 
2199 	/* At this point the "Protocol" field MUST be decompressed, either in
2200 	 * ppp_input(), ppp_decompress_frame() or in ppp_receive_mp_frame().
2201 	 */
2202 	proto = PPP_PROTO(skb);
2203 	switch (proto) {
2204 	case PPP_VJC_COMP:
2205 		/* decompress VJ compressed packets */
2206 		if (!ppp->vj || (ppp->flags & SC_REJ_COMP_TCP))
2207 			goto err;
2208 
2209 		if (skb_tailroom(skb) < 124 || skb_cloned(skb)) {
2210 			/* copy to a new sk_buff with more tailroom */
2211 			ns = dev_alloc_skb(skb->len + 128);
2212 			if (!ns) {
2213 				netdev_err(ppp->dev, "PPP: no memory "
2214 					   "(VJ decomp)\n");
2215 				goto err;
2216 			}
2217 			skb_reserve(ns, 2);
2218 			skb_copy_bits(skb, 0, skb_put(ns, skb->len), skb->len);
2219 			consume_skb(skb);
2220 			skb = ns;
2221 		}
2222 		else
2223 			skb->ip_summed = CHECKSUM_NONE;
2224 
2225 		len = slhc_uncompress(ppp->vj, skb->data + 2, skb->len - 2);
2226 		if (len <= 0) {
2227 			netdev_printk(KERN_DEBUG, ppp->dev,
2228 				      "PPP: VJ decompression error\n");
2229 			goto err;
2230 		}
2231 		len += 2;
2232 		if (len > skb->len)
2233 			skb_put(skb, len - skb->len);
2234 		else if (len < skb->len)
2235 			skb_trim(skb, len);
2236 		proto = PPP_IP;
2237 		break;
2238 
2239 	case PPP_VJC_UNCOMP:
2240 		if (!ppp->vj || (ppp->flags & SC_REJ_COMP_TCP))
2241 			goto err;
2242 
2243 		/* Until we fix the decompressor, we need to make sure
2244 		 * the data portion is linear.
2245 		 */
2246 		if (!pskb_may_pull(skb, skb->len))
2247 			goto err;
2248 
2249 		if (slhc_remember(ppp->vj, skb->data + 2, skb->len - 2) <= 0) {
2250 			netdev_err(ppp->dev, "PPP: VJ uncompressed error\n");
2251 			goto err;
2252 		}
2253 		proto = PPP_IP;
2254 		break;
2255 
2256 	case PPP_CCP:
2257 		ppp_ccp_peek(ppp, skb, 1);
2258 		break;
2259 	}
2260 
2261 	++ppp->stats64.rx_packets;
2262 	ppp->stats64.rx_bytes += skb->len - 2;
2263 
2264 	npi = proto_to_npindex(proto);
2265 	if (npi < 0) {
2266 		/* control or unknown frame - pass it to pppd */
2267 		skb_queue_tail(&ppp->file.rq, skb);
2268 		/* limit queue length by dropping old frames */
2269 		while (ppp->file.rq.qlen > PPP_MAX_RQLEN &&
2270 		       (skb = skb_dequeue(&ppp->file.rq)))
2271 			kfree_skb(skb);
2272 		/* wake up any process polling or blocking on read */
2273 		wake_up_interruptible(&ppp->file.rwait);
2274 
2275 	} else {
2276 		/* network protocol frame - give it to the kernel */
2277 
2278 #ifdef CONFIG_PPP_FILTER
2279 		/* check if the packet passes the pass and active filters */
2280 		/* the filter instructions are constructed assuming
2281 		   a four-byte PPP header on each packet */
2282 		if (ppp->pass_filter || ppp->active_filter) {
2283 			if (skb_unclone(skb, GFP_ATOMIC))
2284 				goto err;
2285 
2286 			*(u8 *)skb_push(skb, 2) = 0;
2287 			if (ppp->pass_filter &&
2288 			    BPF_PROG_RUN(ppp->pass_filter, skb) == 0) {
2289 				if (ppp->debug & 1)
2290 					netdev_printk(KERN_DEBUG, ppp->dev,
2291 						      "PPP: inbound frame "
2292 						      "not passed\n");
2293 				kfree_skb(skb);
2294 				return;
2295 			}
2296 			if (!(ppp->active_filter &&
2297 			      BPF_PROG_RUN(ppp->active_filter, skb) == 0))
2298 				ppp->last_recv = jiffies;
2299 			__skb_pull(skb, 2);
2300 		} else
2301 #endif /* CONFIG_PPP_FILTER */
2302 			ppp->last_recv = jiffies;
2303 
2304 		if ((ppp->dev->flags & IFF_UP) == 0 ||
2305 		    ppp->npmode[npi] != NPMODE_PASS) {
2306 			kfree_skb(skb);
2307 		} else {
2308 			/* chop off protocol */
2309 			skb_pull_rcsum(skb, 2);
2310 			skb->dev = ppp->dev;
2311 			skb->protocol = htons(npindex_to_ethertype[npi]);
2312 			skb_reset_mac_header(skb);
2313 			skb_scrub_packet(skb, !net_eq(ppp->ppp_net,
2314 						      dev_net(ppp->dev)));
2315 			netif_rx(skb);
2316 		}
2317 	}
2318 	return;
2319 
2320  err:
2321 	kfree_skb(skb);
2322 	ppp_receive_error(ppp);
2323 }
2324 
2325 static struct sk_buff *
2326 ppp_decompress_frame(struct ppp *ppp, struct sk_buff *skb)
2327 {
2328 	int proto = PPP_PROTO(skb);
2329 	struct sk_buff *ns;
2330 	int len;
2331 
2332 	/* Until we fix all the decompressors, we need to make sure
2333 	 * the data portion is linear.
2334 	 */
2335 	if (!pskb_may_pull(skb, skb->len))
2336 		goto err;
2337 
2338 	if (proto == PPP_COMP) {
2339 		int obuff_size;
2340 
2341 		switch (ppp->rcomp->compress_proto) {
2342 		case CI_MPPE:
2343 			obuff_size = ppp->mru + PPP_HDRLEN + 1;
2344 			break;
2345 		default:
2346 			obuff_size = ppp->mru + PPP_HDRLEN;
2347 			break;
2348 		}
2349 
2350 		ns = dev_alloc_skb(obuff_size);
2351 		if (!ns) {
2352 			netdev_err(ppp->dev, "ppp_decompress_frame: "
2353 				   "no memory\n");
2354 			goto err;
2355 		}
2356 		/* the decompressor still expects the A/C bytes in the hdr */
2357 		len = ppp->rcomp->decompress(ppp->rc_state, skb->data - 2,
2358 				skb->len + 2, ns->data, obuff_size);
2359 		if (len < 0) {
2360 			/* Pass the compressed frame to pppd as an
2361 			   error indication.
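			   (Editor's note: decompress() returns DECOMP_ERROR
			   for a bad packet and DECOMP_FATALERROR when the
			   stream state is unrecoverable; only the latter
			   sets SC_DC_FERROR, which together with
			   SC_MUST_COMP stops further receive processing in
			   ppp_receive_nonmp_frame().)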
*/ 2362 if (len == DECOMP_FATALERROR) 2363 ppp->rstate |= SC_DC_FERROR; 2364 kfree_skb(ns); 2365 goto err; 2366 } 2367 2368 consume_skb(skb); 2369 skb = ns; 2370 skb_put(skb, len); 2371 skb_pull(skb, 2); /* pull off the A/C bytes */ 2372 2373 /* Don't call __ppp_decompress_proto() here, but instead rely on 2374 * corresponding algo (mppe/bsd/deflate) to decompress it. 2375 */ 2376 } else { 2377 /* Uncompressed frame - pass to decompressor so it 2378 can update its dictionary if necessary. */ 2379 if (ppp->rcomp->incomp) 2380 ppp->rcomp->incomp(ppp->rc_state, skb->data - 2, 2381 skb->len + 2); 2382 } 2383 2384 return skb; 2385 2386 err: 2387 ppp->rstate |= SC_DC_ERROR; 2388 ppp_receive_error(ppp); 2389 return skb; 2390 } 2391 2392 #ifdef CONFIG_PPP_MULTILINK 2393 /* 2394 * Receive a multilink frame. 2395 * We put it on the reconstruction queue and then pull off 2396 * as many completed frames as we can. 2397 */ 2398 static void 2399 ppp_receive_mp_frame(struct ppp *ppp, struct sk_buff *skb, struct channel *pch) 2400 { 2401 u32 mask, seq; 2402 struct channel *ch; 2403 int mphdrlen = (ppp->flags & SC_MP_SHORTSEQ)? MPHDRLEN_SSN: MPHDRLEN; 2404 2405 if (!pskb_may_pull(skb, mphdrlen + 1) || ppp->mrru == 0) 2406 goto err; /* no good, throw it away */ 2407 2408 /* Decode sequence number and begin/end bits */ 2409 if (ppp->flags & SC_MP_SHORTSEQ) { 2410 seq = ((skb->data[2] & 0x0f) << 8) | skb->data[3]; 2411 mask = 0xfff; 2412 } else { 2413 seq = (skb->data[3] << 16) | (skb->data[4] << 8)| skb->data[5]; 2414 mask = 0xffffff; 2415 } 2416 PPP_MP_CB(skb)->BEbits = skb->data[2]; 2417 skb_pull(skb, mphdrlen); /* pull off PPP and MP headers */ 2418 2419 /* 2420 * Do protocol ID decompression on the first fragment of each packet. 2421 * We have to do that here, because ppp_receive_nonmp_frame() expects 2422 * decompressed protocol field. 2423 */ 2424 if (PPP_MP_CB(skb)->BEbits & B) 2425 __ppp_decompress_proto(skb); 2426 2427 /* 2428 * Expand sequence number to 32 bits, making it as close 2429 * as possible to ppp->minseq. 2430 */ 2431 seq |= ppp->minseq & ~mask; 2432 if ((int)(ppp->minseq - seq) > (int)(mask >> 1)) 2433 seq += mask + 1; 2434 else if ((int)(seq - ppp->minseq) > (int)(mask >> 1)) 2435 seq -= mask + 1; /* should never happen */ 2436 PPP_MP_CB(skb)->sequence = seq; 2437 pch->lastseq = seq; 2438 2439 /* 2440 * If this packet comes before the next one we were expecting, 2441 * drop it. 2442 */ 2443 if (seq_before(seq, ppp->nextseq)) { 2444 kfree_skb(skb); 2445 ++ppp->dev->stats.rx_dropped; 2446 ppp_receive_error(ppp); 2447 return; 2448 } 2449 2450 /* 2451 * Reevaluate minseq, the minimum over all channels of the 2452 * last sequence number received on each channel. Because of 2453 * the increasing sequence number rule, we know that any fragment 2454 * before `minseq' which hasn't arrived is never going to arrive. 2455 * The list of channels can't change because we have the receive 2456 * side of the ppp unit locked. 2457 */ 2458 list_for_each_entry(ch, &ppp->channels, clist) { 2459 if (seq_before(ch->lastseq, seq)) 2460 seq = ch->lastseq; 2461 } 2462 if (seq_before(ppp->minseq, seq)) 2463 ppp->minseq = seq; 2464 2465 /* Put the fragment on the reconstruction queue */ 2466 ppp_mp_insert(ppp, skb); 2467 2468 /* If the queue is getting long, don't wait any longer for packets 2469 before the start of the queue. 
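	   (Editor's note: PPP_MP_MAX_QLEN bounds the reconstruction
	   queue; once it fills, minseq is pushed past the oldest queued
	   fragment so that a permanently lost fragment cannot stall
	   reassembly forever.)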
*/ 2470 if (skb_queue_len(&ppp->mrq) >= PPP_MP_MAX_QLEN) { 2471 struct sk_buff *mskb = skb_peek(&ppp->mrq); 2472 if (seq_before(ppp->minseq, PPP_MP_CB(mskb)->sequence)) 2473 ppp->minseq = PPP_MP_CB(mskb)->sequence; 2474 } 2475 2476 /* Pull completed packets off the queue and receive them. */ 2477 while ((skb = ppp_mp_reconstruct(ppp))) { 2478 if (pskb_may_pull(skb, 2)) 2479 ppp_receive_nonmp_frame(ppp, skb); 2480 else { 2481 ++ppp->dev->stats.rx_length_errors; 2482 kfree_skb(skb); 2483 ppp_receive_error(ppp); 2484 } 2485 } 2486 2487 return; 2488 2489 err: 2490 kfree_skb(skb); 2491 ppp_receive_error(ppp); 2492 } 2493 2494 /* 2495 * Insert a fragment on the MP reconstruction queue. 2496 * The queue is ordered by increasing sequence number. 2497 */ 2498 static void 2499 ppp_mp_insert(struct ppp *ppp, struct sk_buff *skb) 2500 { 2501 struct sk_buff *p; 2502 struct sk_buff_head *list = &ppp->mrq; 2503 u32 seq = PPP_MP_CB(skb)->sequence; 2504 2505 /* N.B. we don't need to lock the list lock because we have the 2506 ppp unit receive-side lock. */ 2507 skb_queue_walk(list, p) { 2508 if (seq_before(seq, PPP_MP_CB(p)->sequence)) 2509 break; 2510 } 2511 __skb_queue_before(list, p, skb); 2512 } 2513 2514 /* 2515 * Reconstruct a packet from the MP fragment queue. 2516 * We go through increasing sequence numbers until we find a 2517 * complete packet, or we get to the sequence number for a fragment 2518 * which hasn't arrived but might still do so. 2519 */ 2520 static struct sk_buff * 2521 ppp_mp_reconstruct(struct ppp *ppp) 2522 { 2523 u32 seq = ppp->nextseq; 2524 u32 minseq = ppp->minseq; 2525 struct sk_buff_head *list = &ppp->mrq; 2526 struct sk_buff *p, *tmp; 2527 struct sk_buff *head, *tail; 2528 struct sk_buff *skb = NULL; 2529 int lost = 0, len = 0; 2530 2531 if (ppp->mrru == 0) /* do nothing until mrru is set */ 2532 return NULL; 2533 head = __skb_peek(list); 2534 tail = NULL; 2535 skb_queue_walk_safe(list, p, tmp) { 2536 again: 2537 if (seq_before(PPP_MP_CB(p)->sequence, seq)) { 2538 /* this can't happen, anyway ignore the skb */ 2539 netdev_err(ppp->dev, "ppp_mp_reconstruct bad " 2540 "seq %u < %u\n", 2541 PPP_MP_CB(p)->sequence, seq); 2542 __skb_unlink(p, list); 2543 kfree_skb(p); 2544 continue; 2545 } 2546 if (PPP_MP_CB(p)->sequence != seq) { 2547 u32 oldseq; 2548 /* Fragment `seq' is missing. If it is after 2549 minseq, it might arrive later, so stop here. */ 2550 if (seq_after(seq, minseq)) 2551 break; 2552 /* Fragment `seq' is lost, keep going. */ 2553 lost = 1; 2554 oldseq = seq; 2555 seq = seq_before(minseq, PPP_MP_CB(p)->sequence)? 2556 minseq + 1: PPP_MP_CB(p)->sequence; 2557 2558 if (ppp->debug & 1) 2559 netdev_printk(KERN_DEBUG, ppp->dev, 2560 "lost frag %u..%u\n", 2561 oldseq, seq-1); 2562 2563 goto again; 2564 } 2565 2566 /* 2567 * At this point we know that all the fragments from 2568 * ppp->nextseq to seq are either present or lost. 2569 * Also, there are no complete packets in the queue 2570 * that have no missing fragments and end before this 2571 * fragment. 2572 */ 2573 2574 /* B bit set indicates this fragment starts a packet */ 2575 if (PPP_MP_CB(p)->BEbits & B) { 2576 head = p; 2577 lost = 0; 2578 len = 0; 2579 } 2580 2581 len += p->len; 2582 2583 /* Got a complete packet yet? 
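		   (Editor's note: "complete" means the walk has seen a
		   B-flagged head fragment, the current fragment carries E,
		   and no sequence number in between was lost.)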
*/ 2584 if (lost == 0 && (PPP_MP_CB(p)->BEbits & E) && 2585 (PPP_MP_CB(head)->BEbits & B)) { 2586 if (len > ppp->mrru + 2) { 2587 ++ppp->dev->stats.rx_length_errors; 2588 netdev_printk(KERN_DEBUG, ppp->dev, 2589 "PPP: reconstructed packet" 2590 " is too long (%d)\n", len); 2591 } else { 2592 tail = p; 2593 break; 2594 } 2595 ppp->nextseq = seq + 1; 2596 } 2597 2598 /* 2599 * If this is the ending fragment of a packet, 2600 * and we haven't found a complete valid packet yet, 2601 * we can discard up to and including this fragment. 2602 */ 2603 if (PPP_MP_CB(p)->BEbits & E) { 2604 struct sk_buff *tmp2; 2605 2606 skb_queue_reverse_walk_from_safe(list, p, tmp2) { 2607 if (ppp->debug & 1) 2608 netdev_printk(KERN_DEBUG, ppp->dev, 2609 "discarding frag %u\n", 2610 PPP_MP_CB(p)->sequence); 2611 __skb_unlink(p, list); 2612 kfree_skb(p); 2613 } 2614 head = skb_peek(list); 2615 if (!head) 2616 break; 2617 } 2618 ++seq; 2619 } 2620 2621 /* If we have a complete packet, copy it all into one skb. */ 2622 if (tail != NULL) { 2623 /* If we have discarded any fragments, 2624 signal a receive error. */ 2625 if (PPP_MP_CB(head)->sequence != ppp->nextseq) { 2626 skb_queue_walk_safe(list, p, tmp) { 2627 if (p == head) 2628 break; 2629 if (ppp->debug & 1) 2630 netdev_printk(KERN_DEBUG, ppp->dev, 2631 "discarding frag %u\n", 2632 PPP_MP_CB(p)->sequence); 2633 __skb_unlink(p, list); 2634 kfree_skb(p); 2635 } 2636 2637 if (ppp->debug & 1) 2638 netdev_printk(KERN_DEBUG, ppp->dev, 2639 " missed pkts %u..%u\n", 2640 ppp->nextseq, 2641 PPP_MP_CB(head)->sequence-1); 2642 ++ppp->dev->stats.rx_dropped; 2643 ppp_receive_error(ppp); 2644 } 2645 2646 skb = head; 2647 if (head != tail) { 2648 struct sk_buff **fragpp = &skb_shinfo(skb)->frag_list; 2649 p = skb_queue_next(list, head); 2650 __skb_unlink(skb, list); 2651 skb_queue_walk_from_safe(list, p, tmp) { 2652 __skb_unlink(p, list); 2653 *fragpp = p; 2654 p->next = NULL; 2655 fragpp = &p->next; 2656 2657 skb->len += p->len; 2658 skb->data_len += p->len; 2659 skb->truesize += p->truesize; 2660 2661 if (p == tail) 2662 break; 2663 } 2664 } else { 2665 __skb_unlink(skb, list); 2666 } 2667 2668 ppp->nextseq = PPP_MP_CB(tail)->sequence + 1; 2669 } 2670 2671 return skb; 2672 } 2673 #endif /* CONFIG_PPP_MULTILINK */ 2674 2675 /* 2676 * Channel interface. 2677 */ 2678 2679 /* Create a new, unattached ppp channel. */ 2680 int ppp_register_channel(struct ppp_channel *chan) 2681 { 2682 return ppp_register_net_channel(current->nsproxy->net_ns, chan); 2683 } 2684 2685 /* Create a new, unattached ppp channel for specified net. */ 2686 int ppp_register_net_channel(struct net *net, struct ppp_channel *chan) 2687 { 2688 struct channel *pch; 2689 struct ppp_net *pn; 2690 2691 pch = kzalloc(sizeof(struct channel), GFP_KERNEL); 2692 if (!pch) 2693 return -ENOMEM; 2694 2695 pn = ppp_pernet(net); 2696 2697 pch->ppp = NULL; 2698 pch->chan = chan; 2699 pch->chan_net = get_net(net); 2700 chan->ppp = pch; 2701 init_ppp_file(&pch->file, CHANNEL); 2702 pch->file.hdrlen = chan->hdrlen; 2703 #ifdef CONFIG_PPP_MULTILINK 2704 pch->lastseq = -1; 2705 #endif /* CONFIG_PPP_MULTILINK */ 2706 init_rwsem(&pch->chan_sem); 2707 spin_lock_init(&pch->downl); 2708 rwlock_init(&pch->upl); 2709 2710 spin_lock_bh(&pn->all_channels_lock); 2711 pch->file.index = ++pn->last_channel_index; 2712 list_add(&pch->list, &pn->new_channels); 2713 atomic_inc(&channel_count); 2714 spin_unlock_bh(&pn->all_channels_lock); 2715 2716 return 0; 2717 } 2718 2719 /* 2720 * Return the index of a channel. 
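 *
 * (Editor's sketch with hypothetical names: the index is the one
 * assigned at registration time, e.g.
 *
 *	priv->chan.private = priv;
 *	priv->chan.ops = &my_channel_ops;
 *	priv->chan.mtu = 1500;
 *	priv->chan.hdrlen = 0;
 *	err = ppp_register_net_channel(net, &priv->chan);
 *	if (!err)
 *		idx = ppp_channel_index(&priv->chan);
 *
 * pppd can then attach to the channel by this index.)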
2721  */
2722 int ppp_channel_index(struct ppp_channel *chan)
2723 {
2724 	struct channel *pch = chan->ppp;
2725 
2726 	if (pch)
2727 		return pch->file.index;
2728 	return -1;
2729 }
2730 
2731 /*
2732  * Return the PPP unit number to which a channel is connected.
2733  */
2734 int ppp_unit_number(struct ppp_channel *chan)
2735 {
2736 	struct channel *pch = chan->ppp;
2737 	int unit = -1;
2738 
2739 	if (pch) {
2740 		read_lock_bh(&pch->upl);
2741 		if (pch->ppp)
2742 			unit = pch->ppp->file.index;
2743 		read_unlock_bh(&pch->upl);
2744 	}
2745 	return unit;
2746 }
2747 
2748 /*
2749  * Return the PPP device interface name of a channel.
2750  */
2751 char *ppp_dev_name(struct ppp_channel *chan)
2752 {
2753 	struct channel *pch = chan->ppp;
2754 	char *name = NULL;
2755 
2756 	if (pch) {
2757 		read_lock_bh(&pch->upl);
2758 		if (pch->ppp && pch->ppp->dev)
2759 			name = pch->ppp->dev->name;
2760 		read_unlock_bh(&pch->upl);
2761 	}
2762 	return name;
2763 }
2764 
2765 
2766 /*
2767  * Disconnect a channel from the generic layer.
2768  * This must be called in process context.
2769  */
2770 void
2771 ppp_unregister_channel(struct ppp_channel *chan)
2772 {
2773 	struct channel *pch = chan->ppp;
2774 	struct ppp_net *pn;
2775 
2776 	if (!pch)
2777 		return;		/* should never happen */
2778 
2779 	chan->ppp = NULL;
2780 
2781 	/*
2782 	 * This ensures that we have returned from any calls into
2783 	 * the channel's start_xmit or ioctl routine before we proceed.
2784 	 */
2785 	down_write(&pch->chan_sem);
2786 	spin_lock_bh(&pch->downl);
2787 	pch->chan = NULL;
2788 	spin_unlock_bh(&pch->downl);
2789 	up_write(&pch->chan_sem);
2790 	ppp_disconnect_channel(pch);
2791 
2792 	pn = ppp_pernet(pch->chan_net);
2793 	spin_lock_bh(&pn->all_channels_lock);
2794 	list_del(&pch->list);
2795 	spin_unlock_bh(&pn->all_channels_lock);
2796 
2797 	pch->file.dead = 1;
2798 	wake_up_interruptible(&pch->file.rwait);
2799 	if (refcount_dec_and_test(&pch->file.refcnt))
2800 		ppp_destroy_channel(pch);
2801 }
2802 
2803 /*
2804  * Callback from a channel when it can accept more to transmit.
2805  * This should be called at BH/softirq level, not interrupt level.
2806  */
2807 void
2808 ppp_output_wakeup(struct ppp_channel *chan)
2809 {
2810 	struct channel *pch = chan->ppp;
2811 
2812 	if (!pch)
2813 		return;
2814 	ppp_channel_push(pch);
2815 }
2816 
2817 /*
2818  * Compression control.
2819  */
2820 
2821 /* Process the PPPIOCSCOMPRESS ioctl.
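 *
 * (Editor's note: the ioctl hands in a raw CCP option; ccp_option[0]
 * is the compressor type and ccp_option[1] the option length, so a
 * Deflate request would arrive as { CI_DEFLATE, CILEN_DEFLATE, ... }.
 * data->transmit selects the compress side, otherwise the decompress
 * side is (re)configured.)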
 */
2822 static int
2823 ppp_set_compress(struct ppp *ppp, struct ppp_option_data *data)
2824 {
2825 	int err = -EFAULT;
2826 	struct compressor *cp, *ocomp;
2827 	void *state, *ostate;
2828 	unsigned char ccp_option[CCP_MAX_OPTION_LENGTH];
2829 
2830 	if (data->length > CCP_MAX_OPTION_LENGTH)
2831 		goto out;
2832 	if (copy_from_user(ccp_option, data->ptr, data->length))
2833 		goto out;
2834 
2835 	err = -EINVAL;
2836 	if (data->length < 2 || ccp_option[1] < 2 || ccp_option[1] > data->length)
2837 		goto out;
2838 
2839 	cp = try_then_request_module(
2840 		find_compressor(ccp_option[0]),
2841 		"ppp-compress-%d", ccp_option[0]);
2842 	if (!cp)
2843 		goto out;
2844 
2845 	err = -ENOBUFS;
2846 	if (data->transmit) {
2847 		state = cp->comp_alloc(ccp_option, data->length);
2848 		if (state) {
2849 			ppp_xmit_lock(ppp);
2850 			ppp->xstate &= ~SC_COMP_RUN;
2851 			ocomp = ppp->xcomp;
2852 			ostate = ppp->xc_state;
2853 			ppp->xcomp = cp;
2854 			ppp->xc_state = state;
2855 			ppp_xmit_unlock(ppp);
2856 			if (ostate) {
2857 				ocomp->comp_free(ostate);
2858 				module_put(ocomp->owner);
2859 			}
2860 			err = 0;
2861 		} else
2862 			module_put(cp->owner);
2863 
2864 	} else {
2865 		state = cp->decomp_alloc(ccp_option, data->length);
2866 		if (state) {
2867 			ppp_recv_lock(ppp);
2868 			ppp->rstate &= ~SC_DECOMP_RUN;
2869 			ocomp = ppp->rcomp;
2870 			ostate = ppp->rc_state;
2871 			ppp->rcomp = cp;
2872 			ppp->rc_state = state;
2873 			ppp_recv_unlock(ppp);
2874 			if (ostate) {
2875 				ocomp->decomp_free(ostate);
2876 				module_put(ocomp->owner);
2877 			}
2878 			err = 0;
2879 		} else
2880 			module_put(cp->owner);
2881 	}
2882 
2883  out:
2884 	return err;
2885 }
2886 
2887 /*
2888  * Look at a CCP packet and update our state accordingly.
2889  * We assume the caller has the xmit or recv path locked.
2890  */
2891 static void
2892 ppp_ccp_peek(struct ppp *ppp, struct sk_buff *skb, int inbound)
2893 {
2894 	unsigned char *dp;
2895 	int len;
2896 
2897 	if (!pskb_may_pull(skb, CCP_HDRLEN + 2))
2898 		return;	/* no header */
2899 	dp = skb->data + 2;
2900 
2901 	switch (CCP_CODE(dp)) {
2902 	case CCP_CONFREQ:
2903 
2904 		/* A ConfReq starts negotiation of compression
2905 		 * in one direction of transmission,
2906 		 * and hence brings it down...but which way?
2907 		 *
2908 		 * Remember:
2909 		 * A ConfReq indicates what the sender would like to receive
2910 		 */
2911 		if (inbound)
2912 			/* He is proposing what I should send */
2913 			ppp->xstate &= ~SC_COMP_RUN;
2914 		else
2915 			/* I am proposing what he should send */
2916 			ppp->rstate &= ~SC_DECOMP_RUN;
2917 
2918 		break;
2919 
2920 	case CCP_TERMREQ:
2921 	case CCP_TERMACK:
2922 		/*
2923 		 * CCP is going down, both directions of transmission
2924 		 */
2925 		ppp->rstate &= ~SC_DECOMP_RUN;
2926 		ppp->xstate &= ~SC_COMP_RUN;
2927 		break;
2928 
2929 	case CCP_CONFACK:
2930 		if ((ppp->flags & (SC_CCP_OPEN | SC_CCP_UP)) != SC_CCP_OPEN)
2931 			break;
2932 		len = CCP_LENGTH(dp);
2933 		if (!pskb_may_pull(skb, len + 2))
2934 			return;	/* too short */
2935 		dp += CCP_HDRLEN;
2936 		len -= CCP_HDRLEN;
2937 		if (len < CCP_OPT_MINLEN || len < CCP_OPT_LENGTH(dp))
2938 			break;
2939 		if (inbound) {
2940 			/* we will start receiving compressed packets */
2941 			if (!ppp->rc_state)
2942 				break;
2943 			if (ppp->rcomp->decomp_init(ppp->rc_state, dp, len,
2944 					ppp->file.index, 0, ppp->mru, ppp->debug)) {
2945 				ppp->rstate |= SC_DECOMP_RUN;
2946 				ppp->rstate &= ~(SC_DC_ERROR | SC_DC_FERROR);
2947 			}
2948 		} else {
2949 			/* we will soon start sending compressed packets */
2950 			if (!ppp->xc_state)
2951 				break;
2952 			if (ppp->xcomp->comp_init(ppp->xc_state, dp, len,
2953 					ppp->file.index, 0, ppp->debug))
2954 				ppp->xstate |= SC_COMP_RUN;
2955 		}
2956 		break;
2957 
2958 	case CCP_RESETACK:
2959 		/* reset the [de]compressor */
2960 		if ((ppp->flags & SC_CCP_UP) == 0)
2961 			break;
2962 		if (inbound) {
2963 			if (ppp->rc_state && (ppp->rstate & SC_DECOMP_RUN)) {
2964 				ppp->rcomp->decomp_reset(ppp->rc_state);
2965 				ppp->rstate &= ~SC_DC_ERROR;
2966 			}
2967 		} else {
2968 			if (ppp->xc_state && (ppp->xstate & SC_COMP_RUN))
2969 				ppp->xcomp->comp_reset(ppp->xc_state);
2970 		}
2971 		break;
2972 	}
2973 }
2974 
2975 /* Free up compression resources. */
2976 static void
2977 ppp_ccp_closed(struct ppp *ppp)
2978 {
2979 	void *xstate, *rstate;
2980 	struct compressor *xcomp, *rcomp;
2981 
2982 	ppp_lock(ppp);
2983 	ppp->flags &= ~(SC_CCP_OPEN | SC_CCP_UP);
2984 	ppp->xstate = 0;
2985 	xcomp = ppp->xcomp;
2986 	xstate = ppp->xc_state;
2987 	ppp->xc_state = NULL;
2988 	ppp->rstate = 0;
2989 	rcomp = ppp->rcomp;
2990 	rstate = ppp->rc_state;
2991 	ppp->rc_state = NULL;
2992 	ppp_unlock(ppp);
2993 
2994 	if (xstate) {
2995 		xcomp->comp_free(xstate);
2996 		module_put(xcomp->owner);
2997 	}
2998 	if (rstate) {
2999 		rcomp->decomp_free(rstate);
3000 		module_put(rcomp->owner);
3001 	}
3002 }
3003 
3004 /* List of compressors.
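 *
 * (Editor's sketch: a compressor module, e.g. bsd_comp, registers a
 * static struct compressor at init time with hypothetical code like
 *
 *	static struct compressor my_comp = {
 *		.compress_proto = CI_BSD_COMPRESS,
 *		.comp_alloc = my_comp_alloc,
 *		.decomp_alloc = my_decomp_alloc,
 *		... remaining ops and .owner = THIS_MODULE ...
 *	};
 *	ret = ppp_register_compressor(&my_comp);
 *
 * find_compressor() below matches requests on .compress_proto.)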
 */
3005 static LIST_HEAD(compressor_list);
3006 static DEFINE_SPINLOCK(compressor_list_lock);
3007 
3008 struct compressor_entry {
3009 	struct list_head list;
3010 	struct compressor *comp;
3011 };
3012 
3013 static struct compressor_entry *
3014 find_comp_entry(int proto)
3015 {
3016 	struct compressor_entry *ce;
3017 
3018 	list_for_each_entry(ce, &compressor_list, list) {
3019 		if (ce->comp->compress_proto == proto)
3020 			return ce;
3021 	}
3022 	return NULL;
3023 }
3024 
3025 /* Register a compressor */
3026 int
3027 ppp_register_compressor(struct compressor *cp)
3028 {
3029 	struct compressor_entry *ce;
3030 	int ret;
3031 	spin_lock(&compressor_list_lock);
3032 	ret = -EEXIST;
3033 	if (find_comp_entry(cp->compress_proto))
3034 		goto out;
3035 	ret = -ENOMEM;
3036 	ce = kmalloc(sizeof(struct compressor_entry), GFP_ATOMIC);
3037 	if (!ce)
3038 		goto out;
3039 	ret = 0;
3040 	ce->comp = cp;
3041 	list_add(&ce->list, &compressor_list);
3042  out:
3043 	spin_unlock(&compressor_list_lock);
3044 	return ret;
3045 }
3046 
3047 /* Unregister a compressor */
3048 void
3049 ppp_unregister_compressor(struct compressor *cp)
3050 {
3051 	struct compressor_entry *ce;
3052 
3053 	spin_lock(&compressor_list_lock);
3054 	ce = find_comp_entry(cp->compress_proto);
3055 	if (ce && ce->comp == cp) {
3056 		list_del(&ce->list);
3057 		kfree(ce);
3058 	}
3059 	spin_unlock(&compressor_list_lock);
3060 }
3061 
3062 /* Find a compressor. */
3063 static struct compressor *
3064 find_compressor(int type)
3065 {
3066 	struct compressor_entry *ce;
3067 	struct compressor *cp = NULL;
3068 
3069 	spin_lock(&compressor_list_lock);
3070 	ce = find_comp_entry(type);
3071 	if (ce) {
3072 		cp = ce->comp;
3073 		if (!try_module_get(cp->owner))
3074 			cp = NULL;
3075 	}
3076 	spin_unlock(&compressor_list_lock);
3077 	return cp;
3078 }
3079 
3080 /*
3081  * Miscellaneous stuff.
3082  */
3083 
3084 static void
3085 ppp_get_stats(struct ppp *ppp, struct ppp_stats *st)
3086 {
3087 	struct slcompress *vj = ppp->vj;
3088 
3089 	memset(st, 0, sizeof(*st));
3090 	st->p.ppp_ipackets = ppp->stats64.rx_packets;
3091 	st->p.ppp_ierrors = ppp->dev->stats.rx_errors;
3092 	st->p.ppp_ibytes = ppp->stats64.rx_bytes;
3093 	st->p.ppp_opackets = ppp->stats64.tx_packets;
3094 	st->p.ppp_oerrors = ppp->dev->stats.tx_errors;
3095 	st->p.ppp_obytes = ppp->stats64.tx_bytes;
3096 	if (!vj)
3097 		return;
3098 	st->vj.vjs_packets = vj->sls_o_compressed + vj->sls_o_uncompressed;
3099 	st->vj.vjs_compressed = vj->sls_o_compressed;
3100 	st->vj.vjs_searches = vj->sls_o_searches;
3101 	st->vj.vjs_misses = vj->sls_o_misses;
3102 	st->vj.vjs_errorin = vj->sls_i_error;
3103 	st->vj.vjs_tossed = vj->sls_i_tossed;
3104 	st->vj.vjs_uncompressedin = vj->sls_i_uncompressed;
3105 	st->vj.vjs_compressedin = vj->sls_i_compressed;
3106 }
3107 
3108 /*
3109  * Stuff for handling the lists of ppp units and channels
3110  * and for initialization.
3111  */
3112 
3113 /*
3114  * Create a new ppp interface unit. Fails if it can't allocate memory
3115  * or if there is already a unit with the requested number.
3116  * unit == -1 means allocate a new number.
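 *
 * (Editor's note: the unit number serves both as the idr key in
 * pn->units_idr and, when no explicit ifname was requested, as the N
 * in the "pppN" interface name; unit == -1 makes unit_get() hand out
 * the lowest free number.)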
3117 */ 3118 static int ppp_create_interface(struct net *net, struct file *file, int *unit) 3119 { 3120 struct ppp_config conf = { 3121 .file = file, 3122 .unit = *unit, 3123 .ifname_is_set = false, 3124 }; 3125 struct net_device *dev; 3126 struct ppp *ppp; 3127 int err; 3128 3129 dev = alloc_netdev(sizeof(struct ppp), "", NET_NAME_ENUM, ppp_setup); 3130 if (!dev) { 3131 err = -ENOMEM; 3132 goto err; 3133 } 3134 dev_net_set(dev, net); 3135 dev->rtnl_link_ops = &ppp_link_ops; 3136 3137 rtnl_lock(); 3138 3139 err = ppp_dev_configure(net, dev, &conf); 3140 if (err < 0) 3141 goto err_dev; 3142 ppp = netdev_priv(dev); 3143 *unit = ppp->file.index; 3144 3145 rtnl_unlock(); 3146 3147 return 0; 3148 3149 err_dev: 3150 rtnl_unlock(); 3151 free_netdev(dev); 3152 err: 3153 return err; 3154 } 3155 3156 /* 3157 * Initialize a ppp_file structure. 3158 */ 3159 static void 3160 init_ppp_file(struct ppp_file *pf, int kind) 3161 { 3162 pf->kind = kind; 3163 skb_queue_head_init(&pf->xq); 3164 skb_queue_head_init(&pf->rq); 3165 refcount_set(&pf->refcnt, 1); 3166 init_waitqueue_head(&pf->rwait); 3167 } 3168 3169 /* 3170 * Free the memory used by a ppp unit. This is only called once 3171 * there are no channels connected to the unit and no file structs 3172 * that reference the unit. 3173 */ 3174 static void ppp_destroy_interface(struct ppp *ppp) 3175 { 3176 atomic_dec(&ppp_unit_count); 3177 3178 if (!ppp->file.dead || ppp->n_channels) { 3179 /* "can't happen" */ 3180 netdev_err(ppp->dev, "ppp: destroying ppp struct %p " 3181 "but dead=%d n_channels=%d !\n", 3182 ppp, ppp->file.dead, ppp->n_channels); 3183 return; 3184 } 3185 3186 ppp_ccp_closed(ppp); 3187 if (ppp->vj) { 3188 slhc_free(ppp->vj); 3189 ppp->vj = NULL; 3190 } 3191 skb_queue_purge(&ppp->file.xq); 3192 skb_queue_purge(&ppp->file.rq); 3193 #ifdef CONFIG_PPP_MULTILINK 3194 skb_queue_purge(&ppp->mrq); 3195 #endif /* CONFIG_PPP_MULTILINK */ 3196 #ifdef CONFIG_PPP_FILTER 3197 if (ppp->pass_filter) { 3198 bpf_prog_destroy(ppp->pass_filter); 3199 ppp->pass_filter = NULL; 3200 } 3201 3202 if (ppp->active_filter) { 3203 bpf_prog_destroy(ppp->active_filter); 3204 ppp->active_filter = NULL; 3205 } 3206 #endif /* CONFIG_PPP_FILTER */ 3207 3208 kfree_skb(ppp->xmit_pending); 3209 free_percpu(ppp->xmit_recursion); 3210 3211 free_netdev(ppp->dev); 3212 } 3213 3214 /* 3215 * Locate an existing ppp unit. 3216 * The caller should have locked the all_ppp_mutex. 3217 */ 3218 static struct ppp * 3219 ppp_find_unit(struct ppp_net *pn, int unit) 3220 { 3221 return unit_find(&pn->units_idr, unit); 3222 } 3223 3224 /* 3225 * Locate an existing ppp channel. 3226 * The caller should have locked the all_channels_lock. 3227 * First we look in the new_channels list, then in the 3228 * all_channels list. If found in the new_channels list, 3229 * we move it to the all_channels list. This is for speed 3230 * when we have a lot of channels in use. 3231 */ 3232 static struct channel * 3233 ppp_find_channel(struct ppp_net *pn, int unit) 3234 { 3235 struct channel *pch; 3236 3237 list_for_each_entry(pch, &pn->new_channels, list) { 3238 if (pch->file.index == unit) { 3239 list_move(&pch->list, &pn->all_channels); 3240 return pch; 3241 } 3242 } 3243 3244 list_for_each_entry(pch, &pn->all_channels, list) { 3245 if (pch->file.index == unit) 3246 return pch; 3247 } 3248 3249 return NULL; 3250 } 3251 3252 /* 3253 * Connect a PPP channel to a PPP interface unit. 
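 *
 * (Editor's note on lock order below, outermost first:
 * pn->all_ppp_mutex, then pch->upl taken for write, then the unit's
 * transmit and receive locks via ppp_lock(), and finally pch->downl
 * for the "is the channel still registered" check.)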
3254 */ 3255 static int 3256 ppp_connect_channel(struct channel *pch, int unit) 3257 { 3258 struct ppp *ppp; 3259 struct ppp_net *pn; 3260 int ret = -ENXIO; 3261 int hdrlen; 3262 3263 pn = ppp_pernet(pch->chan_net); 3264 3265 mutex_lock(&pn->all_ppp_mutex); 3266 ppp = ppp_find_unit(pn, unit); 3267 if (!ppp) 3268 goto out; 3269 write_lock_bh(&pch->upl); 3270 ret = -EINVAL; 3271 if (pch->ppp) 3272 goto outl; 3273 3274 ppp_lock(ppp); 3275 spin_lock_bh(&pch->downl); 3276 if (!pch->chan) { 3277 /* Don't connect unregistered channels */ 3278 spin_unlock_bh(&pch->downl); 3279 ppp_unlock(ppp); 3280 ret = -ENOTCONN; 3281 goto outl; 3282 } 3283 spin_unlock_bh(&pch->downl); 3284 if (pch->file.hdrlen > ppp->file.hdrlen) 3285 ppp->file.hdrlen = pch->file.hdrlen; 3286 hdrlen = pch->file.hdrlen + 2; /* for protocol bytes */ 3287 if (hdrlen > ppp->dev->hard_header_len) 3288 ppp->dev->hard_header_len = hdrlen; 3289 list_add_tail(&pch->clist, &ppp->channels); 3290 ++ppp->n_channels; 3291 pch->ppp = ppp; 3292 refcount_inc(&ppp->file.refcnt); 3293 ppp_unlock(ppp); 3294 ret = 0; 3295 3296 outl: 3297 write_unlock_bh(&pch->upl); 3298 out: 3299 mutex_unlock(&pn->all_ppp_mutex); 3300 return ret; 3301 } 3302 3303 /* 3304 * Disconnect a channel from its ppp unit. 3305 */ 3306 static int 3307 ppp_disconnect_channel(struct channel *pch) 3308 { 3309 struct ppp *ppp; 3310 int err = -EINVAL; 3311 3312 write_lock_bh(&pch->upl); 3313 ppp = pch->ppp; 3314 pch->ppp = NULL; 3315 write_unlock_bh(&pch->upl); 3316 if (ppp) { 3317 /* remove it from the ppp unit's list */ 3318 ppp_lock(ppp); 3319 list_del(&pch->clist); 3320 if (--ppp->n_channels == 0) 3321 wake_up_interruptible(&ppp->file.rwait); 3322 ppp_unlock(ppp); 3323 if (refcount_dec_and_test(&ppp->file.refcnt)) 3324 ppp_destroy_interface(ppp); 3325 err = 0; 3326 } 3327 return err; 3328 } 3329 3330 /* 3331 * Free up the resources used by a ppp channel. 3332 */ 3333 static void ppp_destroy_channel(struct channel *pch) 3334 { 3335 put_net(pch->chan_net); 3336 pch->chan_net = NULL; 3337 3338 atomic_dec(&channel_count); 3339 3340 if (!pch->file.dead) { 3341 /* "can't happen" */ 3342 pr_err("ppp: destroying undead channel %p !\n", pch); 3343 return; 3344 } 3345 skb_queue_purge(&pch->file.xq); 3346 skb_queue_purge(&pch->file.rq); 3347 kfree(pch); 3348 } 3349 3350 static void __exit ppp_cleanup(void) 3351 { 3352 /* should never happen */ 3353 if (atomic_read(&ppp_unit_count) || atomic_read(&channel_count)) 3354 pr_err("PPP: removing module but units remain!\n"); 3355 rtnl_link_unregister(&ppp_link_ops); 3356 unregister_chrdev(PPP_MAJOR, "ppp"); 3357 device_destroy(ppp_class, MKDEV(PPP_MAJOR, 0)); 3358 class_destroy(ppp_class); 3359 unregister_pernet_device(&ppp_net_ops); 3360 } 3361 3362 /* 3363 * Units handling. 
Caller must protect concurrent access 3364 * by holding all_ppp_mutex 3365 */ 3366 3367 /* associate pointer with specified number */ 3368 static int unit_set(struct idr *p, void *ptr, int n) 3369 { 3370 int unit; 3371 3372 unit = idr_alloc(p, ptr, n, n + 1, GFP_KERNEL); 3373 if (unit == -ENOSPC) 3374 unit = -EINVAL; 3375 return unit; 3376 } 3377 3378 /* get new free unit number and associate pointer with it */ 3379 static int unit_get(struct idr *p, void *ptr) 3380 { 3381 return idr_alloc(p, ptr, 0, 0, GFP_KERNEL); 3382 } 3383 3384 /* put unit number back to a pool */ 3385 static void unit_put(struct idr *p, int n) 3386 { 3387 idr_remove(p, n); 3388 } 3389 3390 /* get pointer associated with the number */ 3391 static void *unit_find(struct idr *p, int n) 3392 { 3393 return idr_find(p, n); 3394 } 3395 3396 /* Module/initialization stuff */ 3397 3398 module_init(ppp_init); 3399 module_exit(ppp_cleanup); 3400 3401 EXPORT_SYMBOL(ppp_register_net_channel); 3402 EXPORT_SYMBOL(ppp_register_channel); 3403 EXPORT_SYMBOL(ppp_unregister_channel); 3404 EXPORT_SYMBOL(ppp_channel_index); 3405 EXPORT_SYMBOL(ppp_unit_number); 3406 EXPORT_SYMBOL(ppp_dev_name); 3407 EXPORT_SYMBOL(ppp_input); 3408 EXPORT_SYMBOL(ppp_input_error); 3409 EXPORT_SYMBOL(ppp_output_wakeup); 3410 EXPORT_SYMBOL(ppp_register_compressor); 3411 EXPORT_SYMBOL(ppp_unregister_compressor); 3412 MODULE_LICENSE("GPL"); 3413 MODULE_ALIAS_CHARDEV(PPP_MAJOR, 0); 3414 MODULE_ALIAS_RTNL_LINK("ppp"); 3415 MODULE_ALIAS("devname:ppp"); 3416
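/*
 * (Editor's usage sketch, hypothetical and abridged: userspace creates
 * a new unit by opening /dev/ppp and issuing PPPIOCNEWUNIT, which is
 * what ends up in ppp_create_interface() above:
 *
 *	int fd = open("/dev/ppp", O_RDWR);
 *	int unit = -1;			(-1: pick the lowest free unit)
 *	ioctl(fd, PPPIOCNEWUNIT, &unit);
 *
 * pppd performs essentially this sequence when it brings up a link.)
 */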