/*
 *	ip6_flowlabel.c		IPv6 flowlabel manager.
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 *
 *	Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 */

#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/net.h>
#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <linux/in6.h>
#include <linux/route.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/pid_namespace.h>

#include <net/net_namespace.h>
#include <net/sock.h>

#include <net/ipv6.h>
#include <net/ndisc.h>
#include <net/protocol.h>
#include <net/ip6_route.h>
#include <net/addrconf.h>
#include <net/rawv6.h>
#include <net/icmp.h>
#include <net/transp_v6.h>

#include <asm/uaccess.h>

#define FL_MIN_LINGER	6	/* Minimal linger. Six seconds, the value
				   specified by the original IPv6 RFC;
				   still a reasonable default. */
#define FL_MAX_LINGER	60	/* Maximal linger timeout */

/* FL hash table */

#define FL_MAX_PER_SOCK	32
#define FL_MAX_SIZE	4096
#define FL_HASH_MASK	255
#define FL_HASH(l)	(ntohl(l) & FL_HASH_MASK)

static atomic_t fl_size = ATOMIC_INIT(0);
static struct ip6_flowlabel __rcu *fl_ht[FL_HASH_MASK+1];

static void ip6_fl_gc(unsigned long dummy);
static DEFINE_TIMER(ip6_fl_gc_timer, ip6_fl_gc, 0, 0);

/* FL hash table lock: it protects only the GC */

static DEFINE_SPINLOCK(ip6_fl_lock);

/* Per-socket flowlabel list lock */

static DEFINE_SPINLOCK(ip6_sk_fl_lock);

#define for_each_fl_rcu(hash, fl)				\
	for (fl = rcu_dereference_bh(fl_ht[(hash)]);		\
	     fl != NULL;					\
	     fl = rcu_dereference_bh(fl->next))
#define for_each_fl_continue_rcu(fl)				\
	for (fl = rcu_dereference_bh(fl->next);			\
	     fl != NULL;					\
	     fl = rcu_dereference_bh(fl->next))

#define for_each_sk_fl_rcu(np, sfl)				\
	for (sfl = rcu_dereference_bh(np->ipv6_fl_list);	\
	     sfl != NULL;					\
	     sfl = rcu_dereference_bh(sfl->next))

static inline struct ip6_flowlabel *__fl_lookup(struct net *net, __be32 label)
{
	struct ip6_flowlabel *fl;

	for_each_fl_rcu(FL_HASH(label), fl) {
		if (fl->label == label && net_eq(fl->fl_net, net))
			return fl;
	}
	return NULL;
}

static struct ip6_flowlabel *fl_lookup(struct net *net, __be32 label)
{
	struct ip6_flowlabel *fl;

	rcu_read_lock_bh();
	fl = __fl_lookup(net, label);
	if (fl && !atomic_inc_not_zero(&fl->users))
		fl = NULL;
	rcu_read_unlock_bh();
	return fl;
}

static void fl_free(struct ip6_flowlabel *fl)
{
	if (fl) {
		if (fl->share == IPV6_FL_S_PROCESS)
			put_pid(fl->owner.pid);
		release_net(fl->fl_net);
		kfree(fl->opt);
		kfree_rcu(fl, rcu);
	}
}
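/*
 * Reference rules: fl_lookup() takes a reference with
 * atomic_inc_not_zero() under rcu_read_lock_bh(), so a label whose
 * count already dropped to zero cannot be revived.  Every successful
 * lookup must be balanced by fl_release() below, which re-arms the GC
 * timer once the last user goes away.
 */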
static void fl_release(struct ip6_flowlabel *fl)
{
	spin_lock_bh(&ip6_fl_lock);

	fl->lastuse = jiffies;
	if (atomic_dec_and_test(&fl->users)) {
		unsigned long ttd = fl->lastuse + fl->linger;
		if (time_after(ttd, fl->expires))
			fl->expires = ttd;
		ttd = fl->expires;
		if (fl->opt && fl->share == IPV6_FL_S_EXCL) {
			struct ipv6_txoptions *opt = fl->opt;
			fl->opt = NULL;
			kfree(opt);
		}
		if (!timer_pending(&ip6_fl_gc_timer) ||
		    time_after(ip6_fl_gc_timer.expires, ttd))
			mod_timer(&ip6_fl_gc_timer, ttd);
	}
	spin_unlock_bh(&ip6_fl_lock);
}

static void ip6_fl_gc(unsigned long dummy)
{
	int i;
	unsigned long now = jiffies;
	unsigned long sched = 0;

	spin_lock(&ip6_fl_lock);

	for (i = 0; i <= FL_HASH_MASK; i++) {
		struct ip6_flowlabel *fl;
		struct ip6_flowlabel __rcu **flp;

		flp = &fl_ht[i];
		while ((fl = rcu_dereference_protected(*flp,
				lockdep_is_held(&ip6_fl_lock))) != NULL) {
			if (atomic_read(&fl->users) == 0) {
				unsigned long ttd = fl->lastuse + fl->linger;
				if (time_after(ttd, fl->expires))
					fl->expires = ttd;
				ttd = fl->expires;
				if (time_after_eq(now, ttd)) {
					*flp = fl->next;
					fl_free(fl);
					atomic_dec(&fl_size);
					continue;
				}
				if (!sched || time_before(ttd, sched))
					sched = ttd;
			}
			flp = &fl->next;
		}
	}
	if (!sched && atomic_read(&fl_size))
		sched = now + FL_MAX_LINGER;
	if (sched) {
		mod_timer(&ip6_fl_gc_timer, sched);
	}
	spin_unlock(&ip6_fl_lock);
}

static void __net_exit ip6_fl_purge(struct net *net)
{
	int i;

	spin_lock(&ip6_fl_lock);
	for (i = 0; i <= FL_HASH_MASK; i++) {
		struct ip6_flowlabel *fl;
		struct ip6_flowlabel __rcu **flp;

		flp = &fl_ht[i];
		while ((fl = rcu_dereference_protected(*flp,
				lockdep_is_held(&ip6_fl_lock))) != NULL) {
			if (net_eq(fl->fl_net, net) &&
			    atomic_read(&fl->users) == 0) {
				*flp = fl->next;
				fl_free(fl);
				atomic_dec(&fl_size);
				continue;
			}
			flp = &fl->next;
		}
	}
	spin_unlock(&ip6_fl_lock);
}
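/*
 * GC timing, as implemented above: an unused label (users == 0) lives
 * until max(lastuse + linger, expires).  ip6_fl_gc() frees everything
 * past that deadline and re-arms itself for the earliest remaining
 * deadline, so the timer only runs while labels actually exist.
 */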
static struct ip6_flowlabel *fl_intern(struct net *net,
				       struct ip6_flowlabel *fl, __be32 label)
{
	struct ip6_flowlabel *lfl;

	fl->label = label & IPV6_FLOWLABEL_MASK;

	spin_lock_bh(&ip6_fl_lock);
	if (label == 0) {
		for (;;) {
			fl->label = htonl(net_random()) & IPV6_FLOWLABEL_MASK;
			if (fl->label) {
				lfl = __fl_lookup(net, fl->label);
				if (lfl == NULL)
					break;
			}
		}
	} else {
		/*
		 * we dropped the ip6_fl_lock, so this entry could reappear
		 * and we need to recheck with it.
		 *
		 * OTOH no need to search the active socket first, like it is
		 * done in ipv6_flowlabel_opt - sock is locked, so new entry
		 * with the same label can only appear on another sock
		 */
		lfl = __fl_lookup(net, fl->label);
		if (lfl != NULL) {
			atomic_inc(&lfl->users);
			spin_unlock_bh(&ip6_fl_lock);
			return lfl;
		}
	}

	fl->lastuse = jiffies;
	fl->next = fl_ht[FL_HASH(fl->label)];
	rcu_assign_pointer(fl_ht[FL_HASH(fl->label)], fl);
	atomic_inc(&fl_size);
	spin_unlock_bh(&ip6_fl_lock);
	return NULL;
}
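/*
 * Note the fl_intern() contract: it returns NULL once @fl has been
 * hashed, or an existing entry (with a reference already taken) when
 * the same label raced in after the caller's unlocked lookup; the
 * caller then rechecks against that entry.
 */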
/* Socket flowlabel lists */

struct ip6_flowlabel *fl6_sock_lookup(struct sock *sk, __be32 label)
{
	struct ipv6_fl_socklist *sfl;
	struct ipv6_pinfo *np = inet6_sk(sk);

	label &= IPV6_FLOWLABEL_MASK;

	rcu_read_lock_bh();
	for_each_sk_fl_rcu(np, sfl) {
		struct ip6_flowlabel *fl = sfl->fl;
		if (fl->label == label) {
			fl->lastuse = jiffies;
			atomic_inc(&fl->users);
			rcu_read_unlock_bh();
			return fl;
		}
	}
	rcu_read_unlock_bh();
	return NULL;
}
EXPORT_SYMBOL_GPL(fl6_sock_lookup);

void fl6_free_socklist(struct sock *sk)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct ipv6_fl_socklist *sfl;

	if (!rcu_access_pointer(np->ipv6_fl_list))
		return;

	spin_lock_bh(&ip6_sk_fl_lock);
	while ((sfl = rcu_dereference_protected(np->ipv6_fl_list,
			lockdep_is_held(&ip6_sk_fl_lock))) != NULL) {
		np->ipv6_fl_list = sfl->next;
		spin_unlock_bh(&ip6_sk_fl_lock);

		fl_release(sfl->fl);
		kfree_rcu(sfl, rcu);

		spin_lock_bh(&ip6_sk_fl_lock);
	}
	spin_unlock_bh(&ip6_sk_fl_lock);
}

/* Service routines */

/*
 * This is the only tricky place: a flow label pins all extension
 * headers up to and including the routing header, but the user may
 * still supply options that follow the routing header.
 */

struct ipv6_txoptions *fl6_merge_options(struct ipv6_txoptions *opt_space,
					 struct ip6_flowlabel *fl,
					 struct ipv6_txoptions *fopt)
{
	struct ipv6_txoptions *fl_opt = fl->opt;

	if (fopt == NULL || fopt->opt_flen == 0)
		return fl_opt;

	if (fl_opt != NULL) {
		opt_space->hopopt = fl_opt->hopopt;
		opt_space->dst0opt = fl_opt->dst0opt;
		opt_space->srcrt = fl_opt->srcrt;
		opt_space->opt_nflen = fl_opt->opt_nflen;
	} else {
		if (fopt->opt_nflen == 0)
			return fopt;
		opt_space->hopopt = NULL;
		opt_space->dst0opt = NULL;
		opt_space->srcrt = NULL;
		opt_space->opt_nflen = 0;
	}
	opt_space->dst1opt = fopt->dst1opt;
	opt_space->opt_flen = fopt->opt_flen;
	return opt_space;
}
EXPORT_SYMBOL_GPL(fl6_merge_options);
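/*
 * Illustrative example of the merge rule above: if the flow label was
 * created with {hopopt, srcrt} and the sending call supplies a dst1opt,
 * the merged opt_space ends up as
 *
 *	hopopt, dst0opt, srcrt, opt_nflen	from the flow label
 *	dst1opt, opt_flen			from the caller's options
 *
 * i.e. everything before and including the routing header comes from
 * the label, and only options placed after the routing header
 * (dst1opt) may vary from call to call.
 */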
static unsigned long check_linger(unsigned long ttl)
{
	if (ttl < FL_MIN_LINGER)
		return FL_MIN_LINGER * HZ;
	if (ttl > FL_MAX_LINGER && !capable(CAP_NET_ADMIN))
		return 0;
	return ttl * HZ;
}

static int fl6_renew(struct ip6_flowlabel *fl, unsigned long linger, unsigned long expires)
{
	linger = check_linger(linger);
	if (!linger)
		return -EPERM;
	expires = check_linger(expires);
	if (!expires)
		return -EPERM;
	fl->lastuse = jiffies;
	if (time_before(fl->linger, linger))
		fl->linger = linger;
	if (time_before(expires, fl->linger))
		expires = fl->linger;
	if (time_before(fl->expires, fl->lastuse + expires))
		fl->expires = fl->lastuse + expires;
	return 0;
}

static struct ip6_flowlabel *
fl_create(struct net *net, struct sock *sk, struct in6_flowlabel_req *freq,
	  char __user *optval, int optlen, int *err_p)
{
	struct ip6_flowlabel *fl = NULL;
	int olen;
	int addr_type;
	int err;

	olen = optlen - CMSG_ALIGN(sizeof(*freq));
	err = -EINVAL;
	if (olen > 64 * 1024)
		goto done;

	err = -ENOMEM;
	fl = kzalloc(sizeof(*fl), GFP_KERNEL);
	if (fl == NULL)
		goto done;

	if (olen > 0) {
		struct msghdr msg;
		struct flowi6 flowi6;
		int junk;

		err = -ENOMEM;
		fl->opt = kmalloc(sizeof(*fl->opt) + olen, GFP_KERNEL);
		if (fl->opt == NULL)
			goto done;

		memset(fl->opt, 0, sizeof(*fl->opt));
		fl->opt->tot_len = sizeof(*fl->opt) + olen;
		err = -EFAULT;
		if (copy_from_user(fl->opt + 1, optval + CMSG_ALIGN(sizeof(*freq)), olen))
			goto done;

		msg.msg_controllen = olen;
		msg.msg_control = (void *)(fl->opt + 1);
		memset(&flowi6, 0, sizeof(flowi6));

		err = ip6_datagram_send_ctl(net, sk, &msg, &flowi6, fl->opt,
					    &junk, &junk, &junk);
		if (err)
			goto done;
		err = -EINVAL;
		if (fl->opt->opt_flen)
			goto done;
		if (fl->opt->opt_nflen == 0) {
			kfree(fl->opt);
			fl->opt = NULL;
		}
	}

	fl->fl_net = hold_net(net);
	fl->expires = jiffies;
	err = fl6_renew(fl, freq->flr_linger, freq->flr_expires);
	if (err)
		goto done;
	fl->share = freq->flr_share;
	addr_type = ipv6_addr_type(&freq->flr_dst);
	if ((addr_type & IPV6_ADDR_MAPPED) ||
	    addr_type == IPV6_ADDR_ANY) {
		err = -EINVAL;
		goto done;
	}
	fl->dst = freq->flr_dst;
	atomic_set(&fl->users, 1);
	switch (fl->share) {
	case IPV6_FL_S_EXCL:
	case IPV6_FL_S_ANY:
		break;
	case IPV6_FL_S_PROCESS:
		fl->owner.pid = get_task_pid(current, PIDTYPE_PID);
		break;
	case IPV6_FL_S_USER:
		fl->owner.uid = current_euid();
		break;
	default:
		err = -EINVAL;
		goto done;
	}
	return fl;

done:
	fl_free(fl);
	*err_p = err;
	return NULL;
}

static int mem_check(struct sock *sk)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct ipv6_fl_socklist *sfl;
	int room = FL_MAX_SIZE - atomic_read(&fl_size);
	int count = 0;

	if (room > FL_MAX_SIZE - FL_MAX_PER_SOCK)
		return 0;

	/* The socket list is traversed locklessly, so take the RCU
	 * read side around it. */
	rcu_read_lock_bh();
	for_each_sk_fl_rcu(np, sfl)
		count++;
	rcu_read_unlock_bh();

	if (room <= 0 ||
	    ((count >= FL_MAX_PER_SOCK ||
	      (count > 0 && room < FL_MAX_SIZE/2) || room < FL_MAX_SIZE/4) &&
	     !capable(CAP_NET_ADMIN)))
		return -ENOBUFS;

	return 0;
}

static bool ipv6_hdr_cmp(struct ipv6_opt_hdr *h1, struct ipv6_opt_hdr *h2)
{
	if (h1 == h2)
		return false;
	if (h1 == NULL || h2 == NULL)
		return true;
	if (h1->hdrlen != h2->hdrlen)
		return true;
	return memcmp(h1 + 1, h2 + 1, ((h1->hdrlen + 1) << 3) - sizeof(*h1));
}

static bool ipv6_opt_cmp(struct ipv6_txoptions *o1, struct ipv6_txoptions *o2)
{
	if (o1 == o2)
		return false;
	if (o1 == NULL || o2 == NULL)
		return true;
	if (o1->opt_nflen != o2->opt_nflen)
		return true;
	if (ipv6_hdr_cmp(o1->hopopt, o2->hopopt))
		return true;
	if (ipv6_hdr_cmp(o1->dst0opt, o2->dst0opt))
		return true;
	if (ipv6_hdr_cmp((struct ipv6_opt_hdr *)o1->srcrt, (struct ipv6_opt_hdr *)o2->srcrt))
		return true;
	return false;
}

static inline void fl_link(struct ipv6_pinfo *np, struct ipv6_fl_socklist *sfl,
			   struct ip6_flowlabel *fl)
{
	spin_lock_bh(&ip6_sk_fl_lock);
	sfl->fl = fl;
	sfl->next = np->ipv6_fl_list;
	rcu_assign_pointer(np->ipv6_fl_list, sfl);
	spin_unlock_bh(&ip6_sk_fl_lock);
}
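/*
 * ipv6_flowlabel_opt() below implements the IPV6_FLOWLABEL_MGR
 * setsockopt().  freq.flr_action selects one of:
 *
 *	IPV6_FL_A_PUT	- detach the label from this socket
 *	IPV6_FL_A_RENEW	- extend linger/expiry of an attached label
 *	IPV6_FL_A_GET	- attach an existing label, or create a new one
 *			  when IPV6_FL_F_CREATE is set in flr_flags
 *
 * Userspace usage sketch (illustrative only, not compiled as part of
 * this file; the destination 2001:db8::1 and port 4242 are made-up
 * values and error checking is omitted for brevity):
 *
 *	#include <string.h>
 *	#include <arpa/inet.h>
 *	#include <sys/socket.h>
 *	#include <linux/in6.h>
 *
 *	int fd = socket(AF_INET6, SOCK_DGRAM, 0);
 *	struct sockaddr_in6 dst = { .sin6_family = AF_INET6,
 *				    .sin6_port = htons(4242) };
 *	struct in6_flowlabel_req freq;
 *	int on = 1;
 *
 *	inet_pton(AF_INET6, "2001:db8::1", &dst.sin6_addr);
 *
 *	memset(&freq, 0, sizeof(freq));
 *	freq.flr_action = IPV6_FL_A_GET;
 *	freq.flr_flags  = IPV6_FL_F_CREATE;
 *	freq.flr_share  = IPV6_FL_S_EXCL;
 *	freq.flr_label  = 0;		(0 lets the kernel pick a label)
 *	freq.flr_dst    = dst.sin6_addr;
 *	setsockopt(fd, IPPROTO_IPV6, IPV6_FLOWLABEL_MGR,
 *		   &freq, sizeof(freq));
 *
 * The kernel copies the chosen label back into freq.flr_label, so the
 * caller can place it in sin6_flowinfo and send with it:
 *
 *	dst.sin6_flowinfo = freq.flr_label;
 *	setsockopt(fd, IPPROTO_IPV6, IPV6_FLOWINFO_SEND, &on, sizeof(on));
 *	sendto(fd, "x", 1, 0, (struct sockaddr *)&dst, sizeof(dst));
 */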
int ipv6_flowlabel_opt(struct sock *sk, char __user *optval, int optlen)
{
	int uninitialized_var(err);
	struct net *net = sock_net(sk);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct in6_flowlabel_req freq;
	struct ipv6_fl_socklist *sfl1 = NULL;
	struct ipv6_fl_socklist *sfl;
	struct ipv6_fl_socklist __rcu **sflp;
	struct ip6_flowlabel *fl, *fl1 = NULL;

	if (optlen < sizeof(freq))
		return -EINVAL;

	if (copy_from_user(&freq, optval, sizeof(freq)))
		return -EFAULT;

	switch (freq.flr_action) {
	case IPV6_FL_A_PUT:
		spin_lock_bh(&ip6_sk_fl_lock);
		for (sflp = &np->ipv6_fl_list;
		     (sfl = rcu_dereference(*sflp)) != NULL;
		     sflp = &sfl->next) {
			if (sfl->fl->label == freq.flr_label) {
				if (freq.flr_label == (np->flow_label & IPV6_FLOWLABEL_MASK))
					np->flow_label &= ~IPV6_FLOWLABEL_MASK;
				*sflp = rcu_dereference(sfl->next);
				spin_unlock_bh(&ip6_sk_fl_lock);
				fl_release(sfl->fl);
				kfree_rcu(sfl, rcu);
				return 0;
			}
		}
		spin_unlock_bh(&ip6_sk_fl_lock);
		return -ESRCH;

	case IPV6_FL_A_RENEW:
		rcu_read_lock_bh();
		for_each_sk_fl_rcu(np, sfl) {
			if (sfl->fl->label == freq.flr_label) {
				err = fl6_renew(sfl->fl, freq.flr_linger, freq.flr_expires);
				rcu_read_unlock_bh();
				return err;
			}
		}
		rcu_read_unlock_bh();

		if (freq.flr_share == IPV6_FL_S_NONE &&
		    ns_capable(net->user_ns, CAP_NET_ADMIN)) {
			fl = fl_lookup(net, freq.flr_label);
			if (fl) {
				err = fl6_renew(fl, freq.flr_linger, freq.flr_expires);
				fl_release(fl);
				return err;
			}
		}
		return -ESRCH;

	case IPV6_FL_A_GET:
		if (freq.flr_label & ~IPV6_FLOWLABEL_MASK)
			return -EINVAL;

		fl = fl_create(net, sk, &freq, optval, optlen, &err);
		if (fl == NULL)
			return err;
		sfl1 = kmalloc(sizeof(*sfl1), GFP_KERNEL);

		if (freq.flr_label) {
			err = -EEXIST;
			rcu_read_lock_bh();
			for_each_sk_fl_rcu(np, sfl) {
				if (sfl->fl->label == freq.flr_label) {
					if (freq.flr_flags & IPV6_FL_F_EXCL) {
						rcu_read_unlock_bh();
						goto done;
					}
					fl1 = sfl->fl;
					atomic_inc(&fl1->users);
					break;
				}
			}
			rcu_read_unlock_bh();

			if (fl1 == NULL)
				fl1 = fl_lookup(net, freq.flr_label);
			if (fl1) {
recheck:
				err = -EEXIST;
				if (freq.flr_flags & IPV6_FL_F_EXCL)
					goto release;
				err = -EPERM;
				/* Sharing is permitted only when the
				 * owners match. */
				if (fl1->share == IPV6_FL_S_EXCL ||
				    fl1->share != fl->share ||
				    ((fl1->share == IPV6_FL_S_PROCESS) &&
				     (fl1->owner.pid != fl->owner.pid)) ||
				    ((fl1->share == IPV6_FL_S_USER) &&
				     !uid_eq(fl1->owner.uid, fl->owner.uid)))
					goto release;

				err = -EINVAL;
				if (!ipv6_addr_equal(&fl1->dst, &fl->dst) ||
				    ipv6_opt_cmp(fl1->opt, fl->opt))
					goto release;

				err = -ENOMEM;
				if (sfl1 == NULL)
					goto release;
				if (fl->linger > fl1->linger)
					fl1->linger = fl->linger;
				if ((long)(fl->expires - fl1->expires) > 0)
					fl1->expires = fl->expires;
				fl_link(np, sfl1, fl1);
				fl_free(fl);
				return 0;

release:
				fl_release(fl1);
				goto done;
			}
		}
		err = -ENOENT;
		if (!(freq.flr_flags & IPV6_FL_F_CREATE))
			goto done;

		err = -ENOMEM;
		if (sfl1 == NULL || (err = mem_check(sk)) != 0)
			goto done;

		fl1 = fl_intern(net, fl, freq.flr_label);
		if (fl1 != NULL)
			goto recheck;

		if (!freq.flr_label) {
			if (copy_to_user(&((struct in6_flowlabel_req __user *) optval)->flr_label,
					 &fl->label, sizeof(fl->label))) {
				/* Intentionally ignore fault. */
			}
		}

		fl_link(np, sfl1, fl);
		return 0;

	default:
		return -EINVAL;
	}

done:
	fl_free(fl);
	kfree(sfl1);
	return err;
}

#ifdef CONFIG_PROC_FS

struct ip6fl_iter_state {
	struct seq_net_private p;
	struct pid_namespace *pid_ns;
	int bucket;
};

#define ip6fl_seq_private(seq)	((struct ip6fl_iter_state *)(seq)->private)

static struct ip6_flowlabel *ip6fl_get_first(struct seq_file *seq)
{
	struct ip6_flowlabel *fl = NULL;
	struct ip6fl_iter_state *state = ip6fl_seq_private(seq);
	struct net *net = seq_file_net(seq);

	for (state->bucket = 0; state->bucket <= FL_HASH_MASK; ++state->bucket) {
		for_each_fl_rcu(state->bucket, fl) {
			if (net_eq(fl->fl_net, net))
				goto out;
		}
	}
	fl = NULL;
out:
	return fl;
}

static struct ip6_flowlabel *ip6fl_get_next(struct seq_file *seq, struct ip6_flowlabel *fl)
{
	struct ip6fl_iter_state *state = ip6fl_seq_private(seq);
	struct net *net = seq_file_net(seq);

	for_each_fl_continue_rcu(fl) {
		if (net_eq(fl->fl_net, net))
			goto out;
	}

try_again:
	if (++state->bucket <= FL_HASH_MASK) {
		for_each_fl_rcu(state->bucket, fl) {
			if (net_eq(fl->fl_net, net))
				goto out;
		}
		goto try_again;
	}
	fl = NULL;

out:
	return fl;
}

static struct ip6_flowlabel *ip6fl_get_idx(struct seq_file *seq, loff_t pos)
{
	struct ip6_flowlabel *fl = ip6fl_get_first(seq);
	if (fl)
		while (pos && (fl = ip6fl_get_next(seq, fl)) != NULL)
			--pos;
	return pos ? NULL : fl;
}

static void *ip6fl_seq_start(struct seq_file *seq, loff_t *pos)
	__acquires(RCU)
{
	rcu_read_lock_bh();
	return *pos ? ip6fl_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
}

static void *ip6fl_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct ip6_flowlabel *fl;

	if (v == SEQ_START_TOKEN)
		fl = ip6fl_get_first(seq);
	else
		fl = ip6fl_get_next(seq, v);
	++*pos;
	return fl;
}

static void ip6fl_seq_stop(struct seq_file *seq, void *v)
	__releases(RCU)
{
	rcu_read_unlock_bh();
}

static int ip6fl_seq_show(struct seq_file *seq, void *v)
{
	struct ip6fl_iter_state *state = ip6fl_seq_private(seq);
	if (v == SEQ_START_TOKEN)
		seq_printf(seq, "%-5s %-1s %-6s %-6s %-6s %-8s %-32s %s\n",
			   "Label", "S", "Owner", "Users", "Linger", "Expires", "Dst", "Opt");
	else {
		struct ip6_flowlabel *fl = v;
		seq_printf(seq,
			   "%05X %-1d %-6d %-6d %-6ld %-8ld %pi6 %-4d\n",
			   (unsigned int)ntohl(fl->label),
			   fl->share,
			   ((fl->share == IPV6_FL_S_PROCESS) ?
			    pid_nr_ns(fl->owner.pid, state->pid_ns) :
			    ((fl->share == IPV6_FL_S_USER) ?
			     from_kuid_munged(seq_user_ns(seq), fl->owner.uid) :
			     0)),
			   atomic_read(&fl->users),
			   fl->linger/HZ,
			   (long)(fl->expires - jiffies)/HZ,
			   &fl->dst,
			   fl->opt ? fl->opt->opt_nflen : 0);
	}
	return 0;
}
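/*
 * /proc/net/ip6_flowlabel thus prints one line per label under the
 * header produced above:
 *
 *	Label S Owner  Users  Linger Expires  Dst  Opt
 *
 * Label is the 20-bit value in hex, S the share mode, Owner the pid or
 * uid for process/user-shared labels (0 otherwise), Linger and Expires
 * are in seconds, Dst the destination address and Opt the length of
 * the pinned non-fragmentable options.
 */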
static const struct seq_operations ip6fl_seq_ops = {
	.start	= ip6fl_seq_start,
	.next	= ip6fl_seq_next,
	.stop	= ip6fl_seq_stop,
	.show	= ip6fl_seq_show,
};

static int ip6fl_seq_open(struct inode *inode, struct file *file)
{
	struct seq_file *seq;
	struct ip6fl_iter_state *state;
	int err;

	err = seq_open_net(inode, file, &ip6fl_seq_ops,
			   sizeof(struct ip6fl_iter_state));

	if (!err) {
		seq = file->private_data;
		state = ip6fl_seq_private(seq);
		rcu_read_lock();
		state->pid_ns = get_pid_ns(task_active_pid_ns(current));
		rcu_read_unlock();
	}
	return err;
}

static int ip6fl_seq_release(struct inode *inode, struct file *file)
{
	struct seq_file *seq = file->private_data;
	struct ip6fl_iter_state *state = ip6fl_seq_private(seq);
	put_pid_ns(state->pid_ns);
	return seq_release_net(inode, file);
}

static const struct file_operations ip6fl_seq_fops = {
	.owner		= THIS_MODULE,
	.open		= ip6fl_seq_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= ip6fl_seq_release,
};

static int __net_init ip6_flowlabel_proc_init(struct net *net)
{
	if (!proc_create("ip6_flowlabel", S_IRUGO, net->proc_net,
			 &ip6fl_seq_fops))
		return -ENOMEM;
	return 0;
}

static void __net_exit ip6_flowlabel_proc_fini(struct net *net)
{
	remove_proc_entry("ip6_flowlabel", net->proc_net);
}
#else
static inline int ip6_flowlabel_proc_init(struct net *net)
{
	return 0;
}
static inline void ip6_flowlabel_proc_fini(struct net *net)
{
}
#endif

static void __net_exit ip6_flowlabel_net_exit(struct net *net)
{
	ip6_fl_purge(net);
	ip6_flowlabel_proc_fini(net);
}

static struct pernet_operations ip6_flowlabel_net_ops = {
	.init = ip6_flowlabel_proc_init,
	.exit = ip6_flowlabel_net_exit,
};

int ip6_flowlabel_init(void)
{
	return register_pernet_subsys(&ip6_flowlabel_net_ops);
}

void ip6_flowlabel_cleanup(void)
{
	del_timer(&ip6_fl_gc_timer);
	unregister_pernet_subsys(&ip6_flowlabel_net_ops);
}