/* xref: /openbmc/linux/net/ipv6/ip6_flowlabel.c (revision 9d749629) */
/*
 *	ip6_flowlabel.c		IPv6 flowlabel manager.
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 *
 *	Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 */

#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/net.h>
#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <linux/in6.h>
#include <linux/route.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/pid_namespace.h>

#include <net/net_namespace.h>
#include <net/sock.h>

#include <net/ipv6.h>
#include <net/ndisc.h>
#include <net/protocol.h>
#include <net/ip6_route.h>
#include <net/addrconf.h>
#include <net/rawv6.h>
#include <net/icmp.h>
#include <net/transp_v6.h>

#include <asm/uaccess.h>

#define FL_MIN_LINGER	6	/* Minimal linger, in seconds. Set to the
				   6s specified in the old IPv6 RFC; it was
				   a reasonable value. */
#define FL_MAX_LINGER	60	/* Maximal linger timeout, in seconds */

/* FL hash table */

#define FL_MAX_PER_SOCK	32
#define FL_MAX_SIZE	4096
#define FL_HASH_MASK	255
#define FL_HASH(l)	(ntohl(l) & FL_HASH_MASK)
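
/*
 * Example: a label stored as __be32 with host-order value 0x12345
 * hashes to FL_HASH(htonl(0x12345)) == 0x45, i.e. the low eight bits
 * of the 20-bit flow label select one of the 256 buckets.
 */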

static atomic_t fl_size = ATOMIC_INIT(0);
static struct ip6_flowlabel __rcu *fl_ht[FL_HASH_MASK+1];

static void ip6_fl_gc(unsigned long dummy);
static DEFINE_TIMER(ip6_fl_gc_timer, ip6_fl_gc, 0, 0);

/* FL hash table lock: taken by all writers (fl_intern, fl_release,
 * GC, purge); lookups use RCU */

static DEFINE_SPINLOCK(ip6_fl_lock);

/* Per-socket flowlabel list lock */

static DEFINE_SPINLOCK(ip6_sk_fl_lock);

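/*
 * The iterators below must run under rcu_read_lock_bh(); list updates
 * are serialized by ip6_fl_lock (global hash table) and ip6_sk_fl_lock
 * (per-socket lists).
 */
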
#define for_each_fl_rcu(hash, fl)				\
	for (fl = rcu_dereference_bh(fl_ht[(hash)]);		\
	     fl != NULL;					\
	     fl = rcu_dereference_bh(fl->next))
#define for_each_fl_continue_rcu(fl)				\
	for (fl = rcu_dereference_bh(fl->next);			\
	     fl != NULL;					\
	     fl = rcu_dereference_bh(fl->next))

#define for_each_sk_fl_rcu(np, sfl)				\
	for (sfl = rcu_dereference_bh(np->ipv6_fl_list);	\
	     sfl != NULL;					\
	     sfl = rcu_dereference_bh(sfl->next))

static inline struct ip6_flowlabel *__fl_lookup(struct net *net, __be32 label)
{
	struct ip6_flowlabel *fl;

	for_each_fl_rcu(FL_HASH(label), fl) {
		if (fl->label == label && net_eq(fl->fl_net, net))
			return fl;
	}
	return NULL;
}

static struct ip6_flowlabel *fl_lookup(struct net *net, __be32 label)
{
	struct ip6_flowlabel *fl;

	rcu_read_lock_bh();
	fl = __fl_lookup(net, label);
	if (fl && !atomic_inc_not_zero(&fl->users))
		fl = NULL;
	rcu_read_unlock_bh();
	return fl;
}

static void fl_free(struct ip6_flowlabel *fl)
{
	if (fl) {
		if (fl->share == IPV6_FL_S_PROCESS)
			put_pid(fl->owner.pid);
		release_net(fl->fl_net);
		kfree(fl->opt);
		kfree_rcu(fl, rcu);
	}
}

static void fl_release(struct ip6_flowlabel *fl)
{
	spin_lock_bh(&ip6_fl_lock);

	fl->lastuse = jiffies;
	if (atomic_dec_and_test(&fl->users)) {
		unsigned long ttd = fl->lastuse + fl->linger;
		if (time_after(ttd, fl->expires))
			fl->expires = ttd;
		ttd = fl->expires;
		if (fl->opt && fl->share == IPV6_FL_S_EXCL) {
			struct ipv6_txoptions *opt = fl->opt;
			fl->opt = NULL;
			kfree(opt);
		}
		if (!timer_pending(&ip6_fl_gc_timer) ||
		    time_after(ip6_fl_gc_timer.expires, ttd))
			mod_timer(&ip6_fl_gc_timer, ttd);
	}
	spin_unlock_bh(&ip6_fl_lock);
}

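/*
 * Timer-driven garbage collector: walk every hash chain, free expired
 * entries that no longer have users, and re-arm the timer for the
 * earliest remaining expiry.
 */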
static void ip6_fl_gc(unsigned long dummy)
{
	int i;
	unsigned long now = jiffies;
	unsigned long sched = 0;

	spin_lock(&ip6_fl_lock);

	for (i = 0; i <= FL_HASH_MASK; i++) {
		struct ip6_flowlabel *fl, **flp;
		flp = &fl_ht[i];
		while ((fl = rcu_dereference_protected(*flp,
						       lockdep_is_held(&ip6_fl_lock))) != NULL) {
			if (atomic_read(&fl->users) == 0) {
				unsigned long ttd = fl->lastuse + fl->linger;
				if (time_after(ttd, fl->expires))
					fl->expires = ttd;
				ttd = fl->expires;
				if (time_after_eq(now, ttd)) {
					*flp = fl->next;
					fl_free(fl);
					atomic_dec(&fl_size);
					continue;
				}
				if (!sched || time_before(ttd, sched))
					sched = ttd;
			}
			flp = &fl->next;
		}
	}
	if (!sched && atomic_read(&fl_size))
		sched = now + FL_MAX_LINGER;
	if (sched)
		mod_timer(&ip6_fl_gc_timer, sched);
	spin_unlock(&ip6_fl_lock);
}

static void __net_exit ip6_fl_purge(struct net *net)
{
	int i;

	spin_lock(&ip6_fl_lock);
	for (i = 0; i <= FL_HASH_MASK; i++) {
		struct ip6_flowlabel *fl, **flp;
		flp = &fl_ht[i];
		while ((fl = rcu_dereference_protected(*flp,
						       lockdep_is_held(&ip6_fl_lock))) != NULL) {
			if (net_eq(fl->fl_net, net) &&
			    atomic_read(&fl->users) == 0) {
				*flp = fl->next;
				fl_free(fl);
				atomic_dec(&fl_size);
				continue;
			}
			flp = &fl->next;
		}
	}
	spin_unlock(&ip6_fl_lock);
}

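/*
 * Insert @fl into the hash table. With @label == 0 a free label is
 * picked at random. Returns NULL on success; if the requested label
 * already exists, returns the existing entry with its user count
 * bumped and leaves @fl untouched for the caller to dispose of.
 */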
static struct ip6_flowlabel *fl_intern(struct net *net,
				       struct ip6_flowlabel *fl, __be32 label)
{
	struct ip6_flowlabel *lfl;

	fl->label = label & IPV6_FLOWLABEL_MASK;

	spin_lock_bh(&ip6_fl_lock);
	if (label == 0) {
		for (;;) {
			fl->label = htonl(net_random()) & IPV6_FLOWLABEL_MASK;
			if (fl->label) {
				lfl = __fl_lookup(net, fl->label);
				if (lfl == NULL)
					break;
			}
		}
	} else {
		/*
		 * We dropped the ip6_fl_lock, so this entry could reappear
		 * and we need to recheck with it.
		 *
		 * OTOH no need to search the active socket first, like it is
		 * done in ipv6_flowlabel_opt - sock is locked, so a new entry
		 * with the same label can only appear on another sock.
		 */
		lfl = __fl_lookup(net, fl->label);
		if (lfl != NULL) {
			atomic_inc(&lfl->users);
			spin_unlock_bh(&ip6_fl_lock);
			return lfl;
		}
	}

	fl->lastuse = jiffies;
	fl->next = fl_ht[FL_HASH(fl->label)];
	rcu_assign_pointer(fl_ht[FL_HASH(fl->label)], fl);
	atomic_inc(&fl_size);
	spin_unlock_bh(&ip6_fl_lock);
	return NULL;
}

/* Socket flowlabel lists */

struct ip6_flowlabel *fl6_sock_lookup(struct sock *sk, __be32 label)
{
	struct ipv6_fl_socklist *sfl;
	struct ipv6_pinfo *np = inet6_sk(sk);

	label &= IPV6_FLOWLABEL_MASK;

	rcu_read_lock_bh();
	for_each_sk_fl_rcu(np, sfl) {
		struct ip6_flowlabel *fl = sfl->fl;
		if (fl->label == label) {
			fl->lastuse = jiffies;
			atomic_inc(&fl->users);
			rcu_read_unlock_bh();
			return fl;
		}
	}
	rcu_read_unlock_bh();
	return NULL;
}
EXPORT_SYMBOL_GPL(fl6_sock_lookup);

void fl6_free_socklist(struct sock *sk)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct ipv6_fl_socklist *sfl;

	if (!rcu_access_pointer(np->ipv6_fl_list))
		return;

	spin_lock_bh(&ip6_sk_fl_lock);
	while ((sfl = rcu_dereference_protected(np->ipv6_fl_list,
						lockdep_is_held(&ip6_sk_fl_lock))) != NULL) {
		np->ipv6_fl_list = sfl->next;
		spin_unlock_bh(&ip6_sk_fl_lock);

		fl_release(sfl->fl);
		kfree_rcu(sfl, rcu);

		spin_lock_bh(&ip6_sk_fl_lock);
	}
	spin_unlock_bh(&ip6_sk_fl_lock);
}

/* Service routines */

/*
 * This is the only difficult place: a flow label requires that the
 * headers up to and including the routing header be identical on every
 * packet, yet the user may still supply per-packet options that follow
 * the rthdr.
 */

struct ipv6_txoptions *fl6_merge_options(struct ipv6_txoptions *opt_space,
					 struct ip6_flowlabel *fl,
					 struct ipv6_txoptions *fopt)
{
	struct ipv6_txoptions *fl_opt = fl->opt;

	if (fopt == NULL || fopt->opt_flen == 0)
		return fl_opt;

	if (fl_opt != NULL) {
		opt_space->hopopt = fl_opt->hopopt;
		opt_space->dst0opt = fl_opt->dst0opt;
		opt_space->srcrt = fl_opt->srcrt;
		opt_space->opt_nflen = fl_opt->opt_nflen;
	} else {
		if (fopt->opt_nflen == 0)
			return fopt;
		opt_space->hopopt = NULL;
		opt_space->dst0opt = NULL;
		opt_space->srcrt = NULL;
		opt_space->opt_nflen = 0;
	}
	opt_space->dst1opt = fopt->dst1opt;
	opt_space->opt_flen = fopt->opt_flen;
	return opt_space;
}
EXPORT_SYMBOL_GPL(fl6_merge_options);
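
/*
 * For example, if fl->opt carries only options before the routing
 * header (opt_nflen > 0; opt_flen is always 0 for a label, see
 * fl_create) and fopt carries only options after it (opt_flen > 0),
 * the merged result in opt_space takes hopopt/dst0opt/srcrt from the
 * label and dst1opt from the caller.
 */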

static unsigned long check_linger(unsigned long ttl)
{
	if (ttl < FL_MIN_LINGER)
		return FL_MIN_LINGER * HZ;
	if (ttl > FL_MAX_LINGER && !capable(CAP_NET_ADMIN))
		return 0;
	return ttl * HZ;
}
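
/*
 * Example: check_linger(3) is clamped up to FL_MIN_LINGER * HZ (6s),
 * check_linger(30) becomes 30 * HZ, and check_linger(120) returns 0
 * (the caller then fails with -EPERM) unless the task has
 * CAP_NET_ADMIN.
 */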

static int fl6_renew(struct ip6_flowlabel *fl, unsigned long linger, unsigned long expires)
{
	linger = check_linger(linger);
	if (!linger)
		return -EPERM;
	expires = check_linger(expires);
	if (!expires)
		return -EPERM;
	fl->lastuse = jiffies;
	if (time_before(fl->linger, linger))
		fl->linger = linger;
	if (time_before(expires, fl->linger))
		expires = fl->linger;
	if (time_before(fl->expires, fl->lastuse + expires))
		fl->expires = fl->lastuse + expires;
	return 0;
}

static struct ip6_flowlabel *
fl_create(struct net *net, struct sock *sk, struct in6_flowlabel_req *freq,
	  char __user *optval, int optlen, int *err_p)
{
	struct ip6_flowlabel *fl = NULL;
	int olen;
	int addr_type;
	int err;

	olen = optlen - CMSG_ALIGN(sizeof(*freq));
	err = -EINVAL;
	if (olen > 64 * 1024)
		goto done;

	err = -ENOMEM;
	fl = kzalloc(sizeof(*fl), GFP_KERNEL);
	if (fl == NULL)
		goto done;

	if (olen > 0) {
		struct msghdr msg;
		struct flowi6 flowi6;
		int junk;

		err = -ENOMEM;
		fl->opt = kmalloc(sizeof(*fl->opt) + olen, GFP_KERNEL);
		if (fl->opt == NULL)
			goto done;

		memset(fl->opt, 0, sizeof(*fl->opt));
		fl->opt->tot_len = sizeof(*fl->opt) + olen;
		err = -EFAULT;
		if (copy_from_user(fl->opt + 1, optval + CMSG_ALIGN(sizeof(*freq)), olen))
			goto done;

		msg.msg_controllen = olen;
		msg.msg_control = (void *)(fl->opt + 1);
		memset(&flowi6, 0, sizeof(flowi6));

		err = ip6_datagram_send_ctl(net, sk, &msg, &flowi6, fl->opt,
					    &junk, &junk, &junk);
		if (err)
			goto done;
		err = -EINVAL;
		if (fl->opt->opt_flen)
			goto done;
		if (fl->opt->opt_nflen == 0) {
			kfree(fl->opt);
			fl->opt = NULL;
		}
	}

	fl->fl_net = hold_net(net);
	fl->expires = jiffies;
	err = fl6_renew(fl, freq->flr_linger, freq->flr_expires);
	if (err)
		goto done;
	fl->share = freq->flr_share;
	addr_type = ipv6_addr_type(&freq->flr_dst);
	if ((addr_type & IPV6_ADDR_MAPPED) ||
	    addr_type == IPV6_ADDR_ANY) {
		err = -EINVAL;
		goto done;
	}
	fl->dst = freq->flr_dst;
	atomic_set(&fl->users, 1);
	switch (fl->share) {
	case IPV6_FL_S_EXCL:
	case IPV6_FL_S_ANY:
		break;
	case IPV6_FL_S_PROCESS:
		fl->owner.pid = get_task_pid(current, PIDTYPE_PID);
		break;
	case IPV6_FL_S_USER:
		fl->owner.uid = current_euid();
		break;
	default:
		err = -EINVAL;
		goto done;
	}
	return fl;

done:
	fl_free(fl);
	*err_p = err;
	return NULL;
}

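/*
 * Refuse a new label when the global table is nearly full or the
 * socket already holds many labels; CAP_NET_ADMIN relaxes the
 * per-socket limits but not a completely exhausted table.
 */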
static int mem_check(struct sock *sk)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct ipv6_fl_socklist *sfl;
	int room = FL_MAX_SIZE - atomic_read(&fl_size);
	int count = 0;

	if (room > FL_MAX_SIZE - FL_MAX_PER_SOCK)
		return 0;

	for_each_sk_fl_rcu(np, sfl)
		count++;

	if (room <= 0 ||
	    ((count >= FL_MAX_PER_SOCK ||
	      (count > 0 && room < FL_MAX_SIZE/2) || room < FL_MAX_SIZE/4) &&
	     !capable(CAP_NET_ADMIN)))
		return -ENOBUFS;

	return 0;
}

static bool ipv6_hdr_cmp(struct ipv6_opt_hdr *h1, struct ipv6_opt_hdr *h2)
{
	if (h1 == h2)
		return false;
	if (h1 == NULL || h2 == NULL)
		return true;
	if (h1->hdrlen != h2->hdrlen)
		return true;
	return memcmp(h1 + 1, h2 + 1, ((h1->hdrlen + 1) << 3) - sizeof(*h1));
}

static bool ipv6_opt_cmp(struct ipv6_txoptions *o1, struct ipv6_txoptions *o2)
{
	if (o1 == o2)
		return false;
	if (o1 == NULL || o2 == NULL)
		return true;
	if (o1->opt_nflen != o2->opt_nflen)
		return true;
	if (ipv6_hdr_cmp(o1->hopopt, o2->hopopt))
		return true;
	if (ipv6_hdr_cmp(o1->dst0opt, o2->dst0opt))
		return true;
	if (ipv6_hdr_cmp((struct ipv6_opt_hdr *)o1->srcrt, (struct ipv6_opt_hdr *)o2->srcrt))
		return true;
	return false;
}

static inline void fl_link(struct ipv6_pinfo *np, struct ipv6_fl_socklist *sfl,
		struct ip6_flowlabel *fl)
{
	spin_lock_bh(&ip6_sk_fl_lock);
	sfl->fl = fl;
	sfl->next = np->ipv6_fl_list;
	rcu_assign_pointer(np->ipv6_fl_list, sfl);
	spin_unlock_bh(&ip6_sk_fl_lock);
}

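/*
 * setsockopt(IPV6_FLOWLABEL_MGR) entry point. flr_action selects
 * IPV6_FL_A_PUT (detach a label from the socket), IPV6_FL_A_RENEW
 * (extend linger/expiry) or IPV6_FL_A_GET (attach a label, optionally
 * creating it).
 *
 * A minimal userspace sketch (illustrative values only):
 *
 *	struct in6_flowlabel_req freq = {
 *		.flr_dst     = dst,		// destination address
 *		.flr_label   = 0,		// 0: kernel picks a label
 *		.flr_action  = IPV6_FL_A_GET,
 *		.flr_share   = IPV6_FL_S_EXCL,
 *		.flr_flags   = IPV6_FL_F_CREATE,
 *		.flr_expires = 30,		// seconds
 *		.flr_linger  = 6,
 *	};
 *	setsockopt(fd, SOL_IPV6, IPV6_FLOWLABEL_MGR, &freq, sizeof(freq));
 *	// on success freq.flr_label holds the allocated label
 */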
int ipv6_flowlabel_opt(struct sock *sk, char __user *optval, int optlen)
{
	int uninitialized_var(err);
	struct net *net = sock_net(sk);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct in6_flowlabel_req freq;
	struct ipv6_fl_socklist *sfl1 = NULL;
	struct ipv6_fl_socklist *sfl, **sflp;
	struct ip6_flowlabel *fl, *fl1 = NULL;

	if (optlen < sizeof(freq))
		return -EINVAL;

	if (copy_from_user(&freq, optval, sizeof(freq)))
		return -EFAULT;

	switch (freq.flr_action) {
	case IPV6_FL_A_PUT:
		spin_lock_bh(&ip6_sk_fl_lock);
		for (sflp = &np->ipv6_fl_list;
		     (sfl = rcu_dereference(*sflp)) != NULL;
		     sflp = &sfl->next) {
			if (sfl->fl->label == freq.flr_label) {
				if (freq.flr_label == (np->flow_label & IPV6_FLOWLABEL_MASK))
					np->flow_label &= ~IPV6_FLOWLABEL_MASK;
				*sflp = rcu_dereference(sfl->next);
				spin_unlock_bh(&ip6_sk_fl_lock);
				fl_release(sfl->fl);
				kfree_rcu(sfl, rcu);
				return 0;
			}
		}
		spin_unlock_bh(&ip6_sk_fl_lock);
		return -ESRCH;

	case IPV6_FL_A_RENEW:
		rcu_read_lock_bh();
		for_each_sk_fl_rcu(np, sfl) {
			if (sfl->fl->label == freq.flr_label) {
				err = fl6_renew(sfl->fl, freq.flr_linger, freq.flr_expires);
				rcu_read_unlock_bh();
				return err;
			}
		}
		rcu_read_unlock_bh();

		if (freq.flr_share == IPV6_FL_S_NONE &&
		    ns_capable(net->user_ns, CAP_NET_ADMIN)) {
			fl = fl_lookup(net, freq.flr_label);
			if (fl) {
				err = fl6_renew(fl, freq.flr_linger, freq.flr_expires);
				fl_release(fl);
				return err;
			}
		}
		return -ESRCH;

	case IPV6_FL_A_GET:
		if (freq.flr_label & ~IPV6_FLOWLABEL_MASK)
			return -EINVAL;

		fl = fl_create(net, sk, &freq, optval, optlen, &err);
		if (fl == NULL)
			return err;
		sfl1 = kmalloc(sizeof(*sfl1), GFP_KERNEL);

		if (freq.flr_label) {
			err = -EEXIST;
			rcu_read_lock_bh();
			for_each_sk_fl_rcu(np, sfl) {
				if (sfl->fl->label == freq.flr_label) {
					if (freq.flr_flags & IPV6_FL_F_EXCL) {
						rcu_read_unlock_bh();
						goto done;
					}
					fl1 = sfl->fl;
					atomic_inc(&fl1->users);
					break;
				}
			}
			rcu_read_unlock_bh();

			if (fl1 == NULL)
				fl1 = fl_lookup(net, freq.flr_label);
			if (fl1) {
recheck:
				err = -EEXIST;
				if (freq.flr_flags & IPV6_FL_F_EXCL)
					goto release;
				err = -EPERM;
				if (fl1->share == IPV6_FL_S_EXCL ||
				    fl1->share != fl->share ||
				    ((fl1->share == IPV6_FL_S_PROCESS) &&
				     (fl1->owner.pid != fl->owner.pid)) ||
				    ((fl1->share == IPV6_FL_S_USER) &&
				     !uid_eq(fl1->owner.uid, fl->owner.uid)))
					goto release;

				err = -EINVAL;
				if (!ipv6_addr_equal(&fl1->dst, &fl->dst) ||
				    ipv6_opt_cmp(fl1->opt, fl->opt))
					goto release;

				err = -ENOMEM;
				if (sfl1 == NULL)
					goto release;
				if (fl->linger > fl1->linger)
					fl1->linger = fl->linger;
				if ((long)(fl->expires - fl1->expires) > 0)
					fl1->expires = fl->expires;
				fl_link(np, sfl1, fl1);
				fl_free(fl);
				return 0;

release:
				fl_release(fl1);
				goto done;
			}
		}
		err = -ENOENT;
		if (!(freq.flr_flags & IPV6_FL_F_CREATE))
			goto done;

		err = -ENOMEM;
		if (sfl1 == NULL || (err = mem_check(sk)) != 0)
			goto done;

		fl1 = fl_intern(net, fl, freq.flr_label);
		if (fl1 != NULL)
			goto recheck;

		if (!freq.flr_label) {
			if (copy_to_user(&((struct in6_flowlabel_req __user *) optval)->flr_label,
					 &fl->label, sizeof(fl->label))) {
				/* Intentionally ignore fault. */
			}
		}

		fl_link(np, sfl1, fl);
		return 0;

	default:
		return -EINVAL;
	}

done:
	fl_free(fl);
	kfree(sfl1);
	return err;
}

#ifdef CONFIG_PROC_FS

struct ip6fl_iter_state {
	struct seq_net_private p;
	struct pid_namespace *pid_ns;
	int bucket;
};

#define ip6fl_seq_private(seq)	((struct ip6fl_iter_state *)(seq)->private)

static struct ip6_flowlabel *ip6fl_get_first(struct seq_file *seq)
{
	struct ip6_flowlabel *fl = NULL;
	struct ip6fl_iter_state *state = ip6fl_seq_private(seq);
	struct net *net = seq_file_net(seq);

	for (state->bucket = 0; state->bucket <= FL_HASH_MASK; ++state->bucket) {
		for_each_fl_rcu(state->bucket, fl) {
			if (net_eq(fl->fl_net, net))
				goto out;
		}
	}
	fl = NULL;
out:
	return fl;
}

static struct ip6_flowlabel *ip6fl_get_next(struct seq_file *seq, struct ip6_flowlabel *fl)
{
	struct ip6fl_iter_state *state = ip6fl_seq_private(seq);
	struct net *net = seq_file_net(seq);

	for_each_fl_continue_rcu(fl) {
		if (net_eq(fl->fl_net, net))
			goto out;
	}

try_again:
	if (++state->bucket <= FL_HASH_MASK) {
		for_each_fl_rcu(state->bucket, fl) {
			if (net_eq(fl->fl_net, net))
				goto out;
		}
		goto try_again;
	}
	fl = NULL;

out:
	return fl;
}

static struct ip6_flowlabel *ip6fl_get_idx(struct seq_file *seq, loff_t pos)
{
	struct ip6_flowlabel *fl = ip6fl_get_first(seq);
	if (fl)
		while (pos && (fl = ip6fl_get_next(seq, fl)) != NULL)
			--pos;
	return pos ? NULL : fl;
}

static void *ip6fl_seq_start(struct seq_file *seq, loff_t *pos)
	__acquires(RCU)
{
	rcu_read_lock_bh();
	return *pos ? ip6fl_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
}

static void *ip6fl_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct ip6_flowlabel *fl;

	if (v == SEQ_START_TOKEN)
		fl = ip6fl_get_first(seq);
	else
		fl = ip6fl_get_next(seq, v);
	++*pos;
	return fl;
}

static void ip6fl_seq_stop(struct seq_file *seq, void *v)
	__releases(RCU)
{
	rcu_read_unlock_bh();
}

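/*
 * Illustrative /proc/net/ip6_flowlabel output, one line per label
 * (values made up; here share 2 == IPV6_FL_S_PROCESS, so Owner is a
 * pid):
 *
 *	Label S Owner  Users  Linger Expires  Dst                              Opt
 *	00045 2 1042   1      6      29       20010db8000000000000000000000001 0
 */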
static int ip6fl_seq_show(struct seq_file *seq, void *v)
{
	struct ip6fl_iter_state *state = ip6fl_seq_private(seq);
	if (v == SEQ_START_TOKEN)
		seq_printf(seq, "%-5s %-1s %-6s %-6s %-6s %-8s %-32s %s\n",
			   "Label", "S", "Owner", "Users", "Linger", "Expires", "Dst", "Opt");
	else {
		struct ip6_flowlabel *fl = v;
		seq_printf(seq,
			   "%05X %-1d %-6d %-6d %-6ld %-8ld %pi6 %-4d\n",
			   (unsigned int)ntohl(fl->label),
			   fl->share,
			   ((fl->share == IPV6_FL_S_PROCESS) ?
			    pid_nr_ns(fl->owner.pid, state->pid_ns) :
			    ((fl->share == IPV6_FL_S_USER) ?
			     from_kuid_munged(seq_user_ns(seq), fl->owner.uid) :
			     0)),
			   atomic_read(&fl->users),
			   fl->linger/HZ,
			   (long)(fl->expires - jiffies)/HZ,
			   &fl->dst,
			   fl->opt ? fl->opt->opt_nflen : 0);
	}
	return 0;
}

static const struct seq_operations ip6fl_seq_ops = {
	.start	=	ip6fl_seq_start,
	.next	=	ip6fl_seq_next,
	.stop	=	ip6fl_seq_stop,
	.show	=	ip6fl_seq_show,
};

static int ip6fl_seq_open(struct inode *inode, struct file *file)
{
	struct seq_file *seq;
	struct ip6fl_iter_state *state;
	int err;

	err = seq_open_net(inode, file, &ip6fl_seq_ops,
			   sizeof(struct ip6fl_iter_state));

	if (!err) {
		seq = file->private_data;
		state = ip6fl_seq_private(seq);
		rcu_read_lock();
		state->pid_ns = get_pid_ns(task_active_pid_ns(current));
		rcu_read_unlock();
	}
	return err;
}

static int ip6fl_seq_release(struct inode *inode, struct file *file)
{
	struct seq_file *seq = file->private_data;
	struct ip6fl_iter_state *state = ip6fl_seq_private(seq);
	put_pid_ns(state->pid_ns);
	return seq_release_net(inode, file);
}

static const struct file_operations ip6fl_seq_fops = {
	.owner		=	THIS_MODULE,
	.open		=	ip6fl_seq_open,
	.read		=	seq_read,
	.llseek		=	seq_lseek,
	.release	=	ip6fl_seq_release,
};

static int __net_init ip6_flowlabel_proc_init(struct net *net)
{
	if (!proc_create("ip6_flowlabel", S_IRUGO, net->proc_net,
			 &ip6fl_seq_fops))
		return -ENOMEM;
	return 0;
}

static void __net_exit ip6_flowlabel_proc_fini(struct net *net)
{
	remove_proc_entry("ip6_flowlabel", net->proc_net);
}
#else
static inline int ip6_flowlabel_proc_init(struct net *net)
{
	return 0;
}
static inline void ip6_flowlabel_proc_fini(struct net *net)
{
}
#endif

static void __net_exit ip6_flowlabel_net_exit(struct net *net)
{
	ip6_fl_purge(net);
	ip6_flowlabel_proc_fini(net);
}

static struct pernet_operations ip6_flowlabel_net_ops = {
	.init = ip6_flowlabel_proc_init,
	.exit = ip6_flowlabel_net_exit,
};

int ip6_flowlabel_init(void)
{
	return register_pernet_subsys(&ip6_flowlabel_net_ops);
}

void ip6_flowlabel_cleanup(void)
{
	del_timer(&ip6_fl_gc_timer);
	unregister_pernet_subsys(&ip6_flowlabel_net_ops);
}