xref: /openbmc/linux/net/ipv6/ip6_flowlabel.c (revision b7019ac5)
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *	ip6_flowlabel.c		IPv6 flowlabel manager.
 *
 *	Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 */

#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/net.h>
#include <linux/netdevice.h>
#include <linux/in6.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/pid_namespace.h>

#include <net/net_namespace.h>
#include <net/sock.h>

#include <net/ipv6.h>
#include <net/rawv6.h>
#include <net/transp_v6.h>

#include <linux/uaccess.h>
#define FL_MIN_LINGER	6	/* Minimal linger. It is set to the 6 sec
				   specified in the old IPv6 RFC. Well, it
				   was a reasonable value. */
#define FL_MAX_LINGER	150	/* Maximal linger timeout */

/* FL hash table */

#define FL_MAX_PER_SOCK	32
#define FL_MAX_SIZE	4096
#define FL_HASH_MASK	255
#define FL_HASH(l)	(ntohl(l)&FL_HASH_MASK)
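
/*
 * A worked example: labels are stored in network byte order (__be32), so
 * FL_HASH() hashes on the host-order value; a label of 0x12345 lands in
 * bucket 0x12345 & 255 == 0x45.
 */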

static atomic_t fl_size = ATOMIC_INIT(0);
static struct ip6_flowlabel __rcu *fl_ht[FL_HASH_MASK+1];

static void ip6_fl_gc(struct timer_list *unused);
static DEFINE_TIMER(ip6_fl_gc_timer, ip6_fl_gc);
/* FL hash table lock: it protects only the GC */

static DEFINE_SPINLOCK(ip6_fl_lock);

/* Lock protecting the per-socket flowlabel lists */

static DEFINE_SPINLOCK(ip6_sk_fl_lock);

#define for_each_fl_rcu(hash, fl)				\
	for (fl = rcu_dereference_bh(fl_ht[(hash)]);		\
	     fl != NULL;					\
	     fl = rcu_dereference_bh(fl->next))
#define for_each_fl_continue_rcu(fl)				\
	for (fl = rcu_dereference_bh(fl->next);			\
	     fl != NULL;					\
	     fl = rcu_dereference_bh(fl->next))

#define for_each_sk_fl_rcu(np, sfl)				\
	for (sfl = rcu_dereference_bh(np->ipv6_fl_list);	\
	     sfl != NULL;					\
	     sfl = rcu_dereference_bh(sfl->next))

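/*
 * Look up a label in the global hash.  The caller must either be inside
 * an rcu_read_lock_bh() section or hold ip6_fl_lock (which disables BH),
 * matching the rcu_dereference_bh() walkers above.
 */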
static inline struct ip6_flowlabel *__fl_lookup(struct net *net, __be32 label)
{
	struct ip6_flowlabel *fl;

	for_each_fl_rcu(FL_HASH(label), fl) {
		if (fl->label == label && net_eq(fl->fl_net, net))
			return fl;
	}
	return NULL;
}

static struct ip6_flowlabel *fl_lookup(struct net *net, __be32 label)
{
	struct ip6_flowlabel *fl;

	rcu_read_lock_bh();
	fl = __fl_lookup(net, label);
	if (fl && !atomic_inc_not_zero(&fl->users))
		fl = NULL;
	rcu_read_unlock_bh();
	return fl;
}

static void fl_free_rcu(struct rcu_head *head)
{
	struct ip6_flowlabel *fl = container_of(head, struct ip6_flowlabel, rcu);

	if (fl->share == IPV6_FL_S_PROCESS)
		put_pid(fl->owner.pid);
	kfree(fl->opt);
	kfree(fl);
}

static void fl_free(struct ip6_flowlabel *fl)
{
	if (fl)
		call_rcu(&fl->rcu, fl_free_rcu);
}

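/*
 * Drop one user reference.  On the final put, push the expiry out to
 * lastuse + linger, free exclusive options early, and make sure the GC
 * timer fires no later than the new expiry.
 */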
static void fl_release(struct ip6_flowlabel *fl)
{
	spin_lock_bh(&ip6_fl_lock);

	fl->lastuse = jiffies;
	if (atomic_dec_and_test(&fl->users)) {
		unsigned long ttd = fl->lastuse + fl->linger;

		if (time_after(ttd, fl->expires))
			fl->expires = ttd;
		ttd = fl->expires;
		if (fl->opt && fl->share == IPV6_FL_S_EXCL) {
			struct ipv6_txoptions *opt = fl->opt;

			fl->opt = NULL;
			kfree(opt);
		}
		if (!timer_pending(&ip6_fl_gc_timer) ||
		    time_after(ip6_fl_gc_timer.expires, ttd))
			mod_timer(&ip6_fl_gc_timer, ttd);
	}
	spin_unlock_bh(&ip6_fl_lock);
}

static void ip6_fl_gc(struct timer_list *unused)
{
	int i;
	unsigned long now = jiffies;
	unsigned long sched = 0;

	spin_lock(&ip6_fl_lock);

	for (i = 0; i <= FL_HASH_MASK; i++) {
		struct ip6_flowlabel *fl;
		struct ip6_flowlabel __rcu **flp;

		flp = &fl_ht[i];
		while ((fl = rcu_dereference_protected(*flp,
						       lockdep_is_held(&ip6_fl_lock))) != NULL) {
			if (atomic_read(&fl->users) == 0) {
				unsigned long ttd = fl->lastuse + fl->linger;

				if (time_after(ttd, fl->expires))
					fl->expires = ttd;
				ttd = fl->expires;
				if (time_after_eq(now, ttd)) {
					*flp = fl->next;
					fl_free(fl);
					atomic_dec(&fl_size);
					continue;
				}
				if (!sched || time_before(ttd, sched))
					sched = ttd;
			}
			flp = &fl->next;
		}
	}
	if (!sched && atomic_read(&fl_size))
		sched = now + FL_MAX_LINGER;
	if (sched)
		mod_timer(&ip6_fl_gc_timer, sched);
	spin_unlock(&ip6_fl_lock);
}

static void __net_exit ip6_fl_purge(struct net *net)
{
	int i;

	spin_lock_bh(&ip6_fl_lock);
	for (i = 0; i <= FL_HASH_MASK; i++) {
		struct ip6_flowlabel *fl;
		struct ip6_flowlabel __rcu **flp;

		flp = &fl_ht[i];
		while ((fl = rcu_dereference_protected(*flp,
						       lockdep_is_held(&ip6_fl_lock))) != NULL) {
			if (net_eq(fl->fl_net, net) &&
			    atomic_read(&fl->users) == 0) {
				*flp = fl->next;
				fl_free(fl);
				atomic_dec(&fl_size);
				continue;
			}
			flp = &fl->next;
		}
	}
	spin_unlock_bh(&ip6_fl_lock);
}

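/*
 * Intern a freshly created flowlabel into the global hash.  A zero label
 * asks the kernel to pick a random unused one.  Returns NULL on success;
 * if the requested label raced back into the hash, the existing entry is
 * returned instead with its user count raised.
 */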
static struct ip6_flowlabel *fl_intern(struct net *net,
				       struct ip6_flowlabel *fl, __be32 label)
{
	struct ip6_flowlabel *lfl;

	fl->label = label & IPV6_FLOWLABEL_MASK;

	spin_lock_bh(&ip6_fl_lock);
	if (label == 0) {
		for (;;) {
			fl->label = htonl(prandom_u32())&IPV6_FLOWLABEL_MASK;
			if (fl->label) {
				lfl = __fl_lookup(net, fl->label);
				if (!lfl)
					break;
			}
		}
	} else {
		/*
		 * we dropped the ip6_fl_lock, so this entry could reappear
		 * and we need to recheck with it.
		 *
		 * OTOH no need to search the active socket first, like it is
		 * done in ipv6_flowlabel_opt - sock is locked, so new entry
		 * with the same label can only appear on another sock
		 */
		lfl = __fl_lookup(net, fl->label);
		if (lfl) {
			atomic_inc(&lfl->users);
			spin_unlock_bh(&ip6_fl_lock);
			return lfl;
		}
	}

	fl->lastuse = jiffies;
	fl->next = fl_ht[FL_HASH(fl->label)];
	rcu_assign_pointer(fl_ht[FL_HASH(fl->label)], fl);
	atomic_inc(&fl_size);
	spin_unlock_bh(&ip6_fl_lock);
	return NULL;
}

/* Socket flowlabel lists */

struct ip6_flowlabel *fl6_sock_lookup(struct sock *sk, __be32 label)
{
	struct ipv6_fl_socklist *sfl;
	struct ipv6_pinfo *np = inet6_sk(sk);

	label &= IPV6_FLOWLABEL_MASK;

	rcu_read_lock_bh();
	for_each_sk_fl_rcu(np, sfl) {
		struct ip6_flowlabel *fl = sfl->fl;

		/* Take the reference only while the entry is still live:
		 * a plain atomic_inc() could revive an entry whose count
		 * already hit zero and whose RCU free is pending.
		 */
		if (fl->label == label && atomic_inc_not_zero(&fl->users)) {
			fl->lastuse = jiffies;
			rcu_read_unlock_bh();
			return fl;
		}
	}
	rcu_read_unlock_bh();
	return NULL;
}
EXPORT_SYMBOL_GPL(fl6_sock_lookup);

void fl6_free_socklist(struct sock *sk)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct ipv6_fl_socklist *sfl;

	if (!rcu_access_pointer(np->ipv6_fl_list))
		return;

	spin_lock_bh(&ip6_sk_fl_lock);
	while ((sfl = rcu_dereference_protected(np->ipv6_fl_list,
						lockdep_is_held(&ip6_sk_fl_lock))) != NULL) {
		np->ipv6_fl_list = sfl->next;
		spin_unlock_bh(&ip6_sk_fl_lock);

		fl_release(sfl->fl);
		kfree_rcu(sfl, rcu);

		spin_lock_bh(&ip6_sk_fl_lock);
	}
	spin_unlock_bh(&ip6_sk_fl_lock);
}

/* Service routines */

/*
 * This is the only difficult place: the flowlabel enforces identical
 * headers up to and including the routing header, but the user may still
 * supply options that follow the rthdr.
 */
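
/*
 * In other words (an illustrative note): hop-by-hop, dst0 and routing
 * headers (counted in opt_nflen) always come from the flowlabel itself;
 * only the dst1 options behind the routing header (counted in opt_flen)
 * may be taken from the caller's own ipv6_txoptions.
 */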

struct ipv6_txoptions *fl6_merge_options(struct ipv6_txoptions *opt_space,
					 struct ip6_flowlabel *fl,
					 struct ipv6_txoptions *fopt)
{
	struct ipv6_txoptions *fl_opt = fl->opt;

	if (!fopt || fopt->opt_flen == 0)
		return fl_opt;

	if (fl_opt) {
		opt_space->hopopt = fl_opt->hopopt;
		opt_space->dst0opt = fl_opt->dst0opt;
		opt_space->srcrt = fl_opt->srcrt;
		opt_space->opt_nflen = fl_opt->opt_nflen;
	} else {
		if (fopt->opt_nflen == 0)
			return fopt;
		opt_space->hopopt = NULL;
		opt_space->dst0opt = NULL;
		opt_space->srcrt = NULL;
		opt_space->opt_nflen = 0;
	}
	opt_space->dst1opt = fopt->dst1opt;
	opt_space->opt_flen = fopt->opt_flen;
	opt_space->tot_len = fopt->tot_len;
	return opt_space;
}
EXPORT_SYMBOL_GPL(fl6_merge_options);

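/*
 * Clamp a user-supplied lifetime (in seconds) and convert it to jiffies:
 * values under FL_MIN_LINGER are raised to the minimum, and values over
 * FL_MAX_LINGER require CAP_NET_ADMIN (0 means the request is refused).
 */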
static unsigned long check_linger(unsigned long ttl)
{
	if (ttl < FL_MIN_LINGER)
		return FL_MIN_LINGER*HZ;
	if (ttl > FL_MAX_LINGER && !capable(CAP_NET_ADMIN))
		return 0;
	return ttl*HZ;
}

static int fl6_renew(struct ip6_flowlabel *fl, unsigned long linger, unsigned long expires)
{
	linger = check_linger(linger);
	if (!linger)
		return -EPERM;
	expires = check_linger(expires);
	if (!expires)
		return -EPERM;

	spin_lock_bh(&ip6_fl_lock);
	fl->lastuse = jiffies;
	if (time_before(fl->linger, linger))
		fl->linger = linger;
	if (time_before(expires, fl->linger))
		expires = fl->linger;
	if (time_before(fl->expires, fl->lastuse + expires))
		fl->expires = fl->lastuse + expires;
	spin_unlock_bh(&ip6_fl_lock);

	return 0;
}

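/*
 * Build a new flowlabel from a user request.  Any bytes following the
 * in6_flowlabel_req structure in optval are parsed as ancillary data and
 * may only contribute per-flow extension headers (opt_nflen); opt_flen
 * options are rejected here.
 */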
static struct ip6_flowlabel *
fl_create(struct net *net, struct sock *sk, struct in6_flowlabel_req *freq,
	  char __user *optval, int optlen, int *err_p)
{
	struct ip6_flowlabel *fl = NULL;
	int olen;
	int addr_type;
	int err;

	olen = optlen - CMSG_ALIGN(sizeof(*freq));
	err = -EINVAL;
	if (olen > 64 * 1024)
		goto done;

	err = -ENOMEM;
	fl = kzalloc(sizeof(*fl), GFP_KERNEL);
	if (!fl)
		goto done;

	if (olen > 0) {
		struct msghdr msg;
		struct flowi6 flowi6;
		struct ipcm6_cookie ipc6;

		err = -ENOMEM;
		fl->opt = kmalloc(sizeof(*fl->opt) + olen, GFP_KERNEL);
		if (!fl->opt)
			goto done;

		memset(fl->opt, 0, sizeof(*fl->opt));
		fl->opt->tot_len = sizeof(*fl->opt) + olen;
		err = -EFAULT;
		if (copy_from_user(fl->opt+1, optval+CMSG_ALIGN(sizeof(*freq)), olen))
			goto done;

		msg.msg_controllen = olen;
		msg.msg_control = (void *)(fl->opt+1);
		memset(&flowi6, 0, sizeof(flowi6));

		ipc6.opt = fl->opt;
		err = ip6_datagram_send_ctl(net, sk, &msg, &flowi6, &ipc6);
		if (err)
			goto done;
		err = -EINVAL;
		if (fl->opt->opt_flen)
			goto done;
		if (fl->opt->opt_nflen == 0) {
			kfree(fl->opt);
			fl->opt = NULL;
		}
	}

	fl->fl_net = net;
	fl->expires = jiffies;
	err = fl6_renew(fl, freq->flr_linger, freq->flr_expires);
	if (err)
		goto done;
	fl->share = freq->flr_share;
	addr_type = ipv6_addr_type(&freq->flr_dst);
	if ((addr_type & IPV6_ADDR_MAPPED) ||
	    addr_type == IPV6_ADDR_ANY) {
		err = -EINVAL;
		goto done;
	}
	fl->dst = freq->flr_dst;
	atomic_set(&fl->users, 1);
	switch (fl->share) {
	case IPV6_FL_S_EXCL:
	case IPV6_FL_S_ANY:
		break;
	case IPV6_FL_S_PROCESS:
		fl->owner.pid = get_task_pid(current, PIDTYPE_PID);
		break;
	case IPV6_FL_S_USER:
		fl->owner.uid = current_euid();
		break;
	default:
		err = -EINVAL;
		goto done;
	}
	return fl;

done:
	fl_free(fl);
	*err_p = err;
	return NULL;
}

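/*
 * Admission control for new flowlabels: always refuse when the global
 * table is full, and apply per-socket and table-pressure limits to
 * requesters without CAP_NET_ADMIN.
 */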
static int mem_check(struct sock *sk)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct ipv6_fl_socklist *sfl;
	int room = FL_MAX_SIZE - atomic_read(&fl_size);
	int count = 0;

	if (room > FL_MAX_SIZE - FL_MAX_PER_SOCK)
		return 0;

	rcu_read_lock_bh();
	for_each_sk_fl_rcu(np, sfl)
		count++;
	rcu_read_unlock_bh();

	if (room <= 0 ||
	    ((count >= FL_MAX_PER_SOCK ||
	      (count > 0 && room < FL_MAX_SIZE/2) || room < FL_MAX_SIZE/4) &&
	     !capable(CAP_NET_ADMIN)))
		return -ENOBUFS;

	return 0;
}

static inline void fl_link(struct ipv6_pinfo *np, struct ipv6_fl_socklist *sfl,
		struct ip6_flowlabel *fl)
{
	spin_lock_bh(&ip6_sk_fl_lock);
	sfl->fl = fl;
	sfl->next = np->ipv6_fl_list;
	rcu_assign_pointer(np->ipv6_fl_list, sfl);
	spin_unlock_bh(&ip6_sk_fl_lock);
}

int ipv6_flowlabel_opt_get(struct sock *sk, struct in6_flowlabel_req *freq,
			   int flags)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct ipv6_fl_socklist *sfl;

	if (flags & IPV6_FL_F_REMOTE) {
		freq->flr_label = np->rcv_flowinfo & IPV6_FLOWLABEL_MASK;
		return 0;
	}

	if (np->repflow) {
		freq->flr_label = np->flow_label;
		return 0;
	}

	rcu_read_lock_bh();

	for_each_sk_fl_rcu(np, sfl) {
		if (sfl->fl->label == (np->flow_label & IPV6_FLOWLABEL_MASK)) {
			spin_lock_bh(&ip6_fl_lock);
			freq->flr_label = sfl->fl->label;
			freq->flr_dst = sfl->fl->dst;
			freq->flr_share = sfl->fl->share;
			freq->flr_expires = (sfl->fl->expires - jiffies) / HZ;
			freq->flr_linger = sfl->fl->linger / HZ;

			spin_unlock_bh(&ip6_fl_lock);
			rcu_read_unlock_bh();
			return 0;
		}
	}
	rcu_read_unlock_bh();

	return -ENOENT;
}

int ipv6_flowlabel_opt(struct sock *sk, char __user *optval, int optlen)
{
	int uninitialized_var(err);
	struct net *net = sock_net(sk);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct in6_flowlabel_req freq;
	struct ipv6_fl_socklist *sfl1 = NULL;
	struct ipv6_fl_socklist *sfl;
	struct ipv6_fl_socklist __rcu **sflp;
	struct ip6_flowlabel *fl, *fl1 = NULL;

	if (optlen < sizeof(freq))
		return -EINVAL;

	if (copy_from_user(&freq, optval, sizeof(freq)))
		return -EFAULT;

	switch (freq.flr_action) {
	case IPV6_FL_A_PUT:
		if (freq.flr_flags & IPV6_FL_F_REFLECT) {
			if (sk->sk_protocol != IPPROTO_TCP)
				return -ENOPROTOOPT;
			if (!np->repflow)
				return -ESRCH;
			np->flow_label = 0;
			np->repflow = 0;
			return 0;
		}
		spin_lock_bh(&ip6_sk_fl_lock);
		for (sflp = &np->ipv6_fl_list;
		     (sfl = rcu_dereference_protected(*sflp,
						      lockdep_is_held(&ip6_sk_fl_lock))) != NULL;
		     sflp = &sfl->next) {
			if (sfl->fl->label == freq.flr_label) {
				if (freq.flr_label == (np->flow_label&IPV6_FLOWLABEL_MASK))
					np->flow_label &= ~IPV6_FLOWLABEL_MASK;
				*sflp = sfl->next;
				spin_unlock_bh(&ip6_sk_fl_lock);
				fl_release(sfl->fl);
				kfree_rcu(sfl, rcu);
				return 0;
			}
		}
		spin_unlock_bh(&ip6_sk_fl_lock);
		return -ESRCH;

	case IPV6_FL_A_RENEW:
		rcu_read_lock_bh();
		for_each_sk_fl_rcu(np, sfl) {
			if (sfl->fl->label == freq.flr_label) {
				err = fl6_renew(sfl->fl, freq.flr_linger, freq.flr_expires);
				rcu_read_unlock_bh();
				return err;
			}
		}
		rcu_read_unlock_bh();

		if (freq.flr_share == IPV6_FL_S_NONE &&
		    ns_capable(net->user_ns, CAP_NET_ADMIN)) {
			fl = fl_lookup(net, freq.flr_label);
			if (fl) {
				err = fl6_renew(fl, freq.flr_linger, freq.flr_expires);
				fl_release(fl);
				return err;
			}
		}
		return -ESRCH;

	case IPV6_FL_A_GET:
		if (freq.flr_flags & IPV6_FL_F_REFLECT) {
			if (net->ipv6.sysctl.flowlabel_consistency) {
				net_info_ratelimited("Cannot set IPV6_FL_F_REFLECT if the flowlabel_consistency sysctl is enabled\n");
				return -EPERM;
			}

			if (sk->sk_protocol != IPPROTO_TCP)
				return -ENOPROTOOPT;

			np->repflow = 1;
			return 0;
		}

		if (freq.flr_label & ~IPV6_FLOWLABEL_MASK)
			return -EINVAL;

		if (net->ipv6.sysctl.flowlabel_state_ranges &&
		    (freq.flr_label & IPV6_FLOWLABEL_STATELESS_FLAG))
			return -ERANGE;

		fl = fl_create(net, sk, &freq, optval, optlen, &err);
		if (!fl)
			return err;
		sfl1 = kmalloc(sizeof(*sfl1), GFP_KERNEL);

		if (freq.flr_label) {
			err = -EEXIST;
			rcu_read_lock_bh();
			for_each_sk_fl_rcu(np, sfl) {
				if (sfl->fl->label == freq.flr_label) {
					if (freq.flr_flags&IPV6_FL_F_EXCL) {
						rcu_read_unlock_bh();
						goto done;
					}
					fl1 = sfl->fl;
					atomic_inc(&fl1->users);
					break;
				}
			}
			rcu_read_unlock_bh();

			if (!fl1)
				fl1 = fl_lookup(net, freq.flr_label);
			if (fl1) {
recheck:
				err = -EEXIST;
				if (freq.flr_flags&IPV6_FL_F_EXCL)
					goto release;
				err = -EPERM;
				if (fl1->share == IPV6_FL_S_EXCL ||
				    fl1->share != fl->share ||
				    ((fl1->share == IPV6_FL_S_PROCESS) &&
				     (fl1->owner.pid != fl->owner.pid)) ||
				    ((fl1->share == IPV6_FL_S_USER) &&
				     !uid_eq(fl1->owner.uid, fl->owner.uid)))
					goto release;

				err = -ENOMEM;
				if (!sfl1)
					goto release;
				if (fl->linger > fl1->linger)
					fl1->linger = fl->linger;
				if ((long)(fl->expires - fl1->expires) > 0)
					fl1->expires = fl->expires;
				fl_link(np, sfl1, fl1);
				fl_free(fl);
				return 0;

release:
				fl_release(fl1);
				goto done;
			}
		}
		err = -ENOENT;
		if (!(freq.flr_flags&IPV6_FL_F_CREATE))
			goto done;

		err = -ENOMEM;
		if (!sfl1)
			goto done;

		err = mem_check(sk);
		if (err != 0)
			goto done;

		fl1 = fl_intern(net, fl, freq.flr_label);
		if (fl1)
			goto recheck;

		if (!freq.flr_label) {
			if (copy_to_user(&((struct in6_flowlabel_req __user *) optval)->flr_label,
					 &fl->label, sizeof(fl->label))) {
				/* Intentionally ignore fault. */
			}
		}

		fl_link(np, sfl1, fl);
		return 0;

	default:
		return -EINVAL;
	}

done:
	fl_free(fl);
	kfree(sfl1);
	return err;
}
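
/*
 * A hedged userspace sketch (not part of this file): the management
 * actions above are reached through the IPV6_FLOWLABEL_MGR socket
 * option, roughly as follows.
 *
 *	struct in6_flowlabel_req freq = {
 *		.flr_action = IPV6_FL_A_GET,
 *		.flr_flags  = IPV6_FL_F_CREATE,
 *		.flr_share  = IPV6_FL_S_EXCL,
 *		.flr_label  = 0,	// 0 lets the kernel pick a label
 *	};
 *	freq.flr_dst = dst;	// destination in6_addr (hypothetical name)
 *	setsockopt(fd, SOL_IPV6, IPV6_FLOWLABEL_MGR, &freq, sizeof(freq));
 */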

#ifdef CONFIG_PROC_FS

struct ip6fl_iter_state {
	struct seq_net_private p;
	struct pid_namespace *pid_ns;
	int bucket;
};

#define ip6fl_seq_private(seq)	((struct ip6fl_iter_state *)(seq)->private)

static struct ip6_flowlabel *ip6fl_get_first(struct seq_file *seq)
{
	struct ip6_flowlabel *fl = NULL;
	struct ip6fl_iter_state *state = ip6fl_seq_private(seq);
	struct net *net = seq_file_net(seq);

	for (state->bucket = 0; state->bucket <= FL_HASH_MASK; ++state->bucket) {
		for_each_fl_rcu(state->bucket, fl) {
			if (net_eq(fl->fl_net, net))
				goto out;
		}
	}
	fl = NULL;
out:
	return fl;
}

static struct ip6_flowlabel *ip6fl_get_next(struct seq_file *seq, struct ip6_flowlabel *fl)
{
	struct ip6fl_iter_state *state = ip6fl_seq_private(seq);
	struct net *net = seq_file_net(seq);

	for_each_fl_continue_rcu(fl) {
		if (net_eq(fl->fl_net, net))
			goto out;
	}

try_again:
	if (++state->bucket <= FL_HASH_MASK) {
		for_each_fl_rcu(state->bucket, fl) {
			if (net_eq(fl->fl_net, net))
				goto out;
		}
		goto try_again;
	}
	fl = NULL;

out:
	return fl;
}

static struct ip6_flowlabel *ip6fl_get_idx(struct seq_file *seq, loff_t pos)
{
	struct ip6_flowlabel *fl = ip6fl_get_first(seq);

	if (fl)
		while (pos && (fl = ip6fl_get_next(seq, fl)) != NULL)
			--pos;
	return pos ? NULL : fl;
}

static void *ip6fl_seq_start(struct seq_file *seq, loff_t *pos)
	__acquires(RCU)
{
	struct ip6fl_iter_state *state = ip6fl_seq_private(seq);

	state->pid_ns = proc_pid_ns(file_inode(seq->file));

	rcu_read_lock_bh();
	return *pos ? ip6fl_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
}

static void *ip6fl_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct ip6_flowlabel *fl;

	if (v == SEQ_START_TOKEN)
		fl = ip6fl_get_first(seq);
	else
		fl = ip6fl_get_next(seq, v);
	++*pos;
	return fl;
}

static void ip6fl_seq_stop(struct seq_file *seq, void *v)
	__releases(RCU)
{
	rcu_read_unlock_bh();
}

static int ip6fl_seq_show(struct seq_file *seq, void *v)
{
	struct ip6fl_iter_state *state = ip6fl_seq_private(seq);

	if (v == SEQ_START_TOKEN) {
		seq_puts(seq, "Label S Owner  Users  Linger Expires  Dst                              Opt\n");
	} else {
		struct ip6_flowlabel *fl = v;

		seq_printf(seq,
			   "%05X %-1d %-6d %-6d %-6ld %-8ld %pi6 %-4d\n",
			   (unsigned int)ntohl(fl->label),
			   fl->share,
			   ((fl->share == IPV6_FL_S_PROCESS) ?
			    pid_nr_ns(fl->owner.pid, state->pid_ns) :
			    ((fl->share == IPV6_FL_S_USER) ?
			     from_kuid_munged(seq_user_ns(seq), fl->owner.uid) :
			     0)),
			   atomic_read(&fl->users),
			   fl->linger/HZ,
			   (long)(fl->expires - jiffies)/HZ,
			   &fl->dst,
			   fl->opt ? fl->opt->opt_nflen : 0);
	}
	return 0;
}
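
/*
 * Illustrative /proc/net/ip6_flowlabel output (values are made up;
 * %pi6 prints the destination as 32 contiguous hex digits):
 *
 *	Label S Owner  Users  Linger Expires  Dst                              Opt
 *	A1B2C 1 1234   1      6      58       20010db8000000000000000000000001 0
 */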

static const struct seq_operations ip6fl_seq_ops = {
	.start	=	ip6fl_seq_start,
	.next	=	ip6fl_seq_next,
	.stop	=	ip6fl_seq_stop,
	.show	=	ip6fl_seq_show,
};

static int __net_init ip6_flowlabel_proc_init(struct net *net)
{
	if (!proc_create_net("ip6_flowlabel", 0444, net->proc_net,
			&ip6fl_seq_ops, sizeof(struct ip6fl_iter_state)))
		return -ENOMEM;
	return 0;
}

static void __net_exit ip6_flowlabel_proc_fini(struct net *net)
{
	remove_proc_entry("ip6_flowlabel", net->proc_net);
}
#else
static inline int ip6_flowlabel_proc_init(struct net *net)
{
	return 0;
}
static inline void ip6_flowlabel_proc_fini(struct net *net)
{
}
#endif

static void __net_exit ip6_flowlabel_net_exit(struct net *net)
{
	ip6_fl_purge(net);
	ip6_flowlabel_proc_fini(net);
}

static struct pernet_operations ip6_flowlabel_net_ops = {
	.init = ip6_flowlabel_proc_init,
	.exit = ip6_flowlabel_net_exit,
};

int ip6_flowlabel_init(void)
{
	return register_pernet_subsys(&ip6_flowlabel_net_ops);
}

void ip6_flowlabel_cleanup(void)
{
	del_timer(&ip6_fl_gc_timer);
	unregister_pernet_subsys(&ip6_flowlabel_net_ops);
}
859