xref: /openbmc/linux/net/ipv6/ip6_fib.c (revision e868d61272caa648214046a096e5a6bfc068dc8c)
1 /*
2  *	Linux INET6 implementation
3  *	Forwarding Information Database
4  *
5  *	Authors:
6  *	Pedro Roque		<roque@di.fc.ul.pt>
7  *
8  *	$Id: ip6_fib.c,v 1.25 2001/10/31 21:55:55 davem Exp $
9  *
10  *	This program is free software; you can redistribute it and/or
11  *      modify it under the terms of the GNU General Public License
12  *      as published by the Free Software Foundation; either version
13  *      2 of the License, or (at your option) any later version.
14  */
15 
16 /*
17  * 	Changes:
18  * 	Yuji SEKIYA @USAGI:	Support default route on router node;
19  * 				remove ip6_null_entry from the top of
20  * 				routing table.
21  * 	Ville Nuorvala:		Fixed routing subtrees.
22  */
23 #include <linux/errno.h>
24 #include <linux/types.h>
25 #include <linux/net.h>
26 #include <linux/route.h>
27 #include <linux/netdevice.h>
28 #include <linux/in6.h>
29 #include <linux/init.h>
30 #include <linux/list.h>
31 
32 #ifdef 	CONFIG_PROC_FS
33 #include <linux/proc_fs.h>
34 #endif
35 
36 #include <net/ipv6.h>
37 #include <net/ndisc.h>
38 #include <net/addrconf.h>
39 
40 #include <net/ip6_fib.h>
41 #include <net/ip6_route.h>
42 
43 #define RT6_DEBUG 2
44 
45 #if RT6_DEBUG >= 3
46 #define RT6_TRACE(x...) printk(KERN_DEBUG x)
47 #else
48 #define RT6_TRACE(x...) do { ; } while (0)
49 #endif
50 
51 struct rt6_statistics	rt6_stats;
52 
53 static struct kmem_cache * fib6_node_kmem __read_mostly;
54 
55 enum fib_walk_state_t
56 {
57 #ifdef CONFIG_IPV6_SUBTREES
58 	FWS_S,
59 #endif
60 	FWS_L,
61 	FWS_R,
62 	FWS_C,
63 	FWS_U
64 };
65 
66 struct fib6_cleaner_t
67 {
68 	struct fib6_walker_t w;
69 	int (*func)(struct rt6_info *, void *arg);
70 	void *arg;
71 };
72 
73 static DEFINE_RWLOCK(fib6_walker_lock);
74 
75 #ifdef CONFIG_IPV6_SUBTREES
76 #define FWS_INIT FWS_S
77 #else
78 #define FWS_INIT FWS_L
79 #endif
80 
81 static void fib6_prune_clones(struct fib6_node *fn, struct rt6_info *rt);
82 static struct rt6_info * fib6_find_prefix(struct fib6_node *fn);
83 static struct fib6_node * fib6_repair_tree(struct fib6_node *fn);
84 static int fib6_walk(struct fib6_walker_t *w);
85 static int fib6_walk_continue(struct fib6_walker_t *w);
86 
87 /*
88  *	A routing update causes an increase of the serial number on the
89  *	affected subtree. This allows for cached routes to be asynchronously
90  *	tested when modifications are made to the destination cache as a
91  *	result of redirects, path MTU changes, etc.
92  */
93 
94 static __u32 rt_sernum;
95 
96 static DEFINE_TIMER(ip6_fib_timer, fib6_run_gc, 0, 0);
97 
98 static struct fib6_walker_t fib6_walker_list = {
99 	.prev	= &fib6_walker_list,
100 	.next	= &fib6_walker_list,
101 };
102 
103 #define FOR_WALKERS(w) for ((w)=fib6_walker_list.next; (w) != &fib6_walker_list; (w)=(w)->next)
104 
105 static inline void fib6_walker_link(struct fib6_walker_t *w)
106 {
107 	write_lock_bh(&fib6_walker_lock);
108 	w->next = fib6_walker_list.next;
109 	w->prev = &fib6_walker_list;
110 	w->next->prev = w;
111 	w->prev->next = w;
112 	write_unlock_bh(&fib6_walker_lock);
113 }
114 
115 static inline void fib6_walker_unlink(struct fib6_walker_t *w)
116 {
117 	write_lock_bh(&fib6_walker_lock);
118 	w->next->prev = w->prev;
119 	w->prev->next = w->next;
120 	w->prev = w->next = w;
121 	write_unlock_bh(&fib6_walker_lock);
122 }
123 static __inline__ u32 fib6_new_sernum(void)
124 {
125 	u32 n = ++rt_sernum;
126 	if ((__s32)n <= 0)
127 		rt_sernum = n = 1;
128 	return n;
129 }
130 
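/*
 *	Illustrative sketch (not part of the original file): how the serial
 *	number is typically consumed.  A cached route remembers the
 *	fn_sernum of the node it was generated from; if a later update has
 *	bumped the serial of that subtree, the cached entry is treated as
 *	stale and revalidated.  The helper name below is hypothetical.
 *
 *		static int example_is_stale(struct fib6_node *fn, u32 cached)
 *		{
 *			return fn->fn_sernum != cached;
 *		}
 */
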
131 /*
132  *	Auxiliary address test functions for the radix tree.
133  *
134  *	These assume a 32-bit processor (although they will also work
135  *	on 64-bit processors)
136  */
137 
138 /*
139  *	test bit
140  */
141 
142 static __inline__ __be32 addr_bit_set(void *token, int fn_bit)
143 {
144 	__be32 *addr = token;
145 
146 	return htonl(1 << ((~fn_bit)&0x1F)) & addr[fn_bit>>5];
147 }
148 
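/*
 *	Usage note (illustrative, not from the original source):
 *	addr_bit_set() tests bit fn_bit of the address in network bit order,
 *	i.e. bit 0 is the most significant bit of the first 32-bit word.
 *	For example, with
 *
 *		struct in6_addr a;
 *		ipv6_addr_set(&a, htonl(0x20010db8), 0, 0, htonl(1));
 *
 *	addr_bit_set(&a, 2) is non-zero (the third bit of 0x2... is set)
 *	and addr_bit_set(&a, 3) is zero.
 */
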
149 static __inline__ struct fib6_node * node_alloc(void)
150 {
151 	struct fib6_node *fn;
152 
153 	fn = kmem_cache_zalloc(fib6_node_kmem, GFP_ATOMIC);
154 
155 	return fn;
156 }
157 
158 static __inline__ void node_free(struct fib6_node * fn)
159 {
160 	kmem_cache_free(fib6_node_kmem, fn);
161 }
162 
163 static __inline__ void rt6_release(struct rt6_info *rt)
164 {
165 	if (atomic_dec_and_test(&rt->rt6i_ref))
166 		dst_free(&rt->u.dst);
167 }
168 
169 static struct fib6_table fib6_main_tbl = {
170 	.tb6_id		= RT6_TABLE_MAIN,
171 	.tb6_root	= {
172 		.leaf		= &ip6_null_entry,
173 		.fn_flags	= RTN_ROOT | RTN_TL_ROOT | RTN_RTINFO,
174 	},
175 };
176 
177 #ifdef CONFIG_IPV6_MULTIPLE_TABLES
178 #define FIB_TABLE_HASHSZ 256
179 #else
180 #define FIB_TABLE_HASHSZ 1
181 #endif
182 static struct hlist_head fib_table_hash[FIB_TABLE_HASHSZ];
183 
184 static void fib6_link_table(struct fib6_table *tb)
185 {
186 	unsigned int h;
187 
188 	/*
189 	 * Initialize the table lock in a single place to give lockdep a key;
190 	 * tables aren't visible prior to being linked to the list.
191 	 */
192 	rwlock_init(&tb->tb6_lock);
193 
194 	h = tb->tb6_id & (FIB_TABLE_HASHSZ - 1);
195 
196 	/*
197 	 * No protection necessary; this is the only list mutation
198 	 * operation, and tables never disappear once they exist.
199 	 */
200 	hlist_add_head_rcu(&tb->tb6_hlist, &fib_table_hash[h]);
201 }
202 
203 #ifdef CONFIG_IPV6_MULTIPLE_TABLES
204 static struct fib6_table fib6_local_tbl = {
205 	.tb6_id		= RT6_TABLE_LOCAL,
206 	.tb6_root 	= {
207 		.leaf		= &ip6_null_entry,
208 		.fn_flags	= RTN_ROOT | RTN_TL_ROOT | RTN_RTINFO,
209 	},
210 };
211 
212 static struct fib6_table *fib6_alloc_table(u32 id)
213 {
214 	struct fib6_table *table;
215 
216 	table = kzalloc(sizeof(*table), GFP_ATOMIC);
217 	if (table != NULL) {
218 		table->tb6_id = id;
219 		table->tb6_root.leaf = &ip6_null_entry;
220 		table->tb6_root.fn_flags = RTN_ROOT | RTN_TL_ROOT | RTN_RTINFO;
221 	}
222 
223 	return table;
224 }
225 
226 struct fib6_table *fib6_new_table(u32 id)
227 {
228 	struct fib6_table *tb;
229 
230 	if (id == 0)
231 		id = RT6_TABLE_MAIN;
232 	tb = fib6_get_table(id);
233 	if (tb)
234 		return tb;
235 
236 	tb = fib6_alloc_table(id);
237 	if (tb != NULL)
238 		fib6_link_table(tb);
239 
240 	return tb;
241 }
242 
243 struct fib6_table *fib6_get_table(u32 id)
244 {
245 	struct fib6_table *tb;
246 	struct hlist_node *node;
247 	unsigned int h;
248 
249 	if (id == 0)
250 		id = RT6_TABLE_MAIN;
251 	h = id & (FIB_TABLE_HASHSZ - 1);
252 	rcu_read_lock();
253 	hlist_for_each_entry_rcu(tb, node, &fib_table_hash[h], tb6_hlist) {
254 		if (tb->tb6_id == id) {
255 			rcu_read_unlock();
256 			return tb;
257 		}
258 	}
259 	rcu_read_unlock();
260 
261 	return NULL;
262 }
263 
264 static void __init fib6_tables_init(void)
265 {
266 	fib6_link_table(&fib6_main_tbl);
267 	fib6_link_table(&fib6_local_tbl);
268 }
269 
270 #else
271 
272 struct fib6_table *fib6_new_table(u32 id)
273 {
274 	return fib6_get_table(id);
275 }
276 
277 struct fib6_table *fib6_get_table(u32 id)
278 {
279 	return &fib6_main_tbl;
280 }
281 
282 struct dst_entry *fib6_rule_lookup(struct flowi *fl, int flags,
283 				   pol_lookup_t lookup)
284 {
285 	return (struct dst_entry *) lookup(&fib6_main_tbl, fl, flags);
286 }
287 
288 static void __init fib6_tables_init(void)
289 {
290 	fib6_link_table(&fib6_main_tbl);
291 }
292 
293 #endif
294 
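/*
 *	Illustrative sketch (not part of the original file): the typical
 *	caller pattern for the table API above.  A route insertion path does
 *	roughly the following; error handling is trimmed and the variables
 *	are hypothetical.
 *
 *		struct fib6_table *tb = fib6_new_table(RT6_TABLE_MAIN);
 *
 *		if (tb) {
 *			write_lock_bh(&tb->tb6_lock);
 *			err = fib6_add(&tb->tb6_root, rt, info);
 *			write_unlock_bh(&tb->tb6_lock);
 *		}
 */
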
295 static int fib6_dump_node(struct fib6_walker_t *w)
296 {
297 	int res;
298 	struct rt6_info *rt;
299 
300 	for (rt = w->leaf; rt; rt = rt->u.dst.rt6_next) {
301 		res = rt6_dump_route(rt, w->args);
302 		if (res < 0) {
303 			/* Frame is full, suspend walking */
304 			w->leaf = rt;
305 			return 1;
306 		}
307 		BUG_TRAP(res!=0);
308 	}
309 	w->leaf = NULL;
310 	return 0;
311 }
312 
313 static void fib6_dump_end(struct netlink_callback *cb)
314 {
315 	struct fib6_walker_t *w = (void*)cb->args[2];
316 
317 	if (w) {
318 		cb->args[2] = 0;
319 		kfree(w);
320 	}
321 	cb->done = (void*)cb->args[3];
322 	cb->args[1] = 3;
323 }
324 
325 static int fib6_dump_done(struct netlink_callback *cb)
326 {
327 	fib6_dump_end(cb);
328 	return cb->done ? cb->done(cb) : 0;
329 }
330 
331 static int fib6_dump_table(struct fib6_table *table, struct sk_buff *skb,
332 			   struct netlink_callback *cb)
333 {
334 	struct fib6_walker_t *w;
335 	int res;
336 
337 	w = (void *)cb->args[2];
338 	w->root = &table->tb6_root;
339 
340 	if (cb->args[4] == 0) {
341 		read_lock_bh(&table->tb6_lock);
342 		res = fib6_walk(w);
343 		read_unlock_bh(&table->tb6_lock);
344 		if (res > 0)
345 			cb->args[4] = 1;
346 	} else {
347 		read_lock_bh(&table->tb6_lock);
348 		res = fib6_walk_continue(w);
349 		read_unlock_bh(&table->tb6_lock);
350 		if (res != 0) {
351 			if (res < 0)
352 				fib6_walker_unlink(w);
353 			goto end;
354 		}
355 		fib6_walker_unlink(w);
356 		cb->args[4] = 0;
357 	}
358 end:
359 	return res;
360 }
361 
362 static int inet6_dump_fib(struct sk_buff *skb, struct netlink_callback *cb)
363 {
364 	unsigned int h, s_h;
365 	unsigned int e = 0, s_e;
366 	struct rt6_rtnl_dump_arg arg;
367 	struct fib6_walker_t *w;
368 	struct fib6_table *tb;
369 	struct hlist_node *node;
370 	int res = 0;
371 
372 	s_h = cb->args[0];
373 	s_e = cb->args[1];
374 
375 	w = (void *)cb->args[2];
376 	if (w == NULL) {
377 		/* New dump:
378 		 *
379 		 * 1. hook callback destructor.
380 		 */
381 		cb->args[3] = (long)cb->done;
382 		cb->done = fib6_dump_done;
383 
384 		/*
385 		 * 2. allocate and initialize walker.
386 		 */
387 		w = kzalloc(sizeof(*w), GFP_ATOMIC);
388 		if (w == NULL)
389 			return -ENOMEM;
390 		w->func = fib6_dump_node;
391 		cb->args[2] = (long)w;
392 	}
393 
394 	arg.skb = skb;
395 	arg.cb = cb;
396 	w->args = &arg;
397 
398 	for (h = s_h; h < FIB_TABLE_HASHSZ; h++, s_e = 0) {
399 		e = 0;
400 		hlist_for_each_entry(tb, node, &fib_table_hash[h], tb6_hlist) {
401 			if (e < s_e)
402 				goto next;
403 			res = fib6_dump_table(tb, skb, cb);
404 			if (res != 0)
405 				goto out;
406 next:
407 			e++;
408 		}
409 	}
410 out:
411 	cb->args[1] = e;
412 	cb->args[0] = h;
413 
414 	res = res < 0 ? res : skb->len;
415 	if (res <= 0)
416 		fib6_dump_end(cb);
417 	return res;
418 }
419 
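/*
 *	For reference (derived from the dump code above), the netlink
 *	callback scratch space is used as follows:
 *
 *		cb->args[0] - hash bucket currently being dumped
 *		cb->args[1] - table index within that bucket
 *		cb->args[2] - pointer to the allocated fib6_walker_t
 *		cb->args[3] - the original cb->done destructor
 *		cb->args[4] - 1 while a per-table walk is suspended
 */
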
420 /*
421  *	Routing Table
422  *
423  *	return the appropriate node for a routing tree "add" operation
424  *	by either creating and inserting or by returning an existing
425  *	node.
426  */
427 
428 static struct fib6_node * fib6_add_1(struct fib6_node *root, void *addr,
429 				     int addrlen, int plen,
430 				     int offset)
431 {
432 	struct fib6_node *fn, *in, *ln;
433 	struct fib6_node *pn = NULL;
434 	struct rt6key *key;
435 	int	bit;
436 	__be32	dir = 0;
437 	__u32	sernum = fib6_new_sernum();
438 
439 	RT6_TRACE("fib6_add_1\n");
440 
441 	/* insert node in tree */
442 
443 	fn = root;
444 
445 	do {
446 		key = (struct rt6key *)((u8 *)fn->leaf + offset);
447 
448 		/*
449 		 *	Prefix match
450 		 */
451 		if (plen < fn->fn_bit ||
452 		    !ipv6_prefix_equal(&key->addr, addr, fn->fn_bit))
453 			goto insert_above;
454 
455 		/*
456 		 *	Exact match ?
457 		 */
458 
459 		if (plen == fn->fn_bit) {
460 			/* clean up an intermediate node */
461 			if ((fn->fn_flags & RTN_RTINFO) == 0) {
462 				rt6_release(fn->leaf);
463 				fn->leaf = NULL;
464 			}
465 
466 			fn->fn_sernum = sernum;
467 
468 			return fn;
469 		}
470 
471 		/*
472 		 *	We have more bits to go
473 		 */
474 
475 		/* Try to walk down on tree. */
476 		fn->fn_sernum = sernum;
477 		dir = addr_bit_set(addr, fn->fn_bit);
478 		pn = fn;
479 		fn = dir ? fn->right: fn->left;
480 	} while (fn);
481 
482 	/*
483 	 *	We walked to the bottom of tree.
484 	 *	Create new leaf node without children.
485 	 */
486 
487 	ln = node_alloc();
488 
489 	if (ln == NULL)
490 		return NULL;
491 	ln->fn_bit = plen;
492 
493 	ln->parent = pn;
494 	ln->fn_sernum = sernum;
495 
496 	if (dir)
497 		pn->right = ln;
498 	else
499 		pn->left  = ln;
500 
501 	return ln;
502 
503 
504 insert_above:
505 	/*
506 	 * Split, since we no longer share a common prefix, or the
507 	 * new route is less specific than this node.
508 	 * We have to insert an intermediate node in the tree; this
509 	 * new node will point to the leaf we need to create
510 	 * and to the current node.
511 	 */
512 
513 	pn = fn->parent;
514 
515 	/* find the first bit of difference between the two addresses.
516 
517 	   See comment in __ipv6_addr_diff: bit may be an invalid value,
518 	   but if it is >= plen, the value is ignored in any case.
519 	 */
520 
521 	bit = __ipv6_addr_diff(addr, &key->addr, addrlen);
522 
523 	/*
524 	 *		(intermediate)[in]
525 	 *	          /	   \
526 	 *	(new leaf node)[ln] (old node)[fn]
527 	 */
528 	if (plen > bit) {
529 		in = node_alloc();
530 		ln = node_alloc();
531 
532 		if (in == NULL || ln == NULL) {
533 			if (in)
534 				node_free(in);
535 			if (ln)
536 				node_free(ln);
537 			return NULL;
538 		}
539 
540 		/*
541 		 * new intermediate node.
542 		 * RTN_RTINFO will be off,
543 		 * since an address that chooses one of
544 		 * the branches would not match the less specific
545 		 * routes in the other branch.
546 		 */
547 
548 		in->fn_bit = bit;
549 
550 		in->parent = pn;
551 		in->leaf = fn->leaf;
552 		atomic_inc(&in->leaf->rt6i_ref);
553 
554 		in->fn_sernum = sernum;
555 
556 		/* update parent pointer */
557 		if (dir)
558 			pn->right = in;
559 		else
560 			pn->left  = in;
561 
562 		ln->fn_bit = plen;
563 
564 		ln->parent = in;
565 		fn->parent = in;
566 
567 		ln->fn_sernum = sernum;
568 
569 		if (addr_bit_set(addr, bit)) {
570 			in->right = ln;
571 			in->left  = fn;
572 		} else {
573 			in->left  = ln;
574 			in->right = fn;
575 		}
576 	} else { /* plen <= bit */
577 
578 		/*
579 		 *		(new leaf node)[ln]
580 		 *	          /	   \
581 		 *	     (old node)[fn] NULL
582 		 */
583 
584 		ln = node_alloc();
585 
586 		if (ln == NULL)
587 			return NULL;
588 
589 		ln->fn_bit = plen;
590 
591 		ln->parent = pn;
592 
593 		ln->fn_sernum = sernum;
594 
595 		if (dir)
596 			pn->right = ln;
597 		else
598 			pn->left  = ln;
599 
600 		if (addr_bit_set(&key->addr, plen))
601 			ln->right = fn;
602 		else
603 			ln->left  = fn;
604 
605 		fn->parent = ln;
606 	}
607 	return ln;
608 }
609 
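/*
 *	Worked example for the insert_above case (illustrative, not from the
 *	original source): suppose the tree already holds 2001:db8:0:1::/64
 *	and 2001:db8:0:2::/64 is added.  The two prefixes first differ at
 *	bit 62, so __ipv6_addr_diff() returns 62 and, because plen (64) is
 *	greater than bit (62), an intermediate node with fn_bit = 62 is
 *	allocated; the old /64 and the new /64 become its two children,
 *	chosen by addr_bit_set(addr, 62).
 */
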
610 /*
611  *	Insert routing information in a node.
612  */
613 
614 static int fib6_add_rt2node(struct fib6_node *fn, struct rt6_info *rt,
615 			    struct nl_info *info)
616 {
617 	struct rt6_info *iter = NULL;
618 	struct rt6_info **ins;
619 
620 	ins = &fn->leaf;
621 
622 	if (fn->fn_flags&RTN_TL_ROOT &&
623 	    fn->leaf == &ip6_null_entry &&
624 	    !(rt->rt6i_flags & (RTF_DEFAULT | RTF_ADDRCONF)) ){
625 		fn->leaf = rt;
626 		rt->u.dst.rt6_next = NULL;
627 		goto out;
628 	}
629 
630 	for (iter = fn->leaf; iter; iter=iter->u.dst.rt6_next) {
631 		/*
632 		 *	Search for duplicates
633 		 */
634 
635 		if (iter->rt6i_metric == rt->rt6i_metric) {
636 			/*
637 			 *	Same priority level
638 			 */
639 
640 			if (iter->rt6i_dev == rt->rt6i_dev &&
641 			    iter->rt6i_idev == rt->rt6i_idev &&
642 			    ipv6_addr_equal(&iter->rt6i_gateway,
643 					    &rt->rt6i_gateway)) {
644 				if (!(iter->rt6i_flags&RTF_EXPIRES))
645 					return -EEXIST;
646 				iter->rt6i_expires = rt->rt6i_expires;
647 				if (!(rt->rt6i_flags&RTF_EXPIRES)) {
648 					iter->rt6i_flags &= ~RTF_EXPIRES;
649 					iter->rt6i_expires = 0;
650 				}
651 				return -EEXIST;
652 			}
653 		}
654 
655 		if (iter->rt6i_metric > rt->rt6i_metric)
656 			break;
657 
658 		ins = &iter->u.dst.rt6_next;
659 	}
660 
661 	/* Reset round-robin state, if necessary */
662 	if (ins == &fn->leaf)
663 		fn->rr_ptr = NULL;
664 
665 	/*
666 	 *	insert node
667 	 */
668 
669 out:
670 	rt->u.dst.rt6_next = iter;
671 	*ins = rt;
672 	rt->rt6i_node = fn;
673 	atomic_inc(&rt->rt6i_ref);
674 	inet6_rt_notify(RTM_NEWROUTE, rt, info);
675 	rt6_stats.fib_rt_entries++;
676 
677 	if ((fn->fn_flags & RTN_RTINFO) == 0) {
678 		rt6_stats.fib_route_nodes++;
679 		fn->fn_flags |= RTN_RTINFO;
680 	}
681 
682 	return 0;
683 }
684 
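/*
 *	For reference (derived from fib6_add_rt2node above): the routes
 *	hanging off fn->leaf are kept sorted by ascending rt6i_metric, and a
 *	route with the same metric, device, inet6_dev and gateway as an
 *	existing one is rejected with -EEXIST; if the existing entry carries
 *	RTF_EXPIRES, its expiry is first refreshed from the new route.
 */
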
685 static __inline__ void fib6_start_gc(struct rt6_info *rt)
686 {
687 	if (ip6_fib_timer.expires == 0 &&
688 	    (rt->rt6i_flags & (RTF_EXPIRES|RTF_CACHE)))
689 		mod_timer(&ip6_fib_timer, jiffies + ip6_rt_gc_interval);
690 }
691 
692 void fib6_force_start_gc(void)
693 {
694 	if (ip6_fib_timer.expires == 0)
695 		mod_timer(&ip6_fib_timer, jiffies + ip6_rt_gc_interval);
696 }
697 
698 /*
699  *	Add routing information to the routing tree.
700  *	<destination addr>/<source addr>
701  *	with source addr info in sub-trees
702  */
703 
704 int fib6_add(struct fib6_node *root, struct rt6_info *rt, struct nl_info *info)
705 {
706 	struct fib6_node *fn, *pn = NULL;
707 	int err = -ENOMEM;
708 
709 	fn = fib6_add_1(root, &rt->rt6i_dst.addr, sizeof(struct in6_addr),
710 			rt->rt6i_dst.plen, offsetof(struct rt6_info, rt6i_dst));
711 
712 	if (fn == NULL)
713 		goto out;
714 
715 	pn = fn;
716 
717 #ifdef CONFIG_IPV6_SUBTREES
718 	if (rt->rt6i_src.plen) {
719 		struct fib6_node *sn;
720 
721 		if (fn->subtree == NULL) {
722 			struct fib6_node *sfn;
723 
724 			/*
725 			 * Create subtree.
726 			 *
727 			 *		fn[main tree]
728 			 *		|
729 			 *		sfn[subtree root]
730 			 *		   \
731 			 *		    sn[new leaf node]
732 			 */
733 
734 			/* Create subtree root node */
735 			sfn = node_alloc();
736 			if (sfn == NULL)
737 				goto st_failure;
738 
739 			sfn->leaf = &ip6_null_entry;
740 			atomic_inc(&ip6_null_entry.rt6i_ref);
741 			sfn->fn_flags = RTN_ROOT;
742 			sfn->fn_sernum = fib6_new_sernum();
743 
744 			/* Now add the first leaf node to new subtree */
745 
746 			sn = fib6_add_1(sfn, &rt->rt6i_src.addr,
747 					sizeof(struct in6_addr), rt->rt6i_src.plen,
748 					offsetof(struct rt6_info, rt6i_src));
749 
750 			if (sn == NULL) {
751 				/* If it failed, discard the just-allocated
752 				   subtree root, and then (in st_failure) the
753 				   stale node in the main tree.
754 				 */
755 				node_free(sfn);
756 				goto st_failure;
757 			}
758 
759 			/* Now link new subtree to main tree */
760 			sfn->parent = fn;
761 			fn->subtree = sfn;
762 		} else {
763 			sn = fib6_add_1(fn->subtree, &rt->rt6i_src.addr,
764 					sizeof(struct in6_addr), rt->rt6i_src.plen,
765 					offsetof(struct rt6_info, rt6i_src));
766 
767 			if (sn == NULL)
768 				goto st_failure;
769 		}
770 
771 		if (fn->leaf == NULL) {
772 			fn->leaf = rt;
773 			atomic_inc(&rt->rt6i_ref);
774 		}
775 		fn = sn;
776 	}
777 #endif
778 
779 	err = fib6_add_rt2node(fn, rt, info);
780 
781 	if (err == 0) {
782 		fib6_start_gc(rt);
783 		if (!(rt->rt6i_flags&RTF_CACHE))
784 			fib6_prune_clones(pn, rt);
785 	}
786 
787 out:
788 	if (err) {
789 #ifdef CONFIG_IPV6_SUBTREES
790 		/*
791 		 * If fib6_add_1 has cleared the old leaf pointer in the
792 		 * super-tree leaf node we have to find a new one for it.
793 		 */
794 		if (pn != fn && !pn->leaf && !(pn->fn_flags & RTN_RTINFO)) {
795 			pn->leaf = fib6_find_prefix(pn);
796 #if RT6_DEBUG >= 2
797 			if (!pn->leaf) {
798 				BUG_TRAP(pn->leaf != NULL);
799 				pn->leaf = &ip6_null_entry;
800 			}
801 #endif
802 			atomic_inc(&pn->leaf->rt6i_ref);
803 		}
804 #endif
805 		dst_free(&rt->u.dst);
806 	}
807 	return err;
808 
809 #ifdef CONFIG_IPV6_SUBTREES
810 	/* Subtree creation failed, so the main tree node is probably
811 	   now an orphan. If it is, shoot it.
812 	 */
813 st_failure:
814 	if (fn && !(fn->fn_flags & (RTN_RTINFO|RTN_ROOT)))
815 		fib6_repair_tree(fn);
816 	dst_free(&rt->u.dst);
817 	return err;
818 #endif
819 }
820 
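/*
 *	Illustrative note (derived from fib6_add above): with
 *	CONFIG_IPV6_SUBTREES, a route such as
 *
 *		dst 2001:db8:a::/48, src 2001:db8:b::/48
 *
 *	is inserted by keying the main tree on rt6i_dst and then hanging a
 *	second radix tree off the /48 destination node, keyed on rt6i_src;
 *	fib6_add_1() is simply reused with
 *	offsetof(struct rt6_info, rt6i_src).
 */
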
821 /*
822  *	Routing tree lookup
823  *
824  */
825 
826 struct lookup_args {
827 	int		offset;		/* key offset on rt6_info	*/
828 	struct in6_addr	*addr;		/* search key			*/
829 };
830 
831 static struct fib6_node * fib6_lookup_1(struct fib6_node *root,
832 					struct lookup_args *args)
833 {
834 	struct fib6_node *fn;
835 	__be32 dir;
836 
837 	if (unlikely(args->offset == 0))
838 		return NULL;
839 
840 	/*
841 	 *	Descend on a tree
842 	 */
843 
844 	fn = root;
845 
846 	for (;;) {
847 		struct fib6_node *next;
848 
849 		dir = addr_bit_set(args->addr, fn->fn_bit);
850 
851 		next = dir ? fn->right : fn->left;
852 
853 		if (next) {
854 			fn = next;
855 			continue;
856 		}
857 
858 		break;
859 	}
860 
861 	while(fn) {
862 		if (FIB6_SUBTREE(fn) || fn->fn_flags & RTN_RTINFO) {
863 			struct rt6key *key;
864 
865 			key = (struct rt6key *) ((u8 *) fn->leaf +
866 						 args->offset);
867 
868 			if (ipv6_prefix_equal(&key->addr, args->addr, key->plen)) {
869 #ifdef CONFIG_IPV6_SUBTREES
870 				if (fn->subtree)
871 					fn = fib6_lookup_1(fn->subtree, args + 1);
872 #endif
873 				if (!fn || fn->fn_flags & RTN_RTINFO)
874 					return fn;
875 			}
876 		}
877 
878 		if (fn->fn_flags & RTN_ROOT)
879 			break;
880 
881 		fn = fn->parent;
882 	}
883 
884 	return NULL;
885 }
886 
887 struct fib6_node * fib6_lookup(struct fib6_node *root, struct in6_addr *daddr,
888 			       struct in6_addr *saddr)
889 {
890 	struct fib6_node *fn;
891 	struct lookup_args args[] = {
892 		{
893 			.offset = offsetof(struct rt6_info, rt6i_dst),
894 			.addr = daddr,
895 		},
896 #ifdef CONFIG_IPV6_SUBTREES
897 		{
898 			.offset = offsetof(struct rt6_info, rt6i_src),
899 			.addr = saddr,
900 		},
901 #endif
902 		{
903 			.offset = 0,	/* sentinel */
904 		}
905 	};
906 
907 	fn = fib6_lookup_1(root, daddr ? args : args + 1);
908 
909 	if (fn == NULL || fn->fn_flags & RTN_TL_ROOT)
910 		fn = root;
911 
912 	return fn;
913 }
914 
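/*
 *	Illustrative sketch (not part of the original file): how a lookup
 *	path uses the function above.  The table pointer and flow are
 *	hypothetical; real callers hold the table lock.
 *
 *		struct fib6_node *fn;
 *
 *		read_lock_bh(&table->tb6_lock);
 *		fn = fib6_lookup(&table->tb6_root, &fl->fl6_dst, &fl->fl6_src);
 *		rt = fn->leaf;
 *		read_unlock_bh(&table->tb6_lock);
 *
 *	Note that fib6_lookup() never returns NULL; it falls back to the
 *	root node when nothing more specific matches.
 */
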
915 /*
916  *	Get node with specified destination prefix (and source prefix,
917  *	if subtrees are used)
918  */
919 
920 
921 static struct fib6_node * fib6_locate_1(struct fib6_node *root,
922 					struct in6_addr *addr,
923 					int plen, int offset)
924 {
925 	struct fib6_node *fn;
926 
927 	for (fn = root; fn ; ) {
928 		struct rt6key *key = (struct rt6key *)((u8 *)fn->leaf + offset);
929 
930 		/*
931 		 *	Prefix match
932 		 */
933 		if (plen < fn->fn_bit ||
934 		    !ipv6_prefix_equal(&key->addr, addr, fn->fn_bit))
935 			return NULL;
936 
937 		if (plen == fn->fn_bit)
938 			return fn;
939 
940 		/*
941 		 *	We have more bits to go
942 		 */
943 		if (addr_bit_set(addr, fn->fn_bit))
944 			fn = fn->right;
945 		else
946 			fn = fn->left;
947 	}
948 	return NULL;
949 }
950 
951 struct fib6_node * fib6_locate(struct fib6_node *root,
952 			       struct in6_addr *daddr, int dst_len,
953 			       struct in6_addr *saddr, int src_len)
954 {
955 	struct fib6_node *fn;
956 
957 	fn = fib6_locate_1(root, daddr, dst_len,
958 			   offsetof(struct rt6_info, rt6i_dst));
959 
960 #ifdef CONFIG_IPV6_SUBTREES
961 	if (src_len) {
962 		BUG_TRAP(saddr!=NULL);
963 		if (fn && fn->subtree)
964 			fn = fib6_locate_1(fn->subtree, saddr, src_len,
965 					   offsetof(struct rt6_info, rt6i_src));
966 	}
967 #endif
968 
969 	if (fn && fn->fn_flags&RTN_RTINFO)
970 		return fn;
971 
972 	return NULL;
973 }
974 
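/*
 *	Illustrative sketch (hypothetical variables, no locking shown):
 *	fib6_locate() is an exact-prefix lookup, used e.g. to find the node
 *	holding 2001:db8::/32 before deleting a route from it.
 *
 *		fn = fib6_locate(&table->tb6_root, &prefix, 32, NULL, 0);
 *		if (fn == NULL)
 *			return -ESRCH;
 */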
975 
976 /*
977  *	Deletion
978  *
979  */
980 
981 static struct rt6_info * fib6_find_prefix(struct fib6_node *fn)
982 {
983 	if (fn->fn_flags&RTN_ROOT)
984 		return &ip6_null_entry;
985 
986 	while(fn) {
987 		if(fn->left)
988 			return fn->left->leaf;
989 
990 		if(fn->right)
991 			return fn->right->leaf;
992 
993 		fn = FIB6_SUBTREE(fn);
994 	}
995 	return NULL;
996 }
997 
998 /*
999  *	Called to trim the tree of intermediate nodes when possible. "fn"
1000  *	is the node we want to try and remove.
1001  */
1002 
1003 static struct fib6_node * fib6_repair_tree(struct fib6_node *fn)
1004 {
1005 	int children;
1006 	int nstate;
1007 	struct fib6_node *child, *pn;
1008 	struct fib6_walker_t *w;
1009 	int iter = 0;
1010 
1011 	for (;;) {
1012 		RT6_TRACE("fixing tree: plen=%d iter=%d\n", fn->fn_bit, iter);
1013 		iter++;
1014 
1015 		BUG_TRAP(!(fn->fn_flags&RTN_RTINFO));
1016 		BUG_TRAP(!(fn->fn_flags&RTN_TL_ROOT));
1017 		BUG_TRAP(fn->leaf==NULL);
1018 
1019 		children = 0;
1020 		child = NULL;
1021 		if (fn->right) child = fn->right, children |= 1;
1022 		if (fn->left) child = fn->left, children |= 2;
1023 
1024 		if (children == 3 || FIB6_SUBTREE(fn)
1025 #ifdef CONFIG_IPV6_SUBTREES
1026 		    /* Subtree root (i.e. fn) may have one child */
1027 		    || (children && fn->fn_flags&RTN_ROOT)
1028 #endif
1029 		    ) {
1030 			fn->leaf = fib6_find_prefix(fn);
1031 #if RT6_DEBUG >= 2
1032 			if (fn->leaf==NULL) {
1033 				BUG_TRAP(fn->leaf);
1034 				fn->leaf = &ip6_null_entry;
1035 			}
1036 #endif
1037 			atomic_inc(&fn->leaf->rt6i_ref);
1038 			return fn->parent;
1039 		}
1040 
1041 		pn = fn->parent;
1042 #ifdef CONFIG_IPV6_SUBTREES
1043 		if (FIB6_SUBTREE(pn) == fn) {
1044 			BUG_TRAP(fn->fn_flags&RTN_ROOT);
1045 			FIB6_SUBTREE(pn) = NULL;
1046 			nstate = FWS_L;
1047 		} else {
1048 			BUG_TRAP(!(fn->fn_flags&RTN_ROOT));
1049 #endif
1050 			if (pn->right == fn) pn->right = child;
1051 			else if (pn->left == fn) pn->left = child;
1052 #if RT6_DEBUG >= 2
1053 			else BUG_TRAP(0);
1054 #endif
1055 			if (child)
1056 				child->parent = pn;
1057 			nstate = FWS_R;
1058 #ifdef CONFIG_IPV6_SUBTREES
1059 		}
1060 #endif
1061 
1062 		read_lock(&fib6_walker_lock);
1063 		FOR_WALKERS(w) {
1064 			if (child == NULL) {
1065 				if (w->root == fn) {
1066 					w->root = w->node = NULL;
1067 					RT6_TRACE("W %p adjusted by delroot 1\n", w);
1068 				} else if (w->node == fn) {
1069 					RT6_TRACE("W %p adjusted by delnode 1, s=%d/%d\n", w, w->state, nstate);
1070 					w->node = pn;
1071 					w->state = nstate;
1072 				}
1073 			} else {
1074 				if (w->root == fn) {
1075 					w->root = child;
1076 					RT6_TRACE("W %p adjusted by delroot 2\n", w);
1077 				}
1078 				if (w->node == fn) {
1079 					w->node = child;
1080 					if (children&2) {
1081 						RT6_TRACE("W %p adjusted by delnode 2, s=%d\n", w, w->state);
1082 						w->state = w->state>=FWS_R ? FWS_U : FWS_INIT;
1083 					} else {
1084 						RT6_TRACE("W %p adjusted by delnode 2, s=%d\n", w, w->state);
1085 						w->state = w->state>=FWS_C ? FWS_U : FWS_INIT;
1086 					}
1087 				}
1088 			}
1089 		}
1090 		read_unlock(&fib6_walker_lock);
1091 
1092 		node_free(fn);
1093 		if (pn->fn_flags&RTN_RTINFO || FIB6_SUBTREE(pn))
1094 			return pn;
1095 
1096 		rt6_release(pn->leaf);
1097 		pn->leaf = NULL;
1098 		fn = pn;
1099 	}
1100 }
1101 
1102 static void fib6_del_route(struct fib6_node *fn, struct rt6_info **rtp,
1103 			   struct nl_info *info)
1104 {
1105 	struct fib6_walker_t *w;
1106 	struct rt6_info *rt = *rtp;
1107 
1108 	RT6_TRACE("fib6_del_route\n");
1109 
1110 	/* Unlink it */
1111 	*rtp = rt->u.dst.rt6_next;
1112 	rt->rt6i_node = NULL;
1113 	rt6_stats.fib_rt_entries--;
1114 	rt6_stats.fib_discarded_routes++;
1115 
1116 	/* Reset round-robin state, if necessary */
1117 	if (fn->rr_ptr == rt)
1118 		fn->rr_ptr = NULL;
1119 
1120 	/* Adjust walkers */
1121 	read_lock(&fib6_walker_lock);
1122 	FOR_WALKERS(w) {
1123 		if (w->state == FWS_C && w->leaf == rt) {
1124 			RT6_TRACE("walker %p adjusted by delroute\n", w);
1125 			w->leaf = rt->u.dst.rt6_next;
1126 			if (w->leaf == NULL)
1127 				w->state = FWS_U;
1128 		}
1129 	}
1130 	read_unlock(&fib6_walker_lock);
1131 
1132 	rt->u.dst.rt6_next = NULL;
1133 
1134 	if (fn->leaf == NULL && fn->fn_flags&RTN_TL_ROOT)
1135 		fn->leaf = &ip6_null_entry;
1136 
1137 	/* If it was last route, expunge its radix tree node */
1138 	if (fn->leaf == NULL) {
1139 		fn->fn_flags &= ~RTN_RTINFO;
1140 		rt6_stats.fib_route_nodes--;
1141 		fn = fib6_repair_tree(fn);
1142 	}
1143 
1144 	if (atomic_read(&rt->rt6i_ref) != 1) {
1145 		/* This route is used as a dummy address holder in some split
1146 		 * nodes. It is not leaked, but it still holds other resources,
1147 		 * which must be released in time. So, scan the ancestor nodes
1148 		 * and replace the dummy references to this route with
1149 		 * references to routes that are still alive.
1150 		 */
1151 		while (fn) {
1152 			if (!(fn->fn_flags&RTN_RTINFO) && fn->leaf == rt) {
1153 				fn->leaf = fib6_find_prefix(fn);
1154 				atomic_inc(&fn->leaf->rt6i_ref);
1155 				rt6_release(rt);
1156 			}
1157 			fn = fn->parent;
1158 		}
1159 		/* No more references are possible at this point. */
1160 		if (atomic_read(&rt->rt6i_ref) != 1) BUG();
1161 	}
1162 
1163 	inet6_rt_notify(RTM_DELROUTE, rt, info);
1164 	rt6_release(rt);
1165 }
1166 
1167 int fib6_del(struct rt6_info *rt, struct nl_info *info)
1168 {
1169 	struct fib6_node *fn = rt->rt6i_node;
1170 	struct rt6_info **rtp;
1171 
1172 #if RT6_DEBUG >= 2
1173 	if (rt->u.dst.obsolete>0) {
1174 		BUG_TRAP(fn==NULL);
1175 		return -ENOENT;
1176 	}
1177 #endif
1178 	if (fn == NULL || rt == &ip6_null_entry)
1179 		return -ENOENT;
1180 
1181 	BUG_TRAP(fn->fn_flags&RTN_RTINFO);
1182 
1183 	if (!(rt->rt6i_flags&RTF_CACHE)) {
1184 		struct fib6_node *pn = fn;
1185 #ifdef CONFIG_IPV6_SUBTREES
1186 		/* clones of this route might be in another subtree */
1187 		if (rt->rt6i_src.plen) {
1188 			while (!(pn->fn_flags&RTN_ROOT))
1189 				pn = pn->parent;
1190 			pn = pn->parent;
1191 		}
1192 #endif
1193 		fib6_prune_clones(pn, rt);
1194 	}
1195 
1196 	/*
1197 	 *	Walk the leaf entries looking for ourselves
1198 	 */
1199 
1200 	for (rtp = &fn->leaf; *rtp; rtp = &(*rtp)->u.dst.rt6_next) {
1201 		if (*rtp == rt) {
1202 			fib6_del_route(fn, rtp, info);
1203 			return 0;
1204 		}
1205 	}
1206 	return -ENOENT;
1207 }
1208 
1209 /*
1210  *	Tree traversal function.
1211  *
1212  *	Certainly, it is not interrupt safe.
1213  *	However, it is internally re-entrant with respect to itself and to
1214  *	fib6_add/fib6_del. This means that we can modify the tree while
1215  *	walking it, and use this function for garbage collection, clone
1216  *	pruning, cleaning the tree when a device goes down, and so on.
1217  *
1218  *	It guarantees that every node will be traversed,
1219  *	and that it will be traversed only once.
1220  *
1221  *	Callback function w->func may return:
1222  *	0 -> continue walking.
1223  *	positive value -> walking is suspended (used by tree dumps,
1224  *	and possibly by gc, if it is ever split into several slices)
1225  *	negative value -> terminate walking.
1226  *
1227  *	The function itself returns:
1228  *	0   -> walk is complete.
1229  *	>0  -> walk is incomplete (i.e. suspended)
1230  *	<0  -> walk is terminated by an error.
1231  */
1232 
1233 static int fib6_walk_continue(struct fib6_walker_t *w)
1234 {
1235 	struct fib6_node *fn, *pn;
1236 
1237 	for (;;) {
1238 		fn = w->node;
1239 		if (fn == NULL)
1240 			return 0;
1241 
1242 		if (w->prune && fn != w->root &&
1243 		    fn->fn_flags&RTN_RTINFO && w->state < FWS_C) {
1244 			w->state = FWS_C;
1245 			w->leaf = fn->leaf;
1246 		}
1247 		switch (w->state) {
1248 #ifdef CONFIG_IPV6_SUBTREES
1249 		case FWS_S:
1250 			if (FIB6_SUBTREE(fn)) {
1251 				w->node = FIB6_SUBTREE(fn);
1252 				continue;
1253 			}
1254 			w->state = FWS_L;
1255 #endif
1256 		case FWS_L:
1257 			if (fn->left) {
1258 				w->node = fn->left;
1259 				w->state = FWS_INIT;
1260 				continue;
1261 			}
1262 			w->state = FWS_R;
1263 		case FWS_R:
1264 			if (fn->right) {
1265 				w->node = fn->right;
1266 				w->state = FWS_INIT;
1267 				continue;
1268 			}
1269 			w->state = FWS_C;
1270 			w->leaf = fn->leaf;
1271 		case FWS_C:
1272 			if (w->leaf && fn->fn_flags&RTN_RTINFO) {
1273 				int err = w->func(w);
1274 				if (err)
1275 					return err;
1276 				continue;
1277 			}
1278 			w->state = FWS_U;
1279 		case FWS_U:
1280 			if (fn == w->root)
1281 				return 0;
1282 			pn = fn->parent;
1283 			w->node = pn;
1284 #ifdef CONFIG_IPV6_SUBTREES
1285 			if (FIB6_SUBTREE(pn) == fn) {
1286 				BUG_TRAP(fn->fn_flags&RTN_ROOT);
1287 				w->state = FWS_L;
1288 				continue;
1289 			}
1290 #endif
1291 			if (pn->left == fn) {
1292 				w->state = FWS_R;
1293 				continue;
1294 			}
1295 			if (pn->right == fn) {
1296 				w->state = FWS_C;
1297 				w->leaf = w->node->leaf;
1298 				continue;
1299 			}
1300 #if RT6_DEBUG >= 2
1301 			BUG_TRAP(0);
1302 #endif
1303 		}
1304 	}
1305 }
1306 
1307 static int fib6_walk(struct fib6_walker_t *w)
1308 {
1309 	int res;
1310 
1311 	w->state = FWS_INIT;
1312 	w->node = w->root;
1313 
1314 	fib6_walker_link(w);
1315 	res = fib6_walk_continue(w);
1316 	if (res <= 0)
1317 		fib6_walker_unlink(w);
1318 	return res;
1319 }
1320 
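/*
 *	Illustrative sketch (not part of the original file): a minimal walker
 *	that counts routes, following the callback contract documented above.
 *	The names are hypothetical and the walk must run under the table's
 *	read lock.
 *
 *		static int example_count_node(struct fib6_walker_t *w)
 *		{
 *			struct rt6_info *rt;
 *
 *			for (rt = w->leaf; rt; rt = rt->u.dst.rt6_next)
 *				(*(int *)w->args)++;
 *			return 0;
 *		}
 *
 *		int count = 0;
 *		struct fib6_walker_t w = {
 *			.root = &table->tb6_root,
 *			.func = example_count_node,
 *			.args = &count,
 *		};
 *
 *		fib6_walk(&w);
 */
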
1321 static int fib6_clean_node(struct fib6_walker_t *w)
1322 {
1323 	int res;
1324 	struct rt6_info *rt;
1325 	struct fib6_cleaner_t *c = (struct fib6_cleaner_t*)w;
1326 
1327 	for (rt = w->leaf; rt; rt = rt->u.dst.rt6_next) {
1328 		res = c->func(rt, c->arg);
1329 		if (res < 0) {
1330 			w->leaf = rt;
1331 			res = fib6_del(rt, NULL);
1332 			if (res) {
1333 #if RT6_DEBUG >= 2
1334 				printk(KERN_DEBUG "fib6_clean_node: del failed: rt=%p@%p err=%d\n", rt, rt->rt6i_node, res);
1335 #endif
1336 				continue;
1337 			}
1338 			return 0;
1339 		}
1340 		BUG_TRAP(res==0);
1341 	}
1342 	w->leaf = rt;
1343 	return 0;
1344 }
1345 
1346 /*
1347  *	Convenient frontend to tree walker.
1348  *
1349  *	func is called on each route.
1350  *		It may return -1 -> delete this route.
1351  *		              0  -> continue walking
1352  *
1353  *	prune==1 -> only immediate children of node (certainly,
1354  *	ignoring pure split nodes) will be scanned.
1355  */
1356 
1357 static void fib6_clean_tree(struct fib6_node *root,
1358 			    int (*func)(struct rt6_info *, void *arg),
1359 			    int prune, void *arg)
1360 {
1361 	struct fib6_cleaner_t c;
1362 
1363 	c.w.root = root;
1364 	c.w.func = fib6_clean_node;
1365 	c.w.prune = prune;
1366 	c.func = func;
1367 	c.arg = arg;
1368 
1369 	fib6_walk(&c.w);
1370 }
1371 
1372 void fib6_clean_all(int (*func)(struct rt6_info *, void *arg),
1373 		    int prune, void *arg)
1374 {
1375 	struct fib6_table *table;
1376 	struct hlist_node *node;
1377 	unsigned int h;
1378 
1379 	rcu_read_lock();
1380 	for (h = 0; h < FIB_TABLE_HASHSZ; h++) {
1381 		hlist_for_each_entry_rcu(table, node, &fib_table_hash[h],
1382 					 tb6_hlist) {
1383 			write_lock_bh(&table->tb6_lock);
1384 			fib6_clean_tree(&table->tb6_root, func, prune, arg);
1385 			write_unlock_bh(&table->tb6_lock);
1386 		}
1387 	}
1388 	rcu_read_unlock();
1389 }
1390 
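/*
 *	Illustrative sketch (not part of the original file): a cleaner in the
 *	style used when a device goes down.  fib6_clean_all() runs it on
 *	every route in every table; returning -1 deletes the route.  The
 *	names are hypothetical.
 *
 *		static int example_del_by_dev(struct rt6_info *rt, void *arg)
 *		{
 *			struct net_device *dev = arg;
 *
 *			return rt->rt6i_dev == dev ? -1 : 0;
 *		}
 *
 *		fib6_clean_all(example_del_by_dev, 0, dev);
 */
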
1391 static int fib6_prune_clone(struct rt6_info *rt, void *arg)
1392 {
1393 	if (rt->rt6i_flags & RTF_CACHE) {
1394 		RT6_TRACE("pruning clone %p\n", rt);
1395 		return -1;
1396 	}
1397 
1398 	return 0;
1399 }
1400 
1401 static void fib6_prune_clones(struct fib6_node *fn, struct rt6_info *rt)
1402 {
1403 	fib6_clean_tree(fn, fib6_prune_clone, 1, rt);
1404 }
1405 
1406 /*
1407  *	Garbage collection
1408  */
1409 
1410 static struct fib6_gc_args
1411 {
1412 	int			timeout;
1413 	int			more;
1414 } gc_args;
1415 
1416 static int fib6_age(struct rt6_info *rt, void *arg)
1417 {
1418 	unsigned long now = jiffies;
1419 
1420 	/*
1421 	 *	check addrconf expiration here.
1422 	 *	Routes are expired even if they are in use.
1423 	 *
1424 	 *	Also age clones. Note that clones are aged out
1425 	 *	only if they are not in use now.
1426 	 */
1427 
1428 	if (rt->rt6i_flags&RTF_EXPIRES && rt->rt6i_expires) {
1429 		if (time_after(now, rt->rt6i_expires)) {
1430 			RT6_TRACE("expiring %p\n", rt);
1431 			return -1;
1432 		}
1433 		gc_args.more++;
1434 	} else if (rt->rt6i_flags & RTF_CACHE) {
1435 		if (atomic_read(&rt->u.dst.__refcnt) == 0 &&
1436 		    time_after_eq(now, rt->u.dst.lastuse + gc_args.timeout)) {
1437 			RT6_TRACE("aging clone %p\n", rt);
1438 			return -1;
1439 		} else if ((rt->rt6i_flags & RTF_GATEWAY) &&
1440 			   (!(rt->rt6i_nexthop->flags & NTF_ROUTER))) {
1441 			RT6_TRACE("purging route %p via non-router but gateway\n",
1442 				  rt);
1443 			return -1;
1444 		}
1445 		gc_args.more++;
1446 	}
1447 
1448 	return 0;
1449 }
1450 
1451 static DEFINE_SPINLOCK(fib6_gc_lock);
1452 
1453 void fib6_run_gc(unsigned long dummy)
1454 {
1455 	if (dummy != ~0UL) {
1456 		spin_lock_bh(&fib6_gc_lock);
1457 		gc_args.timeout = dummy ? (int)dummy : ip6_rt_gc_interval;
1458 	} else {
1459 		local_bh_disable();
1460 		if (!spin_trylock(&fib6_gc_lock)) {
1461 			mod_timer(&ip6_fib_timer, jiffies + HZ);
1462 			local_bh_enable();
1463 			return;
1464 		}
1465 		gc_args.timeout = ip6_rt_gc_interval;
1466 	}
1467 	gc_args.more = 0;
1468 
1469 	ndisc_dst_gc(&gc_args.more);
1470 	fib6_clean_all(fib6_age, 0, NULL);
1471 
1472 	if (gc_args.more)
1473 		mod_timer(&ip6_fib_timer, jiffies + ip6_rt_gc_interval);
1474 	else {
1475 		del_timer(&ip6_fib_timer);
1476 		ip6_fib_timer.expires = 0;
1477 	}
1478 	spin_unlock_bh(&fib6_gc_lock);
1479 }
1480 
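/*
 *	For reference (derived from fib6_run_gc above): a caller passing
 *	~0UL gets a best-effort run that only try-locks fib6_gc_lock and
 *	re-arms the timer if the lock is busy, while any other value is
 *	taken as an explicit timeout in jiffies (0 meaning the default
 *	ip6_rt_gc_interval) and waits for the lock.
 */
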
1481 void __init fib6_init(void)
1482 {
1483 	fib6_node_kmem = kmem_cache_create("fib6_nodes",
1484 					   sizeof(struct fib6_node),
1485 					   0, SLAB_HWCACHE_ALIGN|SLAB_PANIC,
1486 					   NULL, NULL);
1487 
1488 	fib6_tables_init();
1489 
1490 	__rtnl_register(PF_INET6, RTM_GETROUTE, NULL, inet6_dump_fib);
1491 }
1492 
1493 void fib6_gc_cleanup(void)
1494 {
1495 	del_timer(&ip6_fib_timer);
1496 	kmem_cache_destroy(fib6_node_kmem);
1497 }
1498