xref: /openbmc/linux/net/core/dst.c (revision 4a6ce2b6)
/*
 * net/core/dst.c	Protocol independent destination cache.
 *
 * Authors:		Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 */

#include <linux/bitops.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/workqueue.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/string.h>
#include <linux/types.h>
#include <net/net_namespace.h>
#include <linux/sched.h>
#include <linux/prefetch.h>
#include <net/lwtunnel.h>

#include <net/dst.h>
#include <net/dst_metadata.h>

/*
 * Theory of operations:
 * 1) We use a list, protected by a spinlock, to add
 *    new entries from both BH and non-BH context.
 * 2) In order to hold the spinlock for only a short time,
 *    we use a second list where long-lived entries are stored;
 *    these are handled by a garbage-collect task fired from a
 *    workqueue.
 * 3) That second list is guarded by a mutex,
 *    so that the gc task and dst_dev_event() can be synchronized.
 */
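
/*
 * Illustrative sketch of the handoff between the two lists (a summary
 * of the code below, not additional machinery):
 *
 *   producer: __dst_free()             consumer: dst_gc_task()
 *   (any context, BH safe)             (workqueue, holds dst_gc_mutex)
 *
 *   lock dst_garbage.lock              splice dst_garbage.list away
 *   chain dst onto dst_garbage.list    under dst_garbage.lock, then:
 *   unlock, maybe kick dst_gc_work       refcnt != 0 -> keep on
 *                                                       dst_busy_list
 *                                        refcnt == 0 -> dst_destroy()
 */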

/*
 * We want to keep the lock and the list close together
 * to dirty as few cache lines as possible in __dst_free().
 * As this is not a very strong hint, we don't force an alignment on SMP.
 */
static struct {
	spinlock_t		lock;
	struct dst_entry	*list;
	unsigned long		timer_inc;
	unsigned long		timer_expires;
} dst_garbage = {
	.lock = __SPIN_LOCK_UNLOCKED(dst_garbage.lock),
	.timer_inc = DST_GC_MAX,
};
static void dst_gc_task(struct work_struct *work);
static void ___dst_free(struct dst_entry *dst);

static DECLARE_DELAYED_WORK(dst_gc_work, dst_gc_task);

static DEFINE_MUTEX(dst_gc_mutex);
/*
 * long lived entries are maintained in this list, guarded by dst_gc_mutex
 */
static struct dst_entry         *dst_busy_list;

static void dst_gc_task(struct work_struct *work)
{
	int    delayed = 0;
	int    work_performed = 0;
	unsigned long expires = ~0L;
	struct dst_entry *dst, *next, head;
	struct dst_entry *last = &head;

	mutex_lock(&dst_gc_mutex);
	next = dst_busy_list;

loop:
	while ((dst = next) != NULL) {
		next = dst->next;
		prefetch(&next->next);
		cond_resched();
		if (likely(atomic_read(&dst->__refcnt))) {
			last->next = dst;
			last = dst;
			delayed++;
			continue;
		}
		work_performed++;

		dst = dst_destroy(dst);
		if (dst) {
			/* NOHASH and still referenced. Unless it is already
			 * on gc list, invalidate it and add to gc list.
			 *
			 * Note: this is temporary. Actually, NOHASH dst's
			 * must be obsoleted when parent is obsoleted.
			 * But we do not have state "obsoleted, but
			 * referenced by parent", so it is right.
			 */
			if (dst->obsolete > 0)
				continue;

			___dst_free(dst);
			dst->next = next;
			next = dst;
		}
	}

	spin_lock_bh(&dst_garbage.lock);
	next = dst_garbage.list;
	if (next) {
		dst_garbage.list = NULL;
		spin_unlock_bh(&dst_garbage.lock);
		goto loop;
	}
	last->next = NULL;
	dst_busy_list = head.next;
	if (!dst_busy_list)
		dst_garbage.timer_inc = DST_GC_MAX;
	else {
		/*
		 * if we freed less than 1/10 of delayed entries,
		 * we can sleep longer.
		 */
		if (work_performed <= delayed/10) {
			dst_garbage.timer_expires += dst_garbage.timer_inc;
			if (dst_garbage.timer_expires > DST_GC_MAX)
				dst_garbage.timer_expires = DST_GC_MAX;
			dst_garbage.timer_inc += DST_GC_INC;
		} else {
			dst_garbage.timer_inc = DST_GC_INC;
			dst_garbage.timer_expires = DST_GC_MIN;
		}
		expires = dst_garbage.timer_expires;
		/*
		 * if the next desired timer is more than 4 seconds in the
		 * future then round the timer to whole seconds
		 */
		if (expires > 4*HZ)
			expires = round_jiffies_relative(expires);
		schedule_delayed_work(&dst_gc_work, expires);
	}

	spin_unlock_bh(&dst_garbage.lock);
	mutex_unlock(&dst_gc_mutex);
}
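
/*
 * Worked example of the backoff above, assuming the usual constants
 * from <net/dst.h> (DST_GC_MIN = HZ/10, DST_GC_INC = HZ/2,
 * DST_GC_MAX = 120*HZ); check the header in your tree, the values may
 * differ.  After a productive pass: expires = 0.1s, inc = 0.5s.  Each
 * quiet pass (freed <= delayed/10) then yields
 *
 *   expires: 0.1 -> 0.6 -> 1.6 -> 3.1 -> 5.1 -> ... -> 120s (capped)
 *   inc:     0.5 -> 1.0 -> 1.5 -> 2.0 -> 2.5 -> ...
 *
 * i.e. roughly quadratic growth of the sleep interval, while any
 * productive pass snaps it back to DST_GC_MIN.
 */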

int dst_discard_out(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	kfree_skb(skb);
	return 0;
}
EXPORT_SYMBOL(dst_discard_out);

const struct dst_metrics dst_default_metrics = {
	/* This initializer is needed to force the linker to place this
	 * variable into the const section. Otherwise it might end up in
	 * the .bss section.
	 * We really want to avoid false sharing on this variable, and
	 * catch any writes to it.
	 */
	.refcnt = ATOMIC_INIT(1),
};

void dst_init(struct dst_entry *dst, struct dst_ops *ops,
	      struct net_device *dev, int initial_ref, int initial_obsolete,
	      unsigned short flags)
{
	dst->child = NULL;
	dst->dev = dev;
	if (dev)
		dev_hold(dev);
	dst->ops = ops;
	dst_init_metrics(dst, dst_default_metrics.metrics, true);
	dst->expires = 0UL;
	dst->path = dst;
	dst->from = NULL;
#ifdef CONFIG_XFRM
	dst->xfrm = NULL;
#endif
	dst->input = dst_discard;
	dst->output = dst_discard_out;
	dst->error = 0;
	dst->obsolete = initial_obsolete;
	dst->header_len = 0;
	dst->trailer_len = 0;
#ifdef CONFIG_IP_ROUTE_CLASSID
	dst->tclassid = 0;
#endif
	dst->lwtstate = NULL;
	atomic_set(&dst->__refcnt, initial_ref);
	dst->__use = 0;
	dst->lastuse = jiffies;
	dst->flags = flags;
	dst->next = NULL;
	if (!(flags & DST_NOCOUNT))
		dst_entries_add(ops, 1);
}
EXPORT_SYMBOL(dst_init);

void *dst_alloc(struct dst_ops *ops, struct net_device *dev,
		int initial_ref, int initial_obsolete, unsigned short flags)
{
	struct dst_entry *dst;

	if (ops->gc && dst_entries_get_fast(ops) > ops->gc_thresh) {
		if (ops->gc(ops))
			return NULL;
	}

	dst = kmem_cache_alloc(ops->kmem_cachep, GFP_ATOMIC);
	if (!dst)
		return NULL;

	dst_init(dst, ops, dev, initial_ref, initial_obsolete, flags);

	return dst;
}
EXPORT_SYMBOL(dst_alloc);
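
/*
 * Minimal usage sketch for dst_alloc(), disabled from the build.  The
 * ops structure, route type and flag choice below are assumptions for
 * illustration, not anything this file defines; real callers look like
 * ipv4's rt_dst_alloc().
 */
#if 0
static struct my_route *my_route_alloc(struct net_device *dev)
{
	/* one reference handed to the caller; DST_OBSOLETE_FORCE_CHK
	 * makes dst_check() always consult ops->check()
	 */
	return dst_alloc(&my_dst_ops, dev, 1, DST_OBSOLETE_FORCE_CHK,
			 DST_NOCACHE);
}
#endif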

static void ___dst_free(struct dst_entry *dst)
{
	/* The first case (dev == NULL) is required when a
	 * protocol module is unloaded.
	 */
	if (dst->dev == NULL || !(dst->dev->flags&IFF_UP)) {
		dst->input = dst_discard;
		dst->output = dst_discard_out;
	}
	dst->obsolete = DST_OBSOLETE_DEAD;
}

void __dst_free(struct dst_entry *dst)
{
	spin_lock_bh(&dst_garbage.lock);
	___dst_free(dst);
	dst->next = dst_garbage.list;
	dst_garbage.list = dst;
	if (dst_garbage.timer_inc > DST_GC_INC) {
		dst_garbage.timer_inc = DST_GC_INC;
		dst_garbage.timer_expires = DST_GC_MIN;
		mod_delayed_work(system_wq, &dst_gc_work,
				 dst_garbage.timer_expires);
	}
	spin_unlock_bh(&dst_garbage.lock);
}
EXPORT_SYMBOL(__dst_free);

struct dst_entry *dst_destroy(struct dst_entry *dst)
{
	struct dst_entry *child;

	smp_rmb();

again:
	child = dst->child;

	if (!(dst->flags & DST_NOCOUNT))
		dst_entries_add(dst->ops, -1);

	if (dst->ops->destroy)
		dst->ops->destroy(dst);
	if (dst->dev)
		dev_put(dst->dev);

	lwtstate_put(dst->lwtstate);

	if (dst->flags & DST_METADATA)
		metadata_dst_free((struct metadata_dst *)dst);
	else
		kmem_cache_free(dst->ops->kmem_cachep, dst);

	dst = child;
	if (dst) {
		int nohash = dst->flags & DST_NOHASH;

		if (atomic_dec_and_test(&dst->__refcnt)) {
			/* We were the real parent of this dst, so kill child. */
			if (nohash)
				goto again;
		} else {
			/* Child is still referenced, return it for freeing. */
			if (nohash)
				return dst;
			/* Child is still in its hash table */
		}
	}
	return NULL;
}
EXPORT_SYMBOL(dst_destroy);

static void dst_destroy_rcu(struct rcu_head *head)
{
	struct dst_entry *dst = container_of(head, struct dst_entry, rcu_head);

	dst = dst_destroy(dst);
	if (dst)
		__dst_free(dst);
}

/* Operations to mark dst as DEAD and clean up the net device referenced
 * by dst:
 * 1. put the dst under the loopback interface and discard all tx/rx
 *    packets on this route.
 * 2. release the net_device
 * This function should be called when removing routes from the fib tree
 * in preparation for a NETDEV_DOWN/NETDEV_UNREGISTER event and also to
 * make the next dst_ops->check() fail.
 */
void dst_dev_put(struct dst_entry *dst)
{
	struct net_device *dev = dst->dev;

	dst->obsolete = DST_OBSOLETE_DEAD;
	if (dst->ops->ifdown)
		dst->ops->ifdown(dst, dev, true);
	dst->input = dst_discard;
	dst->output = dst_discard_out;
	dst->dev = dev_net(dst->dev)->loopback_dev;
	dev_hold(dst->dev);
	dev_put(dev);
}
EXPORT_SYMBOL(dst_dev_put);
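
/*
 * Sketch of a typical caller, disabled from the build: when a protocol
 * unlinks a route from its lookup structure ahead of device teardown,
 * it calls dst_dev_put() so the entry stops using the outgoing device.
 * The function and route type below are assumptions for illustration.
 */
#if 0
static void my_fib_unlink_route(struct my_route *rt)
{
	/* ... remove rt from the protocol's lookup structure first ... */
	dst_dev_put(&rt->dst);	/* reparent onto loopback, mark DEAD */
	dst_release(&rt->dst);	/* drop the table's reference */
}
#endif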

void dst_release(struct dst_entry *dst)
{
	if (dst) {
		int newrefcnt;
		unsigned short destroy_after_rcu = dst->flags &
						   (DST_NOCACHE | DST_NOGC);

		newrefcnt = atomic_dec_return(&dst->__refcnt);
		if (unlikely(newrefcnt < 0))
			net_warn_ratelimited("%s: dst:%p refcnt:%d\n",
					     __func__, dst, newrefcnt);
		if (!newrefcnt && unlikely(destroy_after_rcu))
			call_rcu(&dst->rcu_head, dst_destroy_rcu);
	}
}
EXPORT_SYMBOL(dst_release);
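
/*
 * Minimal refcounting sketch, disabled from the build.  dst_hold() and
 * skb_dst_set() are the real helpers from <net/dst.h>; the surrounding
 * function is an assumption for illustration.
 */
#if 0
static void my_attach_dst(struct sk_buff *skb, struct dst_entry *dst)
{
	dst_hold(dst);		/* take a reference for the skb */
	skb_dst_set(skb, dst);	/* skb now owns that reference; it is
				 * dropped via dst_release() when the
				 * skb is freed
				 */
}
#endif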

void dst_release_immediate(struct dst_entry *dst)
{
	if (dst) {
		int newrefcnt;

		newrefcnt = atomic_dec_return(&dst->__refcnt);
		if (unlikely(newrefcnt < 0))
			net_warn_ratelimited("%s: dst:%p refcnt:%d\n",
					     __func__, dst, newrefcnt);
		if (!newrefcnt)
			dst_destroy(dst);
	}
}
EXPORT_SYMBOL(dst_release_immediate);

u32 *dst_cow_metrics_generic(struct dst_entry *dst, unsigned long old)
{
	struct dst_metrics *p = kmalloc(sizeof(*p), GFP_ATOMIC);

	if (p) {
		struct dst_metrics *old_p = (struct dst_metrics *)__DST_METRICS_PTR(old);
		unsigned long prev, new;

		atomic_set(&p->refcnt, 1);
		memcpy(p->metrics, old_p->metrics, sizeof(p->metrics));

		new = (unsigned long) p;
		prev = cmpxchg(&dst->_metrics, old, new);

		if (prev != old) {
			kfree(p);
			p = (struct dst_metrics *)__DST_METRICS_PTR(prev);
			if (prev & DST_METRICS_READ_ONLY)
				p = NULL;
		} else if (prev & DST_METRICS_REFCOUNTED) {
			if (atomic_dec_and_test(&old_p->refcnt))
				kfree(old_p);
		}
	}
	BUILD_BUG_ON(offsetof(struct dst_metrics, metrics) != 0);
	return (u32 *)p;
}
EXPORT_SYMBOL(dst_cow_metrics_generic);
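
/*
 * Usage sketch, disabled from the build: writers normally reach
 * dst_cow_metrics_generic() indirectly.  dst_metric_set() (from
 * <net/dst.h>) notices that _metrics is marked read-only and calls
 * ops->cow_metrics(), which protocols commonly wire to the generic
 * helper above.  The wrapper function here is an assumption.
 */
#if 0
static void my_clamp_mtu(struct dst_entry *dst, u32 mtu)
{
	/* the first write on a dst still sharing dst_default_metrics
	 * triggers the copy-on-write path
	 */
	dst_metric_set(dst, RTAX_MTU, mtu);
}
#endif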

/* Caller asserts that dst_metrics_read_only(dst) is false.  */
void __dst_destroy_metrics_generic(struct dst_entry *dst, unsigned long old)
{
	unsigned long prev, new;

	new = ((unsigned long) &dst_default_metrics) | DST_METRICS_READ_ONLY;
	prev = cmpxchg(&dst->_metrics, old, new);
	if (prev == old)
		kfree(__DST_METRICS_PTR(old));
}
EXPORT_SYMBOL(__dst_destroy_metrics_generic);

static struct dst_ops md_dst_ops = {
	.family =		AF_UNSPEC,
};

static int dst_md_discard_out(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	WARN_ONCE(1, "Attempting to call output on metadata dst\n");
	kfree_skb(skb);
	return 0;
}

static int dst_md_discard(struct sk_buff *skb)
{
	WARN_ONCE(1, "Attempting to call input on metadata dst\n");
	kfree_skb(skb);
	return 0;
}

static void __metadata_dst_init(struct metadata_dst *md_dst, u8 optslen)
{
	struct dst_entry *dst;

	dst = &md_dst->dst;
	dst_init(dst, &md_dst_ops, NULL, 1, DST_OBSOLETE_NONE,
		 DST_METADATA | DST_NOCACHE | DST_NOCOUNT);

	dst->input = dst_md_discard;
	dst->output = dst_md_discard_out;

	memset(dst + 1, 0, sizeof(*md_dst) + optslen - sizeof(*dst));
}

struct metadata_dst *metadata_dst_alloc(u8 optslen, gfp_t flags)
{
	struct metadata_dst *md_dst;

	md_dst = kmalloc(sizeof(*md_dst) + optslen, flags);
	if (!md_dst)
		return NULL;

	__metadata_dst_init(md_dst, optslen);

	return md_dst;
}
EXPORT_SYMBOL_GPL(metadata_dst_alloc);
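
/*
 * Usage sketch, disabled from the build, loosely modeled on what tunnel
 * drivers such as vxlan do on receive: allocate a metadata dst carrying
 * only the tunnel key and attach it to the skb, so the stack routes on
 * the metadata.  The wrapper function is an assumption for illustration.
 */
#if 0
static struct metadata_dst *my_tun_rx_dst(__be64 tun_id)
{
	struct metadata_dst *md = metadata_dst_alloc(0, GFP_ATOMIC);

	if (!md)
		return NULL;
	md->u.tun_info.key.tun_id = tun_id;
	md->u.tun_info.key.tun_flags = TUNNEL_KEY;
	return md;
}
#endif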

void metadata_dst_free(struct metadata_dst *md_dst)
{
#ifdef CONFIG_DST_CACHE
	dst_cache_destroy(&md_dst->u.tun_info.dst_cache);
#endif
	kfree(md_dst);
}

struct metadata_dst __percpu *metadata_dst_alloc_percpu(u8 optslen, gfp_t flags)
{
	int cpu;
	struct metadata_dst __percpu *md_dst;

	md_dst = __alloc_percpu_gfp(sizeof(struct metadata_dst) + optslen,
				    __alignof__(struct metadata_dst), flags);
	if (!md_dst)
		return NULL;

	for_each_possible_cpu(cpu)
		__metadata_dst_init(per_cpu_ptr(md_dst, cpu), optslen);

	return md_dst;
}
EXPORT_SYMBOL_GPL(metadata_dst_alloc_percpu);

/* Dirty hack. We did it in 2.2 (in __dst_free),
 * we have _very_ good reasons not to repeat
 * this mistake in 2.3, but we have no choice
 * now. _It_ _is_ _explicit_ _deliberate_
 * _race_ _condition_.
 *
 * Commented and originally written by Alexey.
 */
static void dst_ifdown(struct dst_entry *dst, struct net_device *dev,
		       int unregister)
{
	if (dst->ops->ifdown)
		dst->ops->ifdown(dst, dev, unregister);

	if (dev != dst->dev)
		return;

	if (!unregister) {
		dst->input = dst_discard;
		dst->output = dst_discard_out;
	} else {
		dst->dev = dev_net(dst->dev)->loopback_dev;
		dev_hold(dst->dev);
		dev_put(dev);
	}
}

static int dst_dev_event(struct notifier_block *this, unsigned long event,
			 void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct dst_entry *dst, *last = NULL;

	switch (event) {
	case NETDEV_UNREGISTER_FINAL:
	case NETDEV_DOWN:
		mutex_lock(&dst_gc_mutex);
		for (dst = dst_busy_list; dst; dst = dst->next) {
			last = dst;
			dst_ifdown(dst, dev, event != NETDEV_DOWN);
		}

		spin_lock_bh(&dst_garbage.lock);
		dst = dst_garbage.list;
		dst_garbage.list = NULL;
		/* The code in dst_ifdown places a hold on the loopback device.
		 * If the gc entry processing is set to expire after a lengthy
		 * interval, this hold can cause netdev_wait_allrefs() to hang
		 * out and wait for a long time -- until the loopback
		 * interface is released.  If we're really unlucky, it'll emit
		 * pr_emerg messages to console too.  Reset the interval here,
		 * so dst cleanups occur in a more timely fashion.
		 */
		if (dst_garbage.timer_inc > DST_GC_INC) {
			dst_garbage.timer_inc = DST_GC_INC;
			dst_garbage.timer_expires = DST_GC_MIN;
			mod_delayed_work(system_wq, &dst_gc_work,
					 dst_garbage.timer_expires);
		}
		spin_unlock_bh(&dst_garbage.lock);

		if (last)
			last->next = dst;
		else
			dst_busy_list = dst;
		for (; dst; dst = dst->next)
			dst_ifdown(dst, dev, event != NETDEV_DOWN);
		mutex_unlock(&dst_gc_mutex);
		break;
	}
	return NOTIFY_DONE;
}

static struct notifier_block dst_dev_notifier = {
	.notifier_call	= dst_dev_event,
	.priority = -10, /* must be called after other network notifiers */
};

void __init dst_subsys_init(void)
{
	register_netdevice_notifier(&dst_dev_notifier);
}
549