/*
 * net/core/dst.c	Protocol independent destination cache.
 *
 * Authors:		Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 */

#include <linux/bitops.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/workqueue.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/string.h>
#include <linux/types.h>
#include <net/net_namespace.h>
#include <linux/sched.h>
#include <linux/prefetch.h>

#include <net/dst.h>
#include <net/dst_metadata.h>

/*
 * Theory of operations:
 * 1) We use a list, protected by a spinlock, to add
 *    new entries from both BH and non-BH context.
 * 2) In order to keep the spinlock held for only a short time,
 *    we use a second list where long-lived entries are stored;
 *    these are handled by the garbage collect work item fired
 *    by a workqueue.
 * 3) This list is guarded by a mutex,
 *    so that the gc_task and dst_dev_event() can be synchronized.
 */

/*
 * We want to keep lock & list close together
 * to dirty as few cache lines as possible in __dst_free().
 * As this is not a very strong hint, we don't force an alignment on SMP.
 */
static struct {
	spinlock_t		lock;
	struct dst_entry	*list;
	unsigned long		timer_inc;
	unsigned long		timer_expires;
} dst_garbage = {
	.lock = __SPIN_LOCK_UNLOCKED(dst_garbage.lock),
	.timer_inc = DST_GC_MAX,
};
static void dst_gc_task(struct work_struct *work);
static void ___dst_free(struct dst_entry *dst);

static DECLARE_DELAYED_WORK(dst_gc_work, dst_gc_task);

static DEFINE_MUTEX(dst_gc_mutex);
/*
 * long-lived entries are maintained in this list, guarded by dst_gc_mutex
 */
static struct dst_entry *dst_busy_list;

static void dst_gc_task(struct work_struct *work)
{
	int delayed = 0;
	int work_performed = 0;
	unsigned long expires = ~0L;
	struct dst_entry *dst, *next, head;
	struct dst_entry *last = &head;

	mutex_lock(&dst_gc_mutex);
	next = dst_busy_list;

loop:
	while ((dst = next) != NULL) {
		next = dst->next;
		prefetch(&next->next);
		cond_resched();
		if (likely(atomic_read(&dst->__refcnt))) {
			last->next = dst;
			last = dst;
			delayed++;
			continue;
		}
		work_performed++;

		dst = dst_destroy(dst);
		if (dst) {
			/* NOHASH and still referenced. Unless it is already
			 * on gc list, invalidate it and add to gc list.
			 *
			 * Note: this is temporary. Actually, NOHASH dst's
			 * must be obsoleted when parent is obsoleted.
			 * But we do not have state "obsoleted, but
			 * referenced by parent", so it is right.
			 */
			if (dst->obsolete > 0)
				continue;

			___dst_free(dst);
			dst->next = next;
			next = dst;
		}
	}

	spin_lock_bh(&dst_garbage.lock);
	next = dst_garbage.list;
	if (next) {
		dst_garbage.list = NULL;
		spin_unlock_bh(&dst_garbage.lock);
		goto loop;
	}
	last->next = NULL;
	dst_busy_list = head.next;
	if (!dst_busy_list)
		dst_garbage.timer_inc = DST_GC_MAX;
	else {
		/*
		 * if we freed less than 1/10 of the delayed entries,
		 * we can sleep longer.
		 */
		if (work_performed <= delayed / 10) {
			dst_garbage.timer_expires += dst_garbage.timer_inc;
			if (dst_garbage.timer_expires > DST_GC_MAX)
				dst_garbage.timer_expires = DST_GC_MAX;
			dst_garbage.timer_inc += DST_GC_INC;
		} else {
			dst_garbage.timer_inc = DST_GC_INC;
			dst_garbage.timer_expires = DST_GC_MIN;
		}
		expires = dst_garbage.timer_expires;
		/*
		 * if the next desired timer is more than 4 seconds in the
		 * future then round the timer to whole seconds
		 */
		if (expires > 4 * HZ)
			expires = round_jiffies_relative(expires);
		schedule_delayed_work(&dst_gc_work, expires);
	}

	spin_unlock_bh(&dst_garbage.lock);
	mutex_unlock(&dst_gc_mutex);
}
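/*
 * Worked example of the backoff above (assuming the usual definitions
 * in include/net/dst.h: DST_GC_MIN = HZ/10, DST_GC_INC = HZ/2 and
 * DST_GC_MAX = 120*HZ).  __dst_free() arms the first pass after 0.1s;
 * each unproductive pass then adds a growing increment, giving delays
 * of roughly 0.1s, 0.6s, 1.6s, 3.1s, ... capped at 120s, while any
 * pass that frees more than 1/10 of the delayed entries snaps back
 * to the DST_GC_MIN / DST_GC_INC starting point.
 */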
int dst_discard_sk(struct sock *sk, struct sk_buff *skb)
{
	kfree_skb(skb);
	return 0;
}
EXPORT_SYMBOL(dst_discard_sk);

const u32 dst_default_metrics[RTAX_MAX + 1] = {
	/* This initializer is needed to force the linker to place this
	 * variable into a const section. Otherwise it might end up in
	 * the bss section. We really want to avoid false sharing on
	 * this variable, and to catch any writes to it.
	 */
	[RTAX_MAX] = 0xdeadbeef,
};

void dst_init(struct dst_entry *dst, struct dst_ops *ops,
	      struct net_device *dev, int initial_ref, int initial_obsolete,
	      unsigned short flags)
{
	dst->child = NULL;
	dst->dev = dev;
	if (dev)
		dev_hold(dev);
	dst->ops = ops;
	dst_init_metrics(dst, dst_default_metrics, true);
	dst->expires = 0UL;
	dst->path = dst;
	dst->from = NULL;
#ifdef CONFIG_XFRM
	dst->xfrm = NULL;
#endif
	dst->input = dst_discard;
	dst->output = dst_discard_sk;
	dst->error = 0;
	dst->obsolete = initial_obsolete;
	dst->header_len = 0;
	dst->trailer_len = 0;
#ifdef CONFIG_IP_ROUTE_CLASSID
	dst->tclassid = 0;
#endif
	atomic_set(&dst->__refcnt, initial_ref);
	dst->__use = 0;
	dst->lastuse = jiffies;
	dst->flags = flags;
	dst->pending_confirm = 0;
	dst->next = NULL;
	if (!(flags & DST_NOCOUNT))
		dst_entries_add(ops, 1);
}
EXPORT_SYMBOL(dst_init);

void *dst_alloc(struct dst_ops *ops, struct net_device *dev,
		int initial_ref, int initial_obsolete, unsigned short flags)
{
	struct dst_entry *dst;

	if (ops->gc && dst_entries_get_fast(ops) > ops->gc_thresh) {
		if (ops->gc(ops))
			return NULL;
	}

	dst = kmem_cache_alloc(ops->kmem_cachep, GFP_ATOMIC);
	if (!dst)
		return NULL;

	dst_init(dst, ops, dev, initial_ref, initial_obsolete, flags);

	return dst;
}
EXPORT_SYMBOL(dst_alloc);
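/*
 * Illustrative sketch (hypothetical names, not part of this file): a
 * protocol embeds struct dst_entry as the first member of its own
 * route type, sizes ops->kmem_cachep accordingly, and then lets
 * dst_alloc() do the allocation and generic init:
 *
 *	struct my_route {
 *		struct dst_entry	dst;	// must stay first
 *		u32			cookie;
 *	};
 *
 *	struct my_route *rt = dst_alloc(&my_dst_ops, dev, 1,
 *					DST_OBSOLETE_NONE, 0);
 *	if (!rt)
 *		return NULL;	// cache under pressure, or OOM
 */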
static void ___dst_free(struct dst_entry *dst)
{
	/* The first case (dev == NULL) is required when
	 * a protocol module is unloaded.
	 */
	if (dst->dev == NULL || !(dst->dev->flags & IFF_UP)) {
		dst->input = dst_discard;
		dst->output = dst_discard_sk;
	}
	dst->obsolete = DST_OBSOLETE_DEAD;
}

void __dst_free(struct dst_entry *dst)
{
	spin_lock_bh(&dst_garbage.lock);
	___dst_free(dst);
	dst->next = dst_garbage.list;
	dst_garbage.list = dst;
	if (dst_garbage.timer_inc > DST_GC_INC) {
		dst_garbage.timer_inc = DST_GC_INC;
		dst_garbage.timer_expires = DST_GC_MIN;
		mod_delayed_work(system_wq, &dst_gc_work,
				 dst_garbage.timer_expires);
	}
	spin_unlock_bh(&dst_garbage.lock);
}
EXPORT_SYMBOL(__dst_free);

struct dst_entry *dst_destroy(struct dst_entry *dst)
{
	struct dst_entry *child;

	smp_rmb();

again:
	child = dst->child;

	if (!(dst->flags & DST_NOCOUNT))
		dst_entries_add(dst->ops, -1);

	if (dst->ops->destroy)
		dst->ops->destroy(dst);
	if (dst->dev)
		dev_put(dst->dev);

	if (dst->flags & DST_METADATA)
		kfree(dst);
	else
		kmem_cache_free(dst->ops->kmem_cachep, dst);

	dst = child;
	if (dst) {
		int nohash = dst->flags & DST_NOHASH;

		if (atomic_dec_and_test(&dst->__refcnt)) {
			/* We were the real parent of this dst, so kill child. */
			if (nohash)
				goto again;
		} else {
			/* Child is still referenced, return it for freeing. */
			if (nohash)
				return dst;
			/* Child is still in its hash table */
		}
	}
	return NULL;
}
EXPORT_SYMBOL(dst_destroy);

static void dst_destroy_rcu(struct rcu_head *head)
{
	struct dst_entry *dst = container_of(head, struct dst_entry, rcu_head);

	dst = dst_destroy(dst);
	if (dst)
		__dst_free(dst);
}

void dst_release(struct dst_entry *dst)
{
	if (dst) {
		int newrefcnt;

		newrefcnt = atomic_dec_return(&dst->__refcnt);
		WARN_ON(newrefcnt < 0);
		if (unlikely(dst->flags & DST_NOCACHE) && !newrefcnt)
			call_rcu(&dst->rcu_head, dst_destroy_rcu);
	}
}
EXPORT_SYMBOL(dst_release);

u32 *dst_cow_metrics_generic(struct dst_entry *dst, unsigned long old)
{
	u32 *p = kmalloc(sizeof(u32) * RTAX_MAX, GFP_ATOMIC);

	if (p) {
		u32 *old_p = __DST_METRICS_PTR(old);
		unsigned long prev, new;

		memcpy(p, old_p, sizeof(u32) * RTAX_MAX);

		new = (unsigned long) p;
		prev = cmpxchg(&dst->_metrics, old, new);

		if (prev != old) {
			kfree(p);
			p = __DST_METRICS_PTR(prev);
			if (prev & DST_METRICS_READ_ONLY)
				p = NULL;
		}
	}
	return p;
}
EXPORT_SYMBOL(dst_cow_metrics_generic);
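/*
 * Usage sketch (based on the helpers in include/net/dst.h): a dst
 * starts out sharing the read-only dst_default_metrics[] array, and
 * the first write must go through the ops->cow_metrics() hook, which
 * generic users point at dst_cow_metrics_generic() above.  Roughly:
 *
 *	u32 *p = dst_metrics_write_ptr(dst);	// COWs a private copy
 *	if (p)					// NULL if the COW failed
 *		p[RTAX_MTU - 1] = 1400;		// what dst_metric_set() does
 */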
/* Caller asserts that dst_metrics_read_only(dst) is false.  */
void __dst_destroy_metrics_generic(struct dst_entry *dst, unsigned long old)
{
	unsigned long prev, new;

	new = ((unsigned long) dst_default_metrics) | DST_METRICS_READ_ONLY;
	prev = cmpxchg(&dst->_metrics, old, new);
	if (prev == old)
		kfree(__DST_METRICS_PTR(old));
}
EXPORT_SYMBOL(__dst_destroy_metrics_generic);

static struct dst_ops md_dst_ops = {
	.family = AF_UNSPEC,
};

static int dst_md_discard_sk(struct sock *sk, struct sk_buff *skb)
{
	WARN_ONCE(1, "Attempting to call output on metadata dst\n");
	kfree_skb(skb);
	return 0;
}

static int dst_md_discard(struct sk_buff *skb)
{
	WARN_ONCE(1, "Attempting to call input on metadata dst\n");
	kfree_skb(skb);
	return 0;
}

struct metadata_dst *metadata_dst_alloc(u8 optslen, gfp_t flags)
{
	struct metadata_dst *md_dst;
	struct dst_entry *dst;

	md_dst = kmalloc(sizeof(*md_dst) + optslen, flags);
	if (!md_dst)
		return ERR_PTR(-ENOMEM);

	dst = &md_dst->dst;
	dst_init(dst, &md_dst_ops, NULL, 1, DST_OBSOLETE_NONE,
		 DST_METADATA | DST_NOCACHE | DST_NOCOUNT);

	dst->input = dst_md_discard;
	dst->output = dst_md_discard_sk;

	memset(dst + 1, 0, sizeof(*md_dst) + optslen - sizeof(*dst));
	md_dst->opts_len = optslen;

	return md_dst;
}
EXPORT_SYMBOL_GPL(metadata_dst_alloc);

/* Dirty hack. We did it in 2.2 (in __dst_free),
 * we have _very_ good reasons not to repeat
 * this mistake in 2.3, but we have no choice
 * now. _It_ _is_ _explicit_ _deliberate_
 * _race_ _condition_.
 *
 * Commented and originally written by Alexey.
 */
static void dst_ifdown(struct dst_entry *dst, struct net_device *dev,
		       int unregister)
{
	if (dst->ops->ifdown)
		dst->ops->ifdown(dst, dev, unregister);

	if (dev != dst->dev)
		return;

	if (!unregister) {
		dst->input = dst_discard;
		dst->output = dst_discard_sk;
	} else {
		dst->dev = dev_net(dst->dev)->loopback_dev;
		dev_hold(dst->dev);
		dev_put(dev);
	}
}

static int dst_dev_event(struct notifier_block *this, unsigned long event,
			 void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct dst_entry *dst, *last = NULL;

	switch (event) {
	case NETDEV_UNREGISTER_FINAL:
	case NETDEV_DOWN:
		mutex_lock(&dst_gc_mutex);
		for (dst = dst_busy_list; dst; dst = dst->next) {
			last = dst;
			dst_ifdown(dst, dev, event != NETDEV_DOWN);
		}

		spin_lock_bh(&dst_garbage.lock);
		dst = dst_garbage.list;
		dst_garbage.list = NULL;
		spin_unlock_bh(&dst_garbage.lock);

		if (last)
			last->next = dst;
		else
			dst_busy_list = dst;
		for (; dst; dst = dst->next)
			dst_ifdown(dst, dev, event != NETDEV_DOWN);
		mutex_unlock(&dst_gc_mutex);
		break;
	}
	return NOTIFY_DONE;
}

static struct notifier_block dst_dev_notifier = {
	.notifier_call = dst_dev_event,
	.priority = -10, /* must be called after other network notifiers */
};

void __init dst_subsys_init(void)
{
	register_netdevice_notifier(&dst_dev_notifier);
}
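/*
 * Note on the -10 priority above: notifier chains invoke callbacks in
 * descending priority order, so dst_dev_event() runs after any
 * default-priority (0) notifier.  Illustrative sketch with a
 * hypothetical subsystem (my_dev_event is made up):
 *
 *	static struct notifier_block my_notifier = {
 *		.notifier_call	= my_dev_event,	// runs before dst_dev_event()
 *		.priority	= 0,
 *	};
 *	register_netdevice_notifier(&my_notifier);
 */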