/*
 * net/core/dst.c	Protocol independent destination cache.
 *
 * Authors:		Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 */

#include <linux/bitops.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/workqueue.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/string.h>
#include <linux/types.h>
#include <net/net_namespace.h>
#include <linux/sched.h>
#include <linux/prefetch.h>

#include <net/dst.h>

/*
 * Theory of operations:
 * 1) We use a list, protected by a spinlock, to add
 *    new entries from both BH and non-BH context.
 * 2) In order to keep the spinlock held only briefly,
 *    we use a second list where long-lived entries are stored,
 *    handled by the garbage collect task fired by a workqueue.
 * 3) This list is guarded by a mutex,
 *    so that the gc_task and dst_dev_event() can be synchronized.
 */

/*
 * We want to keep lock & list close together
 * to dirty as few cache lines as possible in __dst_free().
 * As this is not a very strong hint, we don't force an alignment on SMP.
 */
static struct {
	spinlock_t		lock;
	struct dst_entry	*list;
	unsigned long		timer_inc;
	unsigned long		timer_expires;
} dst_garbage = {
	.lock = __SPIN_LOCK_UNLOCKED(dst_garbage.lock),
	.timer_inc = DST_GC_MAX,
};
static void dst_gc_task(struct work_struct *work);
static void ___dst_free(struct dst_entry *dst);

static DECLARE_DELAYED_WORK(dst_gc_work, dst_gc_task);

static DEFINE_MUTEX(dst_gc_mutex);
/*
 * long lived entries are maintained in this list, guarded by dst_gc_mutex
 */
static struct dst_entry *dst_busy_list;

static void dst_gc_task(struct work_struct *work)
{
	int delayed = 0;
	int work_performed = 0;
	unsigned long expires = ~0L;
	struct dst_entry *dst, *next, head;
	struct dst_entry *last = &head;

	mutex_lock(&dst_gc_mutex);
	next = dst_busy_list;

loop:
	while ((dst = next) != NULL) {
		next = dst->next;
		prefetch(&next->next);
		cond_resched();
		if (likely(atomic_read(&dst->__refcnt))) {
			last->next = dst;
			last = dst;
			delayed++;
			continue;
		}
		work_performed++;

		dst = dst_destroy(dst);
		if (dst) {
			/* NOHASH and still referenced. Unless it is already
			 * on the gc list, invalidate it and add it to the gc list.
			 *
			 * Note: this is temporary. Actually, NOHASH dst's
			 * must be obsoleted when the parent is obsoleted.
			 * But we do not have a state "obsoleted, but
			 * referenced by parent", so it is right.
			 */
			if (dst->obsolete > 0)
				continue;

			___dst_free(dst);
			dst->next = next;
			next = dst;
		}
	}

	spin_lock_bh(&dst_garbage.lock);
	next = dst_garbage.list;
	if (next) {
		dst_garbage.list = NULL;
		spin_unlock_bh(&dst_garbage.lock);
		goto loop;
	}
	last->next = NULL;
	dst_busy_list = head.next;
	if (!dst_busy_list)
		dst_garbage.timer_inc = DST_GC_MAX;
	else {
		/*
		 * if we freed no more than 1/10 of the delayed entries,
		 * we can sleep longer.
		 */
		if (work_performed <= delayed/10) {
			dst_garbage.timer_expires += dst_garbage.timer_inc;
			if (dst_garbage.timer_expires > DST_GC_MAX)
				dst_garbage.timer_expires = DST_GC_MAX;
			dst_garbage.timer_inc += DST_GC_INC;
		} else {
			dst_garbage.timer_inc = DST_GC_INC;
			dst_garbage.timer_expires = DST_GC_MIN;
		}
		expires = dst_garbage.timer_expires;
		/*
		 * if the next desired timer is more than 4 seconds in the
		 * future then round the timer to whole seconds
		 */
		if (expires > 4*HZ)
			expires = round_jiffies_relative(expires);
		schedule_delayed_work(&dst_gc_work, expires);
	}

	spin_unlock_bh(&dst_garbage.lock);
	mutex_unlock(&dst_gc_mutex);
}
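
/* A worked example of the backoff above, assuming the usual definitions from
 * include/net/dst.h (DST_GC_MIN = HZ/10, DST_GC_INC = HZ/2, DST_GC_MAX = 120*HZ;
 * check the values in your tree).  Each unproductive run grows the delay
 * roughly quadratically:
 *
 *	start:  timer_inc = HZ/2, next run in DST_GC_MIN (~0.1s)
 *	run 1:  expires += HZ/2   -> ~0.6s,  timer_inc -> 1.0*HZ
 *	run 2:  expires += 1.0*HZ -> ~1.6s,  timer_inc -> 1.5*HZ
 *	run 3:  expires += 1.5*HZ -> ~3.1s,  timer_inc -> 2.0*HZ
 *	...
 *	capped at DST_GC_MAX (~120s), rounded to whole seconds beyond 4*HZ.
 *
 * A productive run (more than 1/10 of the delayed entries freed) resets the
 * delay to DST_GC_MIN, and an empty busy list parks the work item until
 * __dst_free() rearms it.
 */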

int dst_discard(struct sk_buff *skb)
{
	kfree_skb(skb);
	return 0;
}
EXPORT_SYMBOL(dst_discard);

const u32 dst_default_metrics[RTAX_MAX + 1] = {
	/* This initializer is needed to force the linker to place this
	 * variable into the const section. Otherwise it might end up in the
	 * bss section. We really want to avoid false sharing on this variable,
	 * and catch any writes on it.
	 */
	[RTAX_MAX] = 0xdeadbeef,
};

void *dst_alloc(struct dst_ops *ops, struct net_device *dev,
		int initial_ref, int initial_obsolete, unsigned short flags)
{
	struct dst_entry *dst;

	if (ops->gc && dst_entries_get_fast(ops) > ops->gc_thresh) {
		if (ops->gc(ops))
			return NULL;
	}
	dst = kmem_cache_alloc(ops->kmem_cachep, GFP_ATOMIC);
	if (!dst)
		return NULL;
	dst->child = NULL;
	dst->dev = dev;
	if (dev)
		dev_hold(dev);
	dst->ops = ops;
	dst_init_metrics(dst, dst_default_metrics, true);
	dst->expires = 0UL;
	dst->path = dst;
#ifdef CONFIG_XFRM
	dst->xfrm = NULL;
#endif
	dst->input = dst_discard;
	dst->output = dst_discard;
	dst->error = 0;
	dst->obsolete = initial_obsolete;
	dst->header_len = 0;
	dst->trailer_len = 0;
#ifdef CONFIG_IP_ROUTE_CLASSID
	dst->tclassid = 0;
#endif
	atomic_set(&dst->__refcnt, initial_ref);
	dst->__use = 0;
	dst->lastuse = jiffies;
	dst->flags = flags;
	dst->pending_confirm = 0;
	dst->next = NULL;
	if (!(flags & DST_NOCOUNT))
		dst_entries_add(ops, 1);
	return dst;
}
EXPORT_SYMBOL(dst_alloc);
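
/* A rough usage sketch (illustrative only; "struct my_route", "my_dst_ops",
 * "my_gc" and "my_destroy" are hypothetical, not part of this file).  A
 * protocol embeds a struct dst_entry at the start of its own route structure,
 * supplies a dst_ops table whose fields this file dereferences (kmem_cachep,
 * gc_thresh, gc, destroy, ...), and pairs each dst_alloc() with a
 * dst_release():
 *
 *	struct my_route {
 *		struct dst_entry	dst;	(must be the first member)
 *		... protocol private fields ...
 *	};
 *
 *	static struct dst_ops my_dst_ops = {
 *		.family		= AF_INET,
 *		.gc_thresh	= 1024,
 *		.gc		= my_gc,	(may veto the allocation)
 *		.destroy	= my_destroy,
 *		.cow_metrics	= dst_cow_metrics_generic,
 *		(.kmem_cachep is set up with kmem_cache_create() at init time)
 *	};
 *
 *	struct my_route *rt = dst_alloc(&my_dst_ops, dev, 1,
 *					DST_OBSOLETE_NONE, DST_NOCACHE);
 *	if (!rt)
 *		return NULL;
 *	...
 *	dst_release(&rt->dst);	(with DST_NOCACHE, frees it at refcount zero)
 */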

static void ___dst_free(struct dst_entry *dst)
{
	/* The first case (dev == NULL) is required when the
	 * protocol module is unloaded.
	 */
	if (dst->dev == NULL || !(dst->dev->flags & IFF_UP))
		dst->input = dst->output = dst_discard;
	dst->obsolete = DST_OBSOLETE_DEAD;
}

void __dst_free(struct dst_entry *dst)
{
	spin_lock_bh(&dst_garbage.lock);
	___dst_free(dst);
	dst->next = dst_garbage.list;
	dst_garbage.list = dst;
	if (dst_garbage.timer_inc > DST_GC_INC) {
		dst_garbage.timer_inc = DST_GC_INC;
		dst_garbage.timer_expires = DST_GC_MIN;
		mod_delayed_work(system_wq, &dst_gc_work,
				 dst_garbage.timer_expires);
	}
	spin_unlock_bh(&dst_garbage.lock);
}
EXPORT_SYMBOL(__dst_free);

struct dst_entry *dst_destroy(struct dst_entry *dst)
{
	struct dst_entry *child;

	smp_rmb();

again:
	child = dst->child;

	if (!(dst->flags & DST_NOCOUNT))
		dst_entries_add(dst->ops, -1);

	if (dst->ops->destroy)
		dst->ops->destroy(dst);
	if (dst->dev)
		dev_put(dst->dev);
	kmem_cache_free(dst->ops->kmem_cachep, dst);

	dst = child;
	if (dst) {
		int nohash = dst->flags & DST_NOHASH;

		if (atomic_dec_and_test(&dst->__refcnt)) {
			/* We were the real parent of this dst, so kill the child. */
			if (nohash)
				goto again;
		} else {
			/* Child is still referenced, return it for freeing. */
			if (nohash)
				return dst;
			/* Child is still in its hash table */
		}
	}
	return NULL;
}
EXPORT_SYMBOL(dst_destroy);

void dst_release(struct dst_entry *dst)
{
	if (dst) {
		int newrefcnt;

		newrefcnt = atomic_dec_return(&dst->__refcnt);
		WARN_ON(newrefcnt < 0);
		if (unlikely(dst->flags & DST_NOCACHE) && !newrefcnt) {
			dst = dst_destroy(dst);
			if (dst)
				__dst_free(dst);
		}
	}
}
EXPORT_SYMBOL(dst_release);

u32 *dst_cow_metrics_generic(struct dst_entry *dst, unsigned long old)
{
	u32 *p = kmalloc(sizeof(u32) * RTAX_MAX, GFP_ATOMIC);

	if (p) {
		u32 *old_p = __DST_METRICS_PTR(old);
		unsigned long prev, new;

		memcpy(p, old_p, sizeof(u32) * RTAX_MAX);

		new = (unsigned long) p;
		prev = cmpxchg(&dst->_metrics, old, new);

		if (prev != old) {
			kfree(p);
			p = __DST_METRICS_PTR(prev);
			if (prev & DST_METRICS_READ_ONLY)
				p = NULL;
		}
	}
	return p;
}
EXPORT_SYMBOL(dst_cow_metrics_generic);

/* Caller asserts that dst_metrics_read_only(dst) is false. */
void __dst_destroy_metrics_generic(struct dst_entry *dst, unsigned long old)
{
	unsigned long prev, new;

	new = ((unsigned long) dst_default_metrics) | DST_METRICS_READ_ONLY;
	prev = cmpxchg(&dst->_metrics, old, new);
	if (prev == old)
		kfree(__DST_METRICS_PTR(old));
}
EXPORT_SYMBOL(__dst_destroy_metrics_generic);

/**
 * skb_dst_set_noref - sets skb dst, without a reference
 * @skb: buffer
 * @dst: dst entry
 *
 * Sets skb dst, assuming a reference was not taken on dst;
 * skb_dst_drop() should not dst_release() this dst.
 */
void skb_dst_set_noref(struct sk_buff *skb, struct dst_entry *dst)
{
	WARN_ON(!rcu_read_lock_held() && !rcu_read_lock_bh_held());
	/* If dst is not in the cache, we must take a reference, because
	 * dst_release() will destroy dst as soon as its refcount becomes zero
	 */
	if (unlikely(dst->flags & DST_NOCACHE)) {
		dst_hold(dst);
		skb_dst_set(skb, dst);
	} else {
		skb->_skb_refdst = (unsigned long)dst | SKB_DST_NOREF;
	}
}
EXPORT_SYMBOL(skb_dst_set_noref);
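
/* A short usage sketch for skb_dst_set_noref() (illustrative only; the lookup
 * helper is hypothetical).  The caller must be inside an RCU read-side
 * section, and because no refcount is held on the dst, the skb must not
 * outlive that section unless a real reference is taken later (typically via
 * skb_dst_force()):
 *
 *	rcu_read_lock();
 *	dst = my_cached_lookup(net, daddr);	(hypothetical cached lookup)
 *	if (dst)
 *		skb_dst_set_noref(skb, dst);
 *	... process the skb within this RCU section ...
 *	rcu_read_unlock();
 */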

/* Dirty hack. We did it in 2.2 (in __dst_free),
 * we have _very_ good reasons not to repeat
 * this mistake in 2.3, but we have no choice
 * now. _It_ _is_ _explicit_ _deliberate_
 * _race_ _condition_.
 *
 * Commented and originally written by Alexey.
 */
static void dst_ifdown(struct dst_entry *dst, struct net_device *dev,
		       int unregister)
{
	if (dst->ops->ifdown)
		dst->ops->ifdown(dst, dev, unregister);

	if (dev != dst->dev)
		return;

	if (!unregister) {
		dst->input = dst->output = dst_discard;
	} else {
		dst->dev = dev_net(dst->dev)->loopback_dev;
		dev_hold(dst->dev);
		dev_put(dev);
	}
}

static int dst_dev_event(struct notifier_block *this, unsigned long event,
			 void *ptr)
{
	struct net_device *dev = ptr;
	struct dst_entry *dst, *last = NULL;

	switch (event) {
	case NETDEV_UNREGISTER_FINAL:
	case NETDEV_DOWN:
		mutex_lock(&dst_gc_mutex);
		for (dst = dst_busy_list; dst; dst = dst->next) {
			last = dst;
			dst_ifdown(dst, dev, event != NETDEV_DOWN);
		}

		spin_lock_bh(&dst_garbage.lock);
		dst = dst_garbage.list;
		dst_garbage.list = NULL;
		spin_unlock_bh(&dst_garbage.lock);

		if (last)
			last->next = dst;
		else
			dst_busy_list = dst;
		for (; dst; dst = dst->next)
			dst_ifdown(dst, dev, event != NETDEV_DOWN);
		mutex_unlock(&dst_gc_mutex);
		break;
	}
	return NOTIFY_DONE;
}

static struct notifier_block dst_dev_notifier = {
	.notifier_call = dst_dev_event,
	.priority = -10, /* must be called after other network notifiers */
};

void __init dst_init(void)
{
	register_netdevice_notifier(&dst_dev_notifier);
}
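
/* A short sketch of how dst_cow_metrics_generic() above is reached
 * (illustrative only).  A protocol that points .cow_metrics at it starts out
 * sharing the read-only dst_default_metrics array; the first metric write
 * goes through dst_metric_set() (an inline from include/net/dst.h), which
 * detects DST_METRICS_READ_ONLY and calls ->cow_metrics() to install a
 * private, writable copy:
 *
 *	dst_metric_set(dst, RTAX_MTU, 1400);
 *		-> dst_metrics_write_ptr(dst)
 *		-> dst->ops->cow_metrics(dst, dst->_metrics)
 *		   (kmalloc + memcpy + cmpxchg of dst->_metrics)
 *
 * On destruction, __dst_destroy_metrics_generic() swaps the default array
 * back in and frees the private copy.
 */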