/*
 * net/core/dst.c	Protocol independent destination cache.
 *
 * Authors:		Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 */

#include <linux/bitops.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/workqueue.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/string.h>
#include <linux/types.h>
#include <net/net_namespace.h>

#include <net/dst.h>

/*
 * Theory of operations:
 * 1) We use a list, protected by a spinlock, to add
 *    new entries from both BH and non-BH contexts.
 * 2) To keep the spinlock held only briefly, long-lived
 *    entries are moved to a second list, which is handled
 *    by a garbage-collection task fired from a workqueue.
 * 3) That second list is guarded by a mutex, so that
 *    dst_gc_task() and dst_dev_event() are serialized.
 */
#if RT_CACHE_DEBUG >= 2
static atomic_t dst_total = ATOMIC_INIT(0);
#endif

/*
 * We want to keep the lock and the list close together,
 * to dirty as few cache lines as possible in __dst_free().
 * As this is not a very strong hint, we don't force an alignment on SMP.
 */
static struct {
        spinlock_t              lock;
        struct dst_entry        *list;
        unsigned long           timer_inc;
        unsigned long           timer_expires;
} dst_garbage = {
        .lock = __SPIN_LOCK_UNLOCKED(dst_garbage.lock),
        .timer_inc = DST_GC_MAX,
};
static void dst_gc_task(struct work_struct *work);
static void ___dst_free(struct dst_entry *dst);

static DECLARE_DELAYED_WORK(dst_gc_work, dst_gc_task);

static DEFINE_MUTEX(dst_gc_mutex);
/*
 * Long-lived entries are maintained in this list, guarded by dst_gc_mutex.
 */
static struct dst_entry *dst_busy_list;

static void dst_gc_task(struct work_struct *work)
{
        int delayed = 0;
        int work_performed = 0;
        unsigned long expires = ~0L;
        struct dst_entry *dst, *next, head;
        struct dst_entry *last = &head;
#if RT_CACHE_DEBUG >= 2
        ktime_t time_start = ktime_get();
        struct timespec elapsed;
#endif

        mutex_lock(&dst_gc_mutex);
        next = dst_busy_list;

loop:
        while ((dst = next) != NULL) {
                next = dst->next;
                prefetch(&next->next);
                /* Still referenced: keep it on the busy list. */
                if (likely(atomic_read(&dst->__refcnt))) {
                        last->next = dst;
                        last = dst;
                        delayed++;
                        continue;
                }
                work_performed++;

                dst = dst_destroy(dst);
                if (dst) {
                        /* NOHASH and still referenced. Unless it is already
                         * on the gc list, invalidate it and add it to the
                         * gc list.
                         *
                         * Note: this is temporary. Actually, NOHASH dst's
                         * must be obsoleted when their parent is obsoleted.
                         * But we do not have the state "obsoleted, but
                         * referenced by parent", so this is right.
                         */
                        if (dst->obsolete > 1)
                                continue;

                        ___dst_free(dst);
                        dst->next = next;
                        next = dst;
                }
        }

        spin_lock_bh(&dst_garbage.lock);
        next = dst_garbage.list;
        if (next) {
                dst_garbage.list = NULL;
                spin_unlock_bh(&dst_garbage.lock);
                goto loop;
        }
        last->next = NULL;
        dst_busy_list = head.next;
        if (!dst_busy_list)
                dst_garbage.timer_inc = DST_GC_MAX;
        else {
                /*
                 * If we freed less than 1/10 of the delayed entries,
                 * we can sleep longer.
                 */
                if (work_performed <= delayed/10) {
                        dst_garbage.timer_expires += dst_garbage.timer_inc;
                        if (dst_garbage.timer_expires > DST_GC_MAX)
                                dst_garbage.timer_expires = DST_GC_MAX;
                        dst_garbage.timer_inc += DST_GC_INC;
                } else {
                        dst_garbage.timer_inc = DST_GC_INC;
                        dst_garbage.timer_expires = DST_GC_MIN;
                }
                expires = dst_garbage.timer_expires;
                /*
                 * If the next desired timer is more than 4 seconds in the
                 * future, then round the timer to whole seconds.
                 */
                if (expires > 4*HZ)
                        expires = round_jiffies_relative(expires);
                schedule_delayed_work(&dst_gc_work, expires);
        }

        spin_unlock_bh(&dst_garbage.lock);
        mutex_unlock(&dst_gc_mutex);
#if RT_CACHE_DEBUG >= 2
        elapsed = ktime_to_timespec(ktime_sub(ktime_get(), time_start));
        printk(KERN_DEBUG "dst_total: %d delayed: %d work_perf: %d"
                " expires: %lu elapsed: %lu us\n",
                atomic_read(&dst_total), delayed, work_performed,
                expires,
                elapsed.tv_sec * USEC_PER_SEC + elapsed.tv_nsec / NSEC_PER_USEC);
#endif
}
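/*
 * Back-off sketch (illustrative, not from the original file): after a
 * productive pass, timer_inc = DST_GC_INC and timer_expires = DST_GC_MIN.
 * Each subsequent "quiet" pass (one that frees at most 1/10 of the
 * delayed entries) first adds timer_inc to timer_expires and then grows
 * timer_inc by DST_GC_INC, so after n quiet passes in a row:
 *
 *	timer_expires = DST_GC_MIN + n * (n + 1) / 2 * DST_GC_INC
 *
 * capped at DST_GC_MAX, i.e. a quadratic back-off. A single productive
 * pass resets the interval to DST_GC_MIN again.
 */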
static int dst_discard(struct sk_buff *skb)
{
        kfree_skb(skb);
        return 0;
}

void *dst_alloc(struct dst_ops *ops)
{
        struct dst_entry *dst;

        if (ops->gc && atomic_read(&ops->entries) > ops->gc_thresh) {
                if (ops->gc())
                        return NULL;
        }
        dst = kmem_cache_zalloc(ops->kmem_cachep, GFP_ATOMIC);
        if (!dst)
                return NULL;
        atomic_set(&dst->__refcnt, 0);
        dst->ops = ops;
        dst->lastuse = jiffies;
        dst->path = dst;
        dst->input = dst->output = dst_discard;
#if RT_CACHE_DEBUG >= 2
        atomic_inc(&dst_total);
#endif
        atomic_inc(&ops->entries);
        return dst;
}

static void ___dst_free(struct dst_entry *dst)
{
        /* The first case (dev == NULL) is required when a
         * protocol module is unloaded.
         */
        if (dst->dev == NULL || !(dst->dev->flags&IFF_UP)) {
                dst->input = dst->output = dst_discard;
        }
        dst->obsolete = 2;
}

void __dst_free(struct dst_entry *dst)
{
        spin_lock_bh(&dst_garbage.lock);
        ___dst_free(dst);
        dst->next = dst_garbage.list;
        dst_garbage.list = dst;
        if (dst_garbage.timer_inc > DST_GC_INC) {
                dst_garbage.timer_inc = DST_GC_INC;
                dst_garbage.timer_expires = DST_GC_MIN;
                schedule_delayed_work(&dst_gc_work, dst_garbage.timer_expires);
        }
        spin_unlock_bh(&dst_garbage.lock);
}

struct dst_entry *dst_destroy(struct dst_entry *dst)
{
        struct dst_entry *child;
        struct neighbour *neigh;
        struct hh_cache *hh;

        smp_rmb();

again:
        neigh = dst->neighbour;
        hh = dst->hh;
        child = dst->child;

        dst->hh = NULL;
        if (hh && atomic_dec_and_test(&hh->hh_refcnt))
                kfree(hh);

        if (neigh) {
                dst->neighbour = NULL;
                neigh_release(neigh);
        }

        atomic_dec(&dst->ops->entries);

        if (dst->ops->destroy)
                dst->ops->destroy(dst);
        if (dst->dev)
                dev_put(dst->dev);
#if RT_CACHE_DEBUG >= 2
        atomic_dec(&dst_total);
#endif
        kmem_cache_free(dst->ops->kmem_cachep, dst);

        dst = child;
        if (dst) {
                int nohash = dst->flags & DST_NOHASH;

                if (atomic_dec_and_test(&dst->__refcnt)) {
                        /* We were the real parent of this dst, so kill the child. */
                        if (nohash)
                                goto again;
                } else {
                        /* Child is still referenced, return it for freeing. */
                        if (nohash)
                                return dst;
                        /* Child is still in its hash table. */
                }
        }
        return NULL;
}
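/*
 * Usage sketch (hypothetical caller, not part of this file): a protocol
 * typically embeds struct dst_entry at the start of its own route type,
 * allocates it through dst_alloc() with its own dst_ops, and hands
 * entries it can no longer reach to __dst_free():
 *
 *	struct my_route {			// hypothetical type
 *		struct dst_entry	dst;	// must come first
 *		...
 *	};
 *
 *	struct my_route *rt = dst_alloc(&my_dst_ops);
 *	if (!rt)
 *		return NULL;		// slab exhausted, or ops->gc refused
 *	...
 *	__dst_free(&rt->dst);		// obsolete the entry; dst_gc_task()
 *					// reaps it once __refcnt hits zero
 */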
/* Dirty hack. We did it in 2.2 (in __dst_free),
 * we have _very_ good reasons not to repeat
 * this mistake in 2.3, but we have no choice
 * now. _It_ _is_ _explicit_ _deliberate_
 * _race_ _condition_.
 *
 * Commented and originally written by Alexey.
 */
static inline void dst_ifdown(struct dst_entry *dst, struct net_device *dev,
                              int unregister)
{
        if (dst->ops->ifdown)
                dst->ops->ifdown(dst, dev, unregister);

        if (dev != dst->dev)
                return;

        if (!unregister) {
                dst->input = dst->output = dst_discard;
        } else {
                /* Rebind the entry (and its neighbour, if it points at the
                 * dying device) to the loopback device, fixing up the
                 * device reference counts as we go.
                 */
                dst->dev = init_net.loopback_dev;
                dev_hold(dst->dev);
                dev_put(dev);
                if (dst->neighbour && dst->neighbour->dev == dev) {
                        dst->neighbour->dev = init_net.loopback_dev;
                        dev_put(dev);
                        dev_hold(dst->neighbour->dev);
                }
        }
}

static int dst_dev_event(struct notifier_block *this, unsigned long event,
                         void *ptr)
{
        struct net_device *dev = ptr;
        struct dst_entry *dst, *last = NULL;

        if (dev->nd_net != &init_net)
                return NOTIFY_DONE;

        switch (event) {
        case NETDEV_UNREGISTER:
        case NETDEV_DOWN:
                mutex_lock(&dst_gc_mutex);
                for (dst = dst_busy_list; dst; dst = dst->next) {
                        last = dst;
                        dst_ifdown(dst, dev, event != NETDEV_DOWN);
                }

                spin_lock_bh(&dst_garbage.lock);
                dst = dst_garbage.list;
                dst_garbage.list = NULL;
                spin_unlock_bh(&dst_garbage.lock);

                if (last)
                        last->next = dst;
                else
                        dst_busy_list = dst;
                for (; dst; dst = dst->next)
                        dst_ifdown(dst, dev, event != NETDEV_DOWN);
                mutex_unlock(&dst_gc_mutex);
                break;
        }
        return NOTIFY_DONE;
}

static struct notifier_block dst_dev_notifier = {
        .notifier_call = dst_dev_event,
};

void __init dst_init(void)
{
        register_netdevice_notifier(&dst_dev_notifier);
}

EXPORT_SYMBOL(__dst_free);
EXPORT_SYMBOL(dst_alloc);
EXPORT_SYMBOL(dst_destroy);
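/*
 * Sketch of the dst_ops contract (hypothetical instance, not part of this
 * file): only the fields this file actually dereferences are shown; real
 * users set several more (see <net/dst.h>). A protocol passes such an
 * ops structure to dst_alloc():
 *
 *	static struct dst_ops my_dst_ops = {	// hypothetical
 *		.gc_thresh	= 1024,
 *		.gc		= my_gc,	// dst_alloc() calls this once
 *						//  .entries exceeds gc_thresh
 *		.destroy	= my_destroy,	// called from dst_destroy()
 *		.ifdown		= my_ifdown,	// called from dst_ifdown()
 *		.entries	= ATOMIC_INIT(0),
 *		.kmem_cachep	= my_cachep,	// slab backing dst_alloc()
 *	};
 */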