/*
 * net/dst.h	Protocol independent destination cache definitions.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 */

#ifndef _NET_DST_H
#define _NET_DST_H

#include <net/dst_ops.h>
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>
#include <linux/rcupdate.h>
#include <linux/jiffies.h>
#include <net/neighbour.h>
#include <asm/processor.h>

#define DST_GC_MIN	(HZ/10)
#define DST_GC_INC	(HZ/2)
#define DST_GC_MAX	(120*HZ)

/* Each dst_entry has a reference count and sits in some parent list(s).
 * When it is removed from a parent list, it is "freed" (dst_free).
 * After this it enters a dead state (dst->obsolete > 0) and if its refcnt
 * is zero, it can be destroyed immediately, otherwise it is added
 * to the gc list and the garbage collector periodically checks the refcnt.
 */

struct sk_buff;

struct dst_entry {
	struct rcu_head		rcu_head;
	struct dst_entry	*child;
	struct net_device	*dev;
	struct dst_ops		*ops;
	unsigned long		_metrics;
	unsigned long		expires;
	struct dst_entry	*path;
	struct neighbour	*neighbour;
	struct hh_cache		*hh;
#ifdef CONFIG_XFRM
	struct xfrm_state	*xfrm;
#else
	void			*__pad1;
#endif
	int			(*input)(struct sk_buff *);
	int			(*output)(struct sk_buff *);

	short			error;
	short			obsolete;
	unsigned short		header_len;	/* more space at head required */
	unsigned short		trailer_len;	/* space to reserve at tail */
#ifdef CONFIG_IP_ROUTE_CLASSID
	__u32			tclassid;
#else
	__u32			__pad2;
#endif

	/*
	 * Align __refcnt to a 64 byte boundary
	 * (L1_CACHE_SIZE would be too much)
	 */
#ifdef CONFIG_64BIT
	long			__pad_to_align_refcnt[1];
#endif
	/*
	 * __refcnt wants to be on a different cache line from
	 * input/output/ops or performance tanks badly
	 */
	atomic_t		__refcnt;	/* client references	*/
	int			__use;
	unsigned long		lastuse;
	int			flags;
#define DST_HOST		0x0001
#define DST_NOXFRM		0x0002
#define DST_NOPOLICY		0x0004
#define DST_NOHASH		0x0008
#define DST_NOCACHE		0x0010
	union {
		struct dst_entry	*next;
		struct rtable __rcu	*rt_next;
		struct rt6_info		*rt6_next;
		struct dn_route __rcu	*dn_next;
	};
};

extern u32 *dst_cow_metrics_generic(struct dst_entry *dst, unsigned long old);
extern const u32 dst_default_metrics[RTAX_MAX];

#define DST_METRICS_READ_ONLY	0x1UL
#define __DST_METRICS_PTR(Y)	\
	((u32 *)((Y) & ~DST_METRICS_READ_ONLY))
#define DST_METRICS_PTR(X)	__DST_METRICS_PTR((X)->_metrics)

static inline bool dst_metrics_read_only(const struct dst_entry *dst)
{
	return dst->_metrics & DST_METRICS_READ_ONLY;
}

extern void __dst_destroy_metrics_generic(struct dst_entry *dst, unsigned long old);

static inline void dst_destroy_metrics_generic(struct dst_entry *dst)
{
	unsigned long val = dst->_metrics;
	if (!(val & DST_METRICS_READ_ONLY))
		__dst_destroy_metrics_generic(dst, val);
}

static inline u32 *dst_metrics_write_ptr(struct dst_entry *dst)
{
	unsigned long p = dst->_metrics;

	BUG_ON(!p);

	if (p & DST_METRICS_READ_ONLY)
		return dst->ops->cow_metrics(dst, p);
	return __DST_METRICS_PTR(p);
}

/* This may only be invoked before the entry has reached global
 * visibility.
 */
static inline void dst_init_metrics(struct dst_entry *dst,
				    const u32 *src_metrics,
				    bool read_only)
{
	dst->_metrics = ((unsigned long) src_metrics) |
		(read_only ? DST_METRICS_READ_ONLY : 0);
}
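
/*
 * Illustrative sketch (not part of this header): how the packed _metrics
 * word is commonly used.  An entry can start out pointing at the shared
 * read-only default array; the first write then goes through
 * dst_metrics_write_ptr(), which asks ->cow_metrics() for a private copy.
 * The caller below is hypothetical, shown only to clarify the scheme:
 *
 *	struct dst_entry *dst = ...;
 *
 *	dst_init_metrics(dst, dst_default_metrics, true);
 *	...
 *	dst_metric_set(dst, RTAX_WINDOW, 65535);	// see below; COWs the array
 */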
static inline void dst_copy_metrics(struct dst_entry *dest, const struct dst_entry *src)
{
	u32 *dst_metrics = dst_metrics_write_ptr(dest);

	if (dst_metrics) {
		u32 *src_metrics = DST_METRICS_PTR(src);

		memcpy(dst_metrics, src_metrics, RTAX_MAX * sizeof(u32));
	}
}

static inline u32 *dst_metrics_ptr(struct dst_entry *dst)
{
	return DST_METRICS_PTR(dst);
}

static inline u32
dst_metric_raw(const struct dst_entry *dst, const int metric)
{
	u32 *p = DST_METRICS_PTR(dst);

	return p[metric-1];
}

static inline u32
dst_metric(const struct dst_entry *dst, const int metric)
{
	WARN_ON_ONCE(metric == RTAX_HOPLIMIT ||
		     metric == RTAX_ADVMSS ||
		     metric == RTAX_MTU);
	return dst_metric_raw(dst, metric);
}

static inline u32
dst_metric_advmss(const struct dst_entry *dst)
{
	u32 advmss = dst_metric_raw(dst, RTAX_ADVMSS);

	if (!advmss)
		advmss = dst->ops->default_advmss(dst);

	return advmss;
}

static inline void dst_metric_set(struct dst_entry *dst, int metric, u32 val)
{
	u32 *p = dst_metrics_write_ptr(dst);

	if (p)
		p[metric-1] = val;
}

static inline u32
dst_feature(const struct dst_entry *dst, u32 feature)
{
	return dst_metric(dst, RTAX_FEATURES) & feature;
}

static inline u32 dst_mtu(const struct dst_entry *dst)
{
	u32 mtu = dst_metric_raw(dst, RTAX_MTU);

	if (!mtu)
		mtu = dst->ops->default_mtu(dst);

	return mtu;
}

/* RTT metrics are stored in milliseconds for user ABI, but used as jiffies */
static inline unsigned long dst_metric_rtt(const struct dst_entry *dst, int metric)
{
	return msecs_to_jiffies(dst_metric(dst, metric));
}

static inline void set_dst_metric_rtt(struct dst_entry *dst, int metric,
				      unsigned long rtt)
{
	dst_metric_set(dst, metric, jiffies_to_msecs(rtt));
}

static inline u32
dst_allfrag(const struct dst_entry *dst)
{
	int ret = dst_feature(dst, RTAX_FEATURE_ALLFRAG);
	return ret;
}

static inline int
dst_metric_locked(const struct dst_entry *dst, int metric)
{
	return dst_metric(dst, RTAX_LOCK) & (1<<metric);
}

static inline void dst_hold(struct dst_entry *dst)
{
	/*
	 * If your kernel compilation stops here, please check
	 * the __pad_to_align_refcnt declaration in struct dst_entry
	 */
	BUILD_BUG_ON(offsetof(struct dst_entry, __refcnt) & 63);
	atomic_inc(&dst->__refcnt);
}

static inline void dst_use(struct dst_entry *dst, unsigned long time)
{
	dst_hold(dst);
	dst->__use++;
	dst->lastuse = time;
}

static inline void dst_use_noref(struct dst_entry *dst, unsigned long time)
{
	dst->__use++;
	dst->lastuse = time;
}

static inline struct dst_entry *dst_clone(struct dst_entry *dst)
{
	if (dst)
		atomic_inc(&dst->__refcnt);
	return dst;
}

extern void dst_release(struct dst_entry *dst);

static inline void refdst_drop(unsigned long refdst)
{
	if (!(refdst & SKB_DST_NOREF))
		dst_release((struct dst_entry *)(refdst & SKB_DST_PTRMASK));
}

/**
 * skb_dst_drop - drops skb dst
 * @skb: buffer
 *
 * Drops dst reference count if a reference was taken.
 */
static inline void skb_dst_drop(struct sk_buff *skb)
{
	if (skb->_skb_refdst) {
		refdst_drop(skb->_skb_refdst);
		skb->_skb_refdst = 0UL;
	}
}

static inline void skb_dst_copy(struct sk_buff *nskb, const struct sk_buff *oskb)
{
	nskb->_skb_refdst = oskb->_skb_refdst;
	if (!(nskb->_skb_refdst & SKB_DST_NOREF))
		dst_clone(skb_dst(nskb));
}

/**
 * skb_dst_force - makes sure skb dst is refcounted
 * @skb: buffer
 *
 * If dst is not yet refcounted, take a reference now.
 */
static inline void skb_dst_force(struct sk_buff *skb)
{
	if (skb_dst_is_noref(skb)) {
		WARN_ON(!rcu_read_lock_held());
		skb->_skb_refdst &= ~SKB_DST_NOREF;
		dst_clone(skb_dst(skb));
	}
}

/**
 * __skb_tunnel_rx - prepare skb for rx reinsert
 * @skb: buffer
 * @dev: tunnel device
 *
 * After decapsulation, the packet is going to re-enter (netif_rx()) our stack,
 * so perform some cleanups. (No accounting is done.)
 */
static inline void __skb_tunnel_rx(struct sk_buff *skb, struct net_device *dev)
{
	skb->dev = dev;
	skb->rxhash = 0;
	skb_set_queue_mapping(skb, 0);
	skb_dst_drop(skb);
	nf_reset(skb);
}

/**
 * skb_tunnel_rx - prepare skb for rx reinsert
 * @skb: buffer
 * @dev: tunnel device
 *
 * After decapsulation, the packet is going to re-enter (netif_rx()) our stack,
 * so perform some cleanups and account the packet.
 * Note: this accounting is not SMP safe.
 */
static inline void skb_tunnel_rx(struct sk_buff *skb, struct net_device *dev)
{
	/* TODO : stats should be SMP safe */
	dev->stats.rx_packets++;
	dev->stats.rx_bytes += skb->len;
	__skb_tunnel_rx(skb, dev);
}
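
/*
 * Illustrative sketch (hypothetical tunnel driver, not part of this header):
 * a typical decapsulation path hands the inner packet back to the stack via
 * skb_tunnel_rx() and netif_rx().  "tunnel->dev" and the protocol value are
 * assumptions made up for the example.
 *
 *	// inner header already pulled, skb->data points at the inner packet
 *	skb->protocol = htons(ETH_P_IP);
 *	skb_tunnel_rx(skb, tunnel->dev);	// drops old dst, resets queue mapping, bumps rx stats
 *	netif_rx(skb);
 */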
/* Children define the path of the packet through the
 * Linux networking stack.  Thus, destinations are stackable.
 */

static inline struct dst_entry *skb_dst_pop(struct sk_buff *skb)
{
	struct dst_entry *child = dst_clone(skb_dst(skb)->child);

	skb_dst_drop(skb);
	return child;
}

extern int dst_discard(struct sk_buff *skb);
extern void *dst_alloc(struct dst_ops *ops, struct net_device *dev,
		       int initial_ref, int initial_obsolete, int flags);
extern void __dst_free(struct dst_entry *dst);
extern struct dst_entry *dst_destroy(struct dst_entry *dst);

static inline void dst_free(struct dst_entry *dst)
{
	if (dst->obsolete > 1)
		return;
	if (!atomic_read(&dst->__refcnt)) {
		dst = dst_destroy(dst);
		if (!dst)
			return;
	}
	__dst_free(dst);
}

static inline void dst_rcu_free(struct rcu_head *head)
{
	struct dst_entry *dst = container_of(head, struct dst_entry, rcu_head);
	dst_free(dst);
}

static inline void dst_confirm(struct dst_entry *dst)
{
	if (dst)
		neigh_confirm(dst->neighbour);
}

static inline void dst_link_failure(struct sk_buff *skb)
{
	struct dst_entry *dst = skb_dst(skb);
	if (dst && dst->ops && dst->ops->link_failure)
		dst->ops->link_failure(skb);
}

static inline void dst_set_expires(struct dst_entry *dst, int timeout)
{
	unsigned long expires = jiffies + timeout;

	if (expires == 0)
		expires = 1;

	if (dst->expires == 0 || time_before(expires, dst->expires))
		dst->expires = expires;
}

/* Output packet to network from transport. */
static inline int dst_output(struct sk_buff *skb)
{
	return skb_dst(skb)->output(skb);
}

/* Input packet from network to transport. */
static inline int dst_input(struct sk_buff *skb)
{
	return skb_dst(skb)->input(skb);
}

static inline struct dst_entry *dst_check(struct dst_entry *dst, u32 cookie)
{
	if (dst->obsolete)
		dst = dst->ops->check(dst, cookie);
	return dst;
}
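
/*
 * Illustrative sketch (not part of this header): a caller that cached a
 * route together with a validity cookie revalidates it before use.
 * "cached_dst" and "cached_cookie" are made-up names for the example;
 * the cookie value is protocol specific (e.g. IPv4 callers pass 0).
 *
 *	struct dst_entry *dst = dst_check(cached_dst, cached_cookie);
 *
 *	if (!dst) {
 *		// entry was obsolete and ->check() rejected it: release the
 *		// cached reference and perform a fresh route lookup
 *	}
 */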
extern void dst_init(void);

/* Flags for xfrm_lookup flags argument. */
enum {
	XFRM_LOOKUP_ICMP = 1 << 0,
};

struct flowi;
#ifndef CONFIG_XFRM
static inline struct dst_entry *xfrm_lookup(struct net *net,
					    struct dst_entry *dst_orig,
					    const struct flowi *fl, struct sock *sk,
					    int flags)
{
	return dst_orig;
}
#else
extern struct dst_entry *xfrm_lookup(struct net *net, struct dst_entry *dst_orig,
				     const struct flowi *fl, struct sock *sk,
				     int flags);
#endif

#endif /* _NET_DST_H */