/*
 *	net/dst.h	Protocol independent destination cache definitions.
 *
 *	Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 */

#ifndef _NET_DST_H
#define _NET_DST_H

#include <net/dst_ops.h>
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>
#include <linux/rcupdate.h>
#include <linux/jiffies.h>
#include <net/neighbour.h>
#include <asm/processor.h>

#define DST_GC_MIN	(HZ/10)
#define DST_GC_INC	(HZ/2)
#define DST_GC_MAX	(120*HZ)

/* Each dst_entry has a reference count and sits in some parent list(s).
 * When it is removed from a parent list, it is "freed" (dst_free).
 * After this it enters a dead state (dst->obsolete > 0) and if its refcnt
 * is zero, it can be destroyed immediately, otherwise it is added to
 * the gc list and the garbage collector periodically checks the refcnt.
 */

struct sk_buff;

struct dst_entry {
	struct rcu_head		rcu_head;
	struct dst_entry	*child;
	struct net_device	*dev;
	struct dst_ops		*ops;
	unsigned long		_metrics;
	unsigned long		expires;
	struct dst_entry	*path;
	struct neighbour	*_neighbour;
#ifdef CONFIG_XFRM
	struct xfrm_state	*xfrm;
#else
	void			*__pad1;
#endif
	int			(*input)(struct sk_buff *);
	int			(*output)(struct sk_buff *);

	int			flags;
#define DST_HOST		0x0001
#define DST_NOXFRM		0x0002
#define DST_NOPOLICY		0x0004
#define DST_NOHASH		0x0008
#define DST_NOCACHE		0x0010
#define DST_NOCOUNT		0x0020

	short			error;
	short			obsolete;
	unsigned short		header_len;	/* more space at head required */
	unsigned short		trailer_len;	/* space to reserve at tail */
#ifdef CONFIG_IP_ROUTE_CLASSID
	__u32			tclassid;
#else
	__u32			__pad2;
#endif

	/*
	 * Align __refcnt to a 64 byte boundary
	 * (L1_CACHE_SIZE would be too much)
	 */
#ifdef CONFIG_64BIT
	long			__pad_to_align_refcnt[2];
#endif
	/*
	 * __refcnt wants to be on a different cache line from
	 * input/output/ops or performance tanks badly
	 */
	atomic_t		__refcnt;	/* client references */
	int			__use;
	unsigned long		lastuse;
	union {
		struct dst_entry	*next;
		struct rtable __rcu	*rt_next;
		struct rt6_info		*rt6_next;
		struct dn_route __rcu	*dn_next;
	};
};

static inline struct neighbour *dst_get_neighbour(struct dst_entry *dst)
{
	return dst->_neighbour;
}

static inline void dst_set_neighbour(struct dst_entry *dst, struct neighbour *neigh)
{
	dst->_neighbour = neigh;
}

extern u32 *dst_cow_metrics_generic(struct dst_entry *dst, unsigned long old);
extern const u32 dst_default_metrics[RTAX_MAX];

#define DST_METRICS_READ_ONLY	0x1UL
#define __DST_METRICS_PTR(Y)	\
	((u32 *)((Y) & ~DST_METRICS_READ_ONLY))
#define DST_METRICS_PTR(X)	__DST_METRICS_PTR((X)->_metrics)

static inline bool dst_metrics_read_only(const struct dst_entry *dst)
{
	return dst->_metrics & DST_METRICS_READ_ONLY;
}

extern void __dst_destroy_metrics_generic(struct dst_entry *dst, unsigned long old);

static inline void dst_destroy_metrics_generic(struct dst_entry *dst)
{
	unsigned long val = dst->_metrics;

	if (!(val & DST_METRICS_READ_ONLY))
		__dst_destroy_metrics_generic(dst, val);
}

static inline u32 *dst_metrics_write_ptr(struct dst_entry *dst)
{
	unsigned long p = dst->_metrics;

	BUG_ON(!p);

	if (p & DST_METRICS_READ_ONLY)
		return dst->ops->cow_metrics(dst, p);
	return __DST_METRICS_PTR(p);
}
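/*
 * Example (illustrative only, not part of this header): the low bit of
 * ->_metrics tags the metrics array as shared/read-only, which is why
 * writers must go through dst_metrics_write_ptr() and let
 * ->cow_metrics() unshare the array first.  The MTU value below is
 * made up for the sketch:
 *
 *	u32 *p = dst_metrics_write_ptr(dst);	(may copy-on-write)
 *	if (p)
 *		p[RTAX_MTU - 1] = 1400;		(what dst_metric_set() does)
 */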
/* This may only be invoked before the entry has reached global
 * visibility.
 */
static inline void dst_init_metrics(struct dst_entry *dst,
				    const u32 *src_metrics,
				    bool read_only)
{
	dst->_metrics = ((unsigned long) src_metrics) |
		(read_only ? DST_METRICS_READ_ONLY : 0);
}

static inline void dst_copy_metrics(struct dst_entry *dest, const struct dst_entry *src)
{
	u32 *dst_metrics = dst_metrics_write_ptr(dest);

	if (dst_metrics) {
		u32 *src_metrics = DST_METRICS_PTR(src);

		memcpy(dst_metrics, src_metrics, RTAX_MAX * sizeof(u32));
	}
}

static inline u32 *dst_metrics_ptr(struct dst_entry *dst)
{
	return DST_METRICS_PTR(dst);
}

static inline u32
dst_metric_raw(const struct dst_entry *dst, const int metric)
{
	u32 *p = DST_METRICS_PTR(dst);

	return p[metric-1];
}

static inline u32
dst_metric(const struct dst_entry *dst, const int metric)
{
	WARN_ON_ONCE(metric == RTAX_HOPLIMIT ||
		     metric == RTAX_ADVMSS ||
		     metric == RTAX_MTU);
	return dst_metric_raw(dst, metric);
}

static inline u32
dst_metric_advmss(const struct dst_entry *dst)
{
	u32 advmss = dst_metric_raw(dst, RTAX_ADVMSS);

	if (!advmss)
		advmss = dst->ops->default_advmss(dst);

	return advmss;
}

static inline void dst_metric_set(struct dst_entry *dst, int metric, u32 val)
{
	u32 *p = dst_metrics_write_ptr(dst);

	if (p)
		p[metric-1] = val;
}

static inline u32
dst_feature(const struct dst_entry *dst, u32 feature)
{
	return dst_metric(dst, RTAX_FEATURES) & feature;
}

static inline u32 dst_mtu(const struct dst_entry *dst)
{
	u32 mtu = dst_metric_raw(dst, RTAX_MTU);

	if (!mtu)
		mtu = dst->ops->default_mtu(dst);

	return mtu;
}

/* RTT metrics are stored in milliseconds for user ABI, but used as jiffies */
static inline unsigned long dst_metric_rtt(const struct dst_entry *dst, int metric)
{
	return msecs_to_jiffies(dst_metric(dst, metric));
}

static inline void set_dst_metric_rtt(struct dst_entry *dst, int metric,
				      unsigned long rtt)
{
	dst_metric_set(dst, metric, jiffies_to_msecs(rtt));
}
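/*
 * Example (illustrative only): because the user ABI unit is
 * milliseconds while the kernel works in jiffies, RTT metrics should
 * round-trip through the two helpers above:
 *
 *	set_dst_metric_rtt(dst, RTAX_RTT, rtt);		(rtt in jiffies)
 *	rtt = dst_metric_rtt(dst, RTAX_RTT);		(back in jiffies)
 *
 * Reading RTAX_RTT via dst_metric() directly yields milliseconds.
 */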
static inline u32
dst_allfrag(const struct dst_entry *dst)
{
	int ret = dst_feature(dst, RTAX_FEATURE_ALLFRAG);

	return ret;
}

static inline int
dst_metric_locked(const struct dst_entry *dst, int metric)
{
	return dst_metric(dst, RTAX_LOCK) & (1<<metric);
}

static inline void dst_hold(struct dst_entry *dst)
{
	/*
	 * If your kernel compilation stops here, please check
	 * the __pad_to_align_refcnt declaration in struct dst_entry
	 */
	BUILD_BUG_ON(offsetof(struct dst_entry, __refcnt) & 63);
	atomic_inc(&dst->__refcnt);
}

static inline void dst_use(struct dst_entry *dst, unsigned long time)
{
	dst_hold(dst);
	dst->__use++;
	dst->lastuse = time;
}

static inline void dst_use_noref(struct dst_entry *dst, unsigned long time)
{
	dst->__use++;
	dst->lastuse = time;
}

static inline struct dst_entry *dst_clone(struct dst_entry *dst)
{
	if (dst)
		atomic_inc(&dst->__refcnt);
	return dst;
}

extern void dst_release(struct dst_entry *dst);

static inline void refdst_drop(unsigned long refdst)
{
	if (!(refdst & SKB_DST_NOREF))
		dst_release((struct dst_entry *)(refdst & SKB_DST_PTRMASK));
}

/**
 * skb_dst_drop - drops skb dst
 * @skb: buffer
 *
 * Drops the dst reference count if a reference was taken.
 */
static inline void skb_dst_drop(struct sk_buff *skb)
{
	if (skb->_skb_refdst) {
		refdst_drop(skb->_skb_refdst);
		skb->_skb_refdst = 0UL;
	}
}

static inline void skb_dst_copy(struct sk_buff *nskb, const struct sk_buff *oskb)
{
	nskb->_skb_refdst = oskb->_skb_refdst;
	if (!(nskb->_skb_refdst & SKB_DST_NOREF))
		dst_clone(skb_dst(nskb));
}

/**
 * skb_dst_force - makes sure skb dst is refcounted
 * @skb: buffer
 *
 * If the dst is not yet refcounted, take a reference now.
 */
static inline void skb_dst_force(struct sk_buff *skb)
{
	if (skb_dst_is_noref(skb)) {
		WARN_ON(!rcu_read_lock_held());
		skb->_skb_refdst &= ~SKB_DST_NOREF;
		dst_clone(skb_dst(skb));
	}
}
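/*
 * Example (illustrative only): a noref dst is valid only inside the
 * rcu_read_lock() section in which it was set, so code that keeps the
 * skb around longer (e.g. queues it for deferred processing) must
 * convert it to a real reference first:
 *
 *	rcu_read_lock();
 *	...
 *	skb_dst_force(skb);	(noref -> refcounted)
 *	rcu_read_unlock();
 *
 * after which skb_dst_drop() releases the reference as usual.
 */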
/**
 * __skb_tunnel_rx - prepare skb for rx reinsert
 * @skb: buffer
 * @dev: tunnel device
 *
 * After decapsulation, the packet is going to re-enter our stack via
 * netif_rx(), so perform some cleanups. (No accounting is done.)
 */
static inline void __skb_tunnel_rx(struct sk_buff *skb, struct net_device *dev)
{
	skb->dev = dev;
	skb->rxhash = 0;
	skb_set_queue_mapping(skb, 0);
	skb_dst_drop(skb);
	nf_reset(skb);
}

/**
 * skb_tunnel_rx - prepare skb for rx reinsert
 * @skb: buffer
 * @dev: tunnel device
 *
 * After decapsulation, the packet is going to re-enter our stack via
 * netif_rx(), so perform some cleanups and update the accounting.
 * Note: this accounting is not SMP safe.
 */
static inline void skb_tunnel_rx(struct sk_buff *skb, struct net_device *dev)
{
	/* TODO : stats should be SMP safe */
	dev->stats.rx_packets++;
	dev->stats.rx_bytes += skb->len;
	__skb_tunnel_rx(skb, dev);
}

/* Children define the path of the packet through the Linux networking
 * stack. Thus, destinations are stackable.
 */

static inline struct dst_entry *skb_dst_pop(struct sk_buff *skb)
{
	struct dst_entry *child = dst_clone(skb_dst(skb)->child);

	skb_dst_drop(skb);
	return child;
}

extern int dst_discard(struct sk_buff *skb);
extern void *dst_alloc(struct dst_ops *ops, struct net_device *dev,
		       int initial_ref, int initial_obsolete, int flags);
extern void __dst_free(struct dst_entry *dst);
extern struct dst_entry *dst_destroy(struct dst_entry *dst);

static inline void dst_free(struct dst_entry *dst)
{
	if (dst->obsolete > 1)
		return;
	if (!atomic_read(&dst->__refcnt)) {
		dst = dst_destroy(dst);
		if (!dst)
			return;
	}
	__dst_free(dst);
}

static inline void dst_rcu_free(struct rcu_head *head)
{
	struct dst_entry *dst = container_of(head, struct dst_entry, rcu_head);

	dst_free(dst);
}

static inline void dst_confirm(struct dst_entry *dst)
{
	if (dst) {
		struct neighbour *n = dst_get_neighbour(dst);

		neigh_confirm(n);
	}
}

static inline struct neighbour *dst_neigh_lookup(const struct dst_entry *dst, const void *daddr)
{
	return dst->ops->neigh_lookup(dst, daddr);
}

static inline void dst_link_failure(struct sk_buff *skb)
{
	struct dst_entry *dst = skb_dst(skb);

	if (dst && dst->ops && dst->ops->link_failure)
		dst->ops->link_failure(skb);
}

static inline void dst_set_expires(struct dst_entry *dst, int timeout)
{
	unsigned long expires = jiffies + timeout;

	if (expires == 0)
		expires = 1;

	if (dst->expires == 0 || time_before(expires, dst->expires))
		dst->expires = expires;
}

/* Output packet to network from transport. */
static inline int dst_output(struct sk_buff *skb)
{
	return skb_dst(skb)->output(skb);
}

/* Input packet from network to transport. */
static inline int dst_input(struct sk_buff *skb)
{
	return skb_dst(skb)->input(skb);
}

static inline struct dst_entry *dst_check(struct dst_entry *dst, u32 cookie)
{
	if (dst->obsolete)
		dst = dst->ops->check(dst, cookie);
	return dst;
}

extern void dst_init(void);

/* Flags for xfrm_lookup flags argument. */
enum {
	XFRM_LOOKUP_ICMP = 1 << 0,
};

struct flowi;
#ifndef CONFIG_XFRM
static inline struct dst_entry *xfrm_lookup(struct net *net,
					    struct dst_entry *dst_orig,
					    const struct flowi *fl, struct sock *sk,
					    int flags)
{
	return dst_orig;
}
#else
extern struct dst_entry *xfrm_lookup(struct net *net, struct dst_entry *dst_orig,
				     const struct flowi *fl, struct sock *sk,
				     int flags);
#endif

#endif /* _NET_DST_H */