/*
 *	net/dst.h	Protocol independent destination cache definitions.
 *
 *	Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 */

#ifndef _NET_DST_H
#define _NET_DST_H

#include <net/dst_ops.h>
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>
#include <linux/rcupdate.h>
#include <linux/bug.h>
#include <linux/jiffies.h>
#include <net/neighbour.h>
#include <asm/processor.h>

#define DST_GC_MIN	(HZ/10)
#define DST_GC_INC	(HZ/2)
#define DST_GC_MAX	(120*HZ)

/* Each dst_entry has a reference count and sits in some parent list(s).
 * When it is removed from its parent list, it is "freed" (dst_free).
 * After this it enters a dead state (dst->obsolete > 0) and, if its refcnt
 * is zero, it can be destroyed immediately; otherwise it is added
 * to the gc list and the garbage collector periodically checks the refcnt.
 */

struct sk_buff;

struct dst_entry {
	struct rcu_head		rcu_head;
	struct dst_entry	*child;
	struct net_device	*dev;
	struct dst_ops		*ops;
	unsigned long		_metrics;
	unsigned long		expires;
	struct dst_entry	*path;
	struct dst_entry	*from;
#ifdef CONFIG_XFRM
	struct xfrm_state	*xfrm;
#else
	void			*__pad1;
#endif
	int			(*input)(struct sk_buff *);
	int			(*output)(struct sock *sk, struct sk_buff *skb);

	unsigned short		flags;
#define DST_HOST		0x0001
#define DST_NOXFRM		0x0002
#define DST_NOPOLICY		0x0004
#define DST_NOHASH		0x0008
#define DST_NOCACHE		0x0010
#define DST_NOCOUNT		0x0020
#define DST_FAKE_RTABLE		0x0040
#define DST_XFRM_TUNNEL		0x0080
#define DST_XFRM_QUEUE		0x0100

	unsigned short		pending_confirm;

	short			error;

	/* A non-zero value of dst->obsolete forces by-hand validation
	 * of the route entry.  Positive values are set by the generic
	 * dst layer to indicate that the entry has been forcefully
	 * destroyed.
	 *
	 * Negative values are used by the implementation layer code to
	 * force invocation of the dst_ops->check() method.
	 */
	short			obsolete;
#define DST_OBSOLETE_NONE	0
#define DST_OBSOLETE_DEAD	2
#define DST_OBSOLETE_FORCE_CHK	-1
#define DST_OBSOLETE_KILL	-2
	unsigned short		header_len;	/* more space at head required */
	unsigned short		trailer_len;	/* space to reserve at tail */
#ifdef CONFIG_IP_ROUTE_CLASSID
	__u32			tclassid;
#else
	__u32			__pad2;
#endif

	/*
	 * Align __refcnt to a 64-byte boundary
	 * (L1_CACHE_SIZE would be too much)
	 */
#ifdef CONFIG_64BIT
	long			__pad_to_align_refcnt[2];
#endif
	/*
	 * __refcnt wants to be on a different cache line from
	 * input/output/ops or performance tanks badly
	 */
	atomic_t		__refcnt;	/* client references */
	int			__use;
	unsigned long		lastuse;
	union {
		struct dst_entry	*next;
		struct rtable __rcu	*rt_next;
		struct rt6_info		*rt6_next;
		struct dn_route __rcu	*dn_next;
	};
};

u32 *dst_cow_metrics_generic(struct dst_entry *dst, unsigned long old);
extern const u32 dst_default_metrics[];

#define DST_METRICS_READ_ONLY		0x1UL
#define DST_METRICS_FORCE_OVERWRITE	0x2UL
#define DST_METRICS_FLAGS		0x3UL
#define __DST_METRICS_PTR(Y)	\
	((u32 *)((Y) & ~DST_METRICS_FLAGS))
#define DST_METRICS_PTR(X)	__DST_METRICS_PTR((X)->_metrics)

static inline bool dst_metrics_read_only(const struct dst_entry *dst)
{
	return dst->_metrics & DST_METRICS_READ_ONLY;
}

static inline void dst_metrics_set_force_overwrite(struct dst_entry *dst)
{
	dst->_metrics |= DST_METRICS_FORCE_OVERWRITE;
}

void __dst_destroy_metrics_generic(struct dst_entry *dst, unsigned long old);

static inline void dst_destroy_metrics_generic(struct dst_entry *dst)
{
	unsigned long val = dst->_metrics;

	if (!(val & DST_METRICS_READ_ONLY))
		__dst_destroy_metrics_generic(dst, val);
}

static inline u32 *dst_metrics_write_ptr(struct dst_entry *dst)
{
	unsigned long p = dst->_metrics;

	BUG_ON(!p);

	if (p & DST_METRICS_READ_ONLY)
		return dst->ops->cow_metrics(dst, p);
	return __DST_METRICS_PTR(p);
}

/* This may only be invoked before the entry has reached global
 * visibility.
 */
static inline void dst_init_metrics(struct dst_entry *dst,
				    const u32 *src_metrics,
				    bool read_only)
{
	dst->_metrics = ((unsigned long) src_metrics) |
		(read_only ? DST_METRICS_READ_ONLY : 0);
}
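/* Illustrative sketch (not part of the original header): how the
 * read-only flag and the write path cooperate.  A freshly allocated
 * entry usually shares dst_default_metrics read-only; the first write
 * must go through dst_metrics_write_ptr(), which invokes
 * ops->cow_metrics() to obtain a private, writable array.  The helper
 * name below is hypothetical; RTAX_MTU is the standard metric index
 * from <linux/rtnetlink.h>.  This is essentially what dst_metric_set()
 * further down does for an arbitrary metric.
 */
static inline void example_set_mtu_metric(struct dst_entry *dst, u32 mtu)
{
	u32 *p = dst_metrics_write_ptr(dst);	/* COWs a read-only array */

	if (p)					/* NULL if the COW failed */
		p[RTAX_MTU - 1] = mtu;		/* metric indices are 1-based */
}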
static inline void dst_copy_metrics(struct dst_entry *dest, const struct dst_entry *src)
{
	u32 *dst_metrics = dst_metrics_write_ptr(dest);

	if (dst_metrics) {
		u32 *src_metrics = DST_METRICS_PTR(src);

		memcpy(dst_metrics, src_metrics, RTAX_MAX * sizeof(u32));
	}
}

static inline u32 *dst_metrics_ptr(struct dst_entry *dst)
{
	return DST_METRICS_PTR(dst);
}

static inline u32
dst_metric_raw(const struct dst_entry *dst, const int metric)
{
	u32 *p = DST_METRICS_PTR(dst);

	return p[metric-1];
}

static inline u32
dst_metric(const struct dst_entry *dst, const int metric)
{
	WARN_ON_ONCE(metric == RTAX_HOPLIMIT ||
		     metric == RTAX_ADVMSS ||
		     metric == RTAX_MTU);
	return dst_metric_raw(dst, metric);
}

static inline u32
dst_metric_advmss(const struct dst_entry *dst)
{
	u32 advmss = dst_metric_raw(dst, RTAX_ADVMSS);

	if (!advmss)
		advmss = dst->ops->default_advmss(dst);

	return advmss;
}

static inline void dst_metric_set(struct dst_entry *dst, int metric, u32 val)
{
	u32 *p = dst_metrics_write_ptr(dst);

	if (p)
		p[metric-1] = val;
}

static inline u32
dst_feature(const struct dst_entry *dst, u32 feature)
{
	return dst_metric(dst, RTAX_FEATURES) & feature;
}

static inline u32 dst_mtu(const struct dst_entry *dst)
{
	return dst->ops->mtu(dst);
}

/* RTT metrics are stored in milliseconds for user ABI, but used as jiffies */
static inline unsigned long dst_metric_rtt(const struct dst_entry *dst, int metric)
{
	return msecs_to_jiffies(dst_metric(dst, metric));
}

static inline u32
dst_allfrag(const struct dst_entry *dst)
{
	int ret = dst_feature(dst, RTAX_FEATURE_ALLFRAG);

	return ret;
}

static inline int
dst_metric_locked(const struct dst_entry *dst, int metric)
{
	return dst_metric(dst, RTAX_LOCK) & (1<<metric);
}

static inline void dst_hold(struct dst_entry *dst)
{
	/*
	 * If your kernel compilation stops here, please check
	 * the __pad_to_align_refcnt declaration in struct dst_entry
	 */
	BUILD_BUG_ON(offsetof(struct dst_entry, __refcnt) & 63);
	atomic_inc(&dst->__refcnt);
}

static inline void dst_use(struct dst_entry *dst, unsigned long time)
{
	dst_hold(dst);
	dst->__use++;
	dst->lastuse = time;
}

static inline void dst_use_noref(struct dst_entry *dst, unsigned long time)
{
	dst->__use++;
	dst->lastuse = time;
}

static inline struct dst_entry *dst_clone(struct dst_entry *dst)
{
	if (dst)
		atomic_inc(&dst->__refcnt);
	return dst;
}

void dst_release(struct dst_entry *dst);

static inline void refdst_drop(unsigned long refdst)
{
	if (!(refdst & SKB_DST_NOREF))
		dst_release((struct dst_entry *)(refdst & SKB_DST_PTRMASK));
}
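/* Illustrative sketch (not part of the original header): the basic
 * reference discipline.  Every dst_hold()/dst_clone() must eventually
 * be paired with a dst_release(), otherwise the entry can never be
 * destroyed once it goes obsolete.  The helper name is hypothetical.
 */
static inline void example_with_route(struct dst_entry *dst)
{
	dst_hold(dst);		/* pin the entry against destruction */
	/* ... use dst->dev, dst_mtu(dst), etc. ... */
	dst_release(dst);	/* drop our reference when done */
}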
/**
 * skb_dst_drop - drops skb dst
 * @skb: buffer
 *
 * Drops dst reference count if a reference was taken.
 */
static inline void skb_dst_drop(struct sk_buff *skb)
{
	if (skb->_skb_refdst) {
		refdst_drop(skb->_skb_refdst);
		skb->_skb_refdst = 0UL;
	}
}

static inline void skb_dst_copy(struct sk_buff *nskb, const struct sk_buff *oskb)
{
	nskb->_skb_refdst = oskb->_skb_refdst;
	if (!(nskb->_skb_refdst & SKB_DST_NOREF))
		dst_clone(skb_dst(nskb));
}

/**
 * skb_dst_force - makes sure skb dst is refcounted
 * @skb: buffer
 *
 * If dst is not yet refcounted, let's do it
 */
static inline void skb_dst_force(struct sk_buff *skb)
{
	if (skb_dst_is_noref(skb)) {
		WARN_ON(!rcu_read_lock_held());
		skb->_skb_refdst &= ~SKB_DST_NOREF;
		dst_clone(skb_dst(skb));
	}
}

/**
 *	__skb_tunnel_rx - prepare skb for rx reinsert
 *	@skb: buffer
 *	@dev: tunnel device
 *	@net: netns for packet i/o
 *
 *	After decapsulation, packet is going to re-enter (netif_rx()) our stack,
 *	so make some cleanups. (no accounting done)
 */
static inline void __skb_tunnel_rx(struct sk_buff *skb, struct net_device *dev,
				   struct net *net)
{
	skb->dev = dev;

	/*
	 * Clear hash so that we can recalculate the hash for the
	 * encapsulated packet, unless we have already determined the hash
	 * over the L4 4-tuple.
	 */
	skb_clear_hash_if_not_l4(skb);
	skb_set_queue_mapping(skb, 0);
	skb_scrub_packet(skb, !net_eq(net, dev_net(dev)));
}

/**
 *	skb_tunnel_rx - prepare skb for rx reinsert
 *	@skb: buffer
 *	@dev: tunnel device
 *
 *	After decapsulation, packet is going to re-enter (netif_rx()) our stack,
 *	so make some cleanups, and perform accounting.
 *	Note: this accounting is not SMP safe.
 */
static inline void skb_tunnel_rx(struct sk_buff *skb, struct net_device *dev,
				 struct net *net)
{
	/* TODO : stats should be SMP safe */
	dev->stats.rx_packets++;
	dev->stats.rx_bytes += skb->len;
	__skb_tunnel_rx(skb, dev, net);
}
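/* Illustrative sketch (not part of the original header): a noref dst
 * set under rcu_read_lock() is only valid inside that RCU section, so
 * code that hands an skb to a queue for later processing must upgrade
 * the noref pointer to a real reference first.  skb_queue_tail() is the
 * standard <linux/skbuff.h> primitive; the helper name is hypothetical.
 */
static inline void example_defer_skb(struct sk_buff *skb,
				     struct sk_buff_head *queue)
{
	skb_dst_force(skb);		/* noref -> refcounted, if needed */
	skb_queue_tail(queue, skb);	/* skb may now outlive the RCU section */
}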
/* Children define the path of the packet through the
 * Linux networking.  Thus, destinations are stackable.
 */

static inline struct dst_entry *skb_dst_pop(struct sk_buff *skb)
{
	struct dst_entry *child = dst_clone(skb_dst(skb)->child);

	skb_dst_drop(skb);
	return child;
}

int dst_discard_sk(struct sock *sk, struct sk_buff *skb);
static inline int dst_discard(struct sk_buff *skb)
{
	return dst_discard_sk(skb->sk, skb);
}
void *dst_alloc(struct dst_ops *ops, struct net_device *dev, int initial_ref,
		int initial_obsolete, unsigned short flags);
void __dst_free(struct dst_entry *dst);
struct dst_entry *dst_destroy(struct dst_entry *dst);

static inline void dst_free(struct dst_entry *dst)
{
	if (dst->obsolete > 0)
		return;
	if (!atomic_read(&dst->__refcnt)) {
		dst = dst_destroy(dst);
		if (!dst)
			return;
	}
	__dst_free(dst);
}

static inline void dst_rcu_free(struct rcu_head *head)
{
	struct dst_entry *dst = container_of(head, struct dst_entry, rcu_head);

	dst_free(dst);
}

static inline void dst_confirm(struct dst_entry *dst)
{
	dst->pending_confirm = 1;
}

static inline int dst_neigh_output(struct dst_entry *dst, struct neighbour *n,
				   struct sk_buff *skb)
{
	const struct hh_cache *hh;

	if (dst->pending_confirm) {
		unsigned long now = jiffies;

		dst->pending_confirm = 0;
		/* avoid dirtying neighbour */
		if (n->confirmed != now)
			n->confirmed = now;
	}

	hh = &n->hh;
	if ((n->nud_state & NUD_CONNECTED) && hh->hh_len)
		return neigh_hh_output(hh, skb);
	else
		return n->output(n, skb);
}

static inline struct neighbour *dst_neigh_lookup(const struct dst_entry *dst, const void *daddr)
{
	struct neighbour *n = dst->ops->neigh_lookup(dst, NULL, daddr);

	return IS_ERR(n) ? NULL : n;
}

static inline struct neighbour *dst_neigh_lookup_skb(const struct dst_entry *dst,
						     struct sk_buff *skb)
{
	struct neighbour *n = dst->ops->neigh_lookup(dst, skb, NULL);

	return IS_ERR(n) ? NULL : n;
}

static inline void dst_link_failure(struct sk_buff *skb)
{
	struct dst_entry *dst = skb_dst(skb);

	if (dst && dst->ops && dst->ops->link_failure)
		dst->ops->link_failure(skb);
}

static inline void dst_set_expires(struct dst_entry *dst, int timeout)
{
	unsigned long expires = jiffies + timeout;

	if (expires == 0)
		expires = 1;

	if (dst->expires == 0 || time_before(expires, dst->expires))
		dst->expires = expires;
}

/* Output packet to network from transport. */
static inline int dst_output_sk(struct sock *sk, struct sk_buff *skb)
{
	return skb_dst(skb)->output(sk, skb);
}

static inline int dst_output(struct sk_buff *skb)
{
	return dst_output_sk(skb->sk, skb);
}

/* Input packet from network to transport. */
static inline int dst_input(struct sk_buff *skb)
{
	return skb_dst(skb)->input(skb);
}

static inline struct dst_entry *dst_check(struct dst_entry *dst, u32 cookie)
{
	if (dst->obsolete)
		dst = dst->ops->check(dst, cookie);
	return dst;
}

void dst_init(void);
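/* Illustrative sketch (not part of the original header): how a caller
 * that caches a route revalidates it.  dst_check() returns the entry
 * itself while it is still valid (obsolete == 0, or ops->check()
 * accepts the cookie) and NULL once it has gone stale, in which case a
 * fresh lookup is required.  The helper name is hypothetical.
 */
static inline struct dst_entry *example_revalidate(struct dst_entry *dst,
						   u32 cookie)
{
	dst = dst_check(dst, cookie);	/* may invoke dst->ops->check() */
	if (!dst) {
		/* cached route is stale; caller must re-do the lookup */
	}
	return dst;
}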
/* Flags for the xfrm_lookup() flags argument. */
enum {
	XFRM_LOOKUP_ICMP = 1 << 0,
	XFRM_LOOKUP_QUEUE = 1 << 1,
	XFRM_LOOKUP_KEEP_DST_REF = 1 << 2,
};

struct flowi;
#ifndef CONFIG_XFRM
static inline struct dst_entry *xfrm_lookup(struct net *net,
					    struct dst_entry *dst_orig,
					    const struct flowi *fl,
					    struct sock *sk,
					    int flags)
{
	return dst_orig;
}

static inline struct dst_entry *xfrm_lookup_route(struct net *net,
						  struct dst_entry *dst_orig,
						  const struct flowi *fl,
						  struct sock *sk,
						  int flags)
{
	return dst_orig;
}

static inline struct xfrm_state *dst_xfrm(const struct dst_entry *dst)
{
	return NULL;
}

#else
struct dst_entry *xfrm_lookup(struct net *net, struct dst_entry *dst_orig,
			      const struct flowi *fl, struct sock *sk,
			      int flags);

struct dst_entry *xfrm_lookup_route(struct net *net, struct dst_entry *dst_orig,
				    const struct flowi *fl, struct sock *sk,
				    int flags);

/* An skb attached to this dst needs transformation if dst->xfrm is valid */
static inline struct xfrm_state *dst_xfrm(const struct dst_entry *dst)
{
	return dst->xfrm;
}
#endif

#endif /* _NET_DST_H */