xref: /openbmc/linux/include/net/dst.h (revision a977d045)
/*
 * net/dst.h	Protocol-independent destination cache definitions.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 */

#ifndef _NET_DST_H
#define _NET_DST_H

#include <net/dst_ops.h>
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>
#include <linux/rcupdate.h>
#include <linux/bug.h>
#include <linux/jiffies.h>
#include <net/neighbour.h>
#include <asm/processor.h>

#define DST_GC_MIN	(HZ/10)
#define DST_GC_INC	(HZ/2)
#define DST_GC_MAX	(120*HZ)

/* Each dst_entry has a reference count and sits in some parent list(s).
 * When it is removed from its parent list, it is "freed" (dst_free).
 * After this it enters the dead state (dst->obsolete > 0); if its refcnt
 * is zero it can be destroyed immediately, otherwise it is added
 * to the gc list and the garbage collector periodically checks the refcnt.
 */
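
/* A minimal usage sketch (not part of this header): the common pattern is to
 * take a reference while a route is in use and drop it when done, e.g.
 *
 *	struct dst_entry *dst = skb_dst(skb);	// dst cached on an skb
 *
 *	dst_hold(dst);				// pin it while the route is in use
 *	...
 *	dst_release(dst);			// drop the reference when finished
 *
 * Names such as "skb" here are illustrative only; the helpers themselves are
 * declared later in this file.
 */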

struct sk_buff;

struct dst_entry {
	struct net_device       *dev;
	struct rcu_head		rcu_head;
	struct dst_entry	*child;
	struct dst_ops		*ops;
	unsigned long		_metrics;
	unsigned long           expires;
	struct dst_entry	*path;
	struct dst_entry	*from;
#ifdef CONFIG_XFRM
	struct xfrm_state	*xfrm;
#else
	void			*__pad1;
#endif
	int			(*input)(struct sk_buff *);
	int			(*output)(struct net *net, struct sock *sk, struct sk_buff *skb);

	unsigned short		flags;
#define DST_HOST		0x0001
#define DST_NOXFRM		0x0002
#define DST_NOPOLICY		0x0004
#define DST_NOCOUNT		0x0008
#define DST_FAKE_RTABLE		0x0010
#define DST_XFRM_TUNNEL		0x0020
#define DST_XFRM_QUEUE		0x0040
#define DST_METADATA		0x0080

	short			error;

	/* A non-zero value of dst->obsolete forces by-hand validation
	 * of the route entry.  Positive values are set by the generic
	 * dst layer to indicate that the entry has been forcefully
	 * destroyed.
	 *
	 * Negative values are used by the implementation layer code to
	 * force invocation of the dst_ops->check() method.
	 */
	short			obsolete;
#define DST_OBSOLETE_NONE	0
#define DST_OBSOLETE_DEAD	2
#define DST_OBSOLETE_FORCE_CHK	-1
#define DST_OBSOLETE_KILL	-2
	unsigned short		header_len;	/* more space at head required */
	unsigned short		trailer_len;	/* space to reserve at tail */
	unsigned short		__pad3;

#ifdef CONFIG_IP_ROUTE_CLASSID
	__u32			tclassid;
#else
	__u32			__pad2;
#endif

#ifdef CONFIG_64BIT
	/*
	 * Align __refcnt to a 64-byte boundary
	 * (L1_CACHE_SIZE would be too much)
	 */
	long			__pad_to_align_refcnt[2];
#endif
	/*
	 * __refcnt wants to be on a different cache line from
	 * input/output/ops or performance tanks badly
	 */
	atomic_t		__refcnt;	/* client references	*/
	int			__use;
	unsigned long		lastuse;
	struct lwtunnel_state   *lwtstate;
	union {
		struct dst_entry	*next;
		struct rtable __rcu	*rt_next;
		struct rt6_info		*rt6_next;
		struct dn_route __rcu	*dn_next;
	};
};

struct dst_metrics {
	u32		metrics[RTAX_MAX];
	atomic_t	refcnt;
};
extern const struct dst_metrics dst_default_metrics;

u32 *dst_cow_metrics_generic(struct dst_entry *dst, unsigned long old);

#define DST_METRICS_READ_ONLY		0x1UL
#define DST_METRICS_REFCOUNTED		0x2UL
#define DST_METRICS_FLAGS		0x3UL
#define __DST_METRICS_PTR(Y)	\
	((u32 *)((Y) & ~DST_METRICS_FLAGS))
#define DST_METRICS_PTR(X)	__DST_METRICS_PTR((X)->_metrics)
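
/* dst->_metrics is a tagged pointer: the metrics array is u32-aligned, so the
 * two low bits are free to carry DST_METRICS_READ_ONLY and
 * DST_METRICS_REFCOUNTED while the remaining bits hold the u32 * itself.
 * A rough sketch of the encoding (illustrative, not a new helper):
 *
 *	unsigned long val = (unsigned long)metrics_array | DST_METRICS_READ_ONLY;
 *	u32 *p = __DST_METRICS_PTR(val);	// masks the flag bits back off
 */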

static inline bool dst_metrics_read_only(const struct dst_entry *dst)
{
	return dst->_metrics & DST_METRICS_READ_ONLY;
}

void __dst_destroy_metrics_generic(struct dst_entry *dst, unsigned long old);

static inline void dst_destroy_metrics_generic(struct dst_entry *dst)
{
	unsigned long val = dst->_metrics;
	if (!(val & DST_METRICS_READ_ONLY))
		__dst_destroy_metrics_generic(dst, val);
}

static inline u32 *dst_metrics_write_ptr(struct dst_entry *dst)
{
	unsigned long p = dst->_metrics;

	BUG_ON(!p);

	if (p & DST_METRICS_READ_ONLY)
		return dst->ops->cow_metrics(dst, p);
	return __DST_METRICS_PTR(p);
}

/* This may only be invoked before the entry has reached global
 * visibility.
 */
static inline void dst_init_metrics(struct dst_entry *dst,
				    const u32 *src_metrics,
				    bool read_only)
{
	dst->_metrics = ((unsigned long) src_metrics) |
		(read_only ? DST_METRICS_READ_ONLY : 0);
}
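
/* Sketch of typical initialization (assumed caller, e.g. a protocol's route
 * constructor): point a freshly allocated entry at the shared read-only
 * default metrics; a later write then triggers ops->cow_metrics():
 *
 *	dst_init_metrics(dst, dst_default_metrics.metrics, true);
 */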

static inline void dst_copy_metrics(struct dst_entry *dest, const struct dst_entry *src)
{
	u32 *dst_metrics = dst_metrics_write_ptr(dest);

	if (dst_metrics) {
		u32 *src_metrics = DST_METRICS_PTR(src);

		memcpy(dst_metrics, src_metrics, RTAX_MAX * sizeof(u32));
	}
}

static inline u32 *dst_metrics_ptr(struct dst_entry *dst)
{
	return DST_METRICS_PTR(dst);
}

static inline u32
dst_metric_raw(const struct dst_entry *dst, const int metric)
{
	u32 *p = DST_METRICS_PTR(dst);

	return p[metric-1];
}

static inline u32
dst_metric(const struct dst_entry *dst, const int metric)
{
	WARN_ON_ONCE(metric == RTAX_HOPLIMIT ||
		     metric == RTAX_ADVMSS ||
		     metric == RTAX_MTU);
	return dst_metric_raw(dst, metric);
}

static inline u32
dst_metric_advmss(const struct dst_entry *dst)
{
	u32 advmss = dst_metric_raw(dst, RTAX_ADVMSS);

	if (!advmss)
		advmss = dst->ops->default_advmss(dst);

	return advmss;
}

static inline void dst_metric_set(struct dst_entry *dst, int metric, u32 val)
{
	u32 *p = dst_metrics_write_ptr(dst);

	if (p)
		p[metric-1] = val;
}
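
/* Metrics are indexed by the RTAX_* constants from <linux/rtnetlink.h>.
 * A hedged example of reading and writing one (values are illustrative):
 *
 *	u32 win = dst_metric(dst, RTAX_WINDOW);		// read a plain metric
 *
 *	dst_metric_set(dst, RTAX_WINDOW, 65535);	// COWs read-only metrics first
 *
 * RTAX_MTU, RTAX_ADVMSS and RTAX_HOPLIMIT should be read through their
 * dedicated helpers (dst_mtu(), dst_metric_advmss(), ...) rather than
 * dst_metric(), as the WARN_ON_ONCE() above indicates.
 */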

/* Kernel-internal feature bits that are unallocated in user space. */
#define DST_FEATURE_ECN_CA	(1 << 31)

#define DST_FEATURE_MASK	(DST_FEATURE_ECN_CA)
#define DST_FEATURE_ECN_MASK	(DST_FEATURE_ECN_CA | RTAX_FEATURE_ECN)

static inline u32
dst_feature(const struct dst_entry *dst, u32 feature)
{
	return dst_metric(dst, RTAX_FEATURES) & feature;
}

static inline u32 dst_mtu(const struct dst_entry *dst)
{
	return dst->ops->mtu(dst);
}

/* RTT metrics are stored in milliseconds for user ABI, but used as jiffies */
static inline unsigned long dst_metric_rtt(const struct dst_entry *dst, int metric)
{
	return msecs_to_jiffies(dst_metric(dst, metric));
}
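
/* For example (illustrative value): an RTAX_RTT of 200 stored via netlink
 * means 200 ms, and dst_metric_rtt(dst, RTAX_RTT) returns that interval
 * converted to jiffies for use with the kernel's timer arithmetic.
 */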

static inline u32
dst_allfrag(const struct dst_entry *dst)
{
	int ret = dst_feature(dst, RTAX_FEATURE_ALLFRAG);
	return ret;
}

static inline int
dst_metric_locked(const struct dst_entry *dst, int metric)
{
	return dst_metric(dst, RTAX_LOCK) & (1<<metric);
}

static inline void dst_hold(struct dst_entry *dst)
{
	/*
	 * If your kernel compilation stops here, please check
	 * the __pad_to_align_refcnt declaration in struct dst_entry
	 */
	BUILD_BUG_ON(offsetof(struct dst_entry, __refcnt) & 63);
	WARN_ON(atomic_inc_not_zero(&dst->__refcnt) == 0);
}

static inline void dst_use(struct dst_entry *dst, unsigned long time)
{
	dst_hold(dst);
	dst->__use++;
	dst->lastuse = time;
}

static inline void dst_use_noref(struct dst_entry *dst, unsigned long time)
{
	dst->__use++;
	dst->lastuse = time;
}

static inline struct dst_entry *dst_clone(struct dst_entry *dst)
{
	if (dst)
		atomic_inc(&dst->__refcnt);
	return dst;
}

void dst_release(struct dst_entry *dst);

void dst_release_immediate(struct dst_entry *dst);

static inline void refdst_drop(unsigned long refdst)
{
	if (!(refdst & SKB_DST_NOREF))
		dst_release((struct dst_entry *)(refdst & SKB_DST_PTRMASK));
}

/**
 * skb_dst_drop - drop the skb's dst
 * @skb: buffer
 *
 * Drops the dst reference count if a reference was taken.
 */
static inline void skb_dst_drop(struct sk_buff *skb)
{
	if (skb->_skb_refdst) {
		refdst_drop(skb->_skb_refdst);
		skb->_skb_refdst = 0UL;
	}
}

static inline void __skb_dst_copy(struct sk_buff *nskb, unsigned long refdst)
{
	nskb->_skb_refdst = refdst;
	if (!(nskb->_skb_refdst & SKB_DST_NOREF))
		dst_clone(skb_dst(nskb));
}

static inline void skb_dst_copy(struct sk_buff *nskb, const struct sk_buff *oskb)
{
	__skb_dst_copy(nskb, oskb->_skb_refdst);
}

/**
 * skb_dst_force - make sure the skb's dst is refcounted
 * @skb: buffer
 *
 * If the dst is not yet refcounted, take a reference now.
 */
static inline void skb_dst_force(struct sk_buff *skb)
{
	if (skb_dst_is_noref(skb)) {
		WARN_ON(!rcu_read_lock_held());
		skb->_skb_refdst &= ~SKB_DST_NOREF;
		dst_clone(skb_dst(skb));
	}
}
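
/* A hedged sketch of when skb_dst_force() matters: a noref dst is only valid
 * under rcu_read_lock(), so a path that keeps the skb around beyond the
 * current RCU section must convert it to a real reference first, e.g.
 *
 *	rcu_read_lock();
 *	...
 *	skb_dst_force(skb);			// noref -> refcounted, if needed
 *	__skb_queue_tail(&some_queue, skb);	// "some_queue" is illustrative
 *	rcu_read_unlock();
 */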

/**
 * dst_hold_safe - Take a reference on a dst if possible
 * @dst: pointer to dst entry
 *
 * This helper returns false if it could not safely
 * take a reference on a dst.
 */
static inline bool dst_hold_safe(struct dst_entry *dst)
{
	return atomic_inc_not_zero(&dst->__refcnt);
}
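
/* Illustrative use (assumed caller): when racing with the last dst_release(),
 * only keep the pointer if the reference was actually obtained:
 *
 *	if (dst_hold_safe(dst))
 *		cache->dst = dst;	// "cache" is a hypothetical structure
 *	else
 *		dst = NULL;		// entry is going away; do not use it
 */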

/**
 * skb_dst_force_safe - make sure the skb's dst is refcounted
 * @skb: buffer
 *
 * If the dst is not yet refcounted and not destroyed, grab a ref on it.
 */
static inline void skb_dst_force_safe(struct sk_buff *skb)
{
	if (skb_dst_is_noref(skb)) {
		struct dst_entry *dst = skb_dst(skb);

		if (!dst_hold_safe(dst))
			dst = NULL;

		skb->_skb_refdst = (unsigned long)dst;
	}
}


/**
 *	__skb_tunnel_rx - prepare skb for rx reinsert
 *	@skb: buffer
 *	@dev: tunnel device
 *	@net: netns for packet i/o
 *
 *	After decapsulation, the packet is going to re-enter (netif_rx()) our
 *	stack, so make some cleanups. (no accounting done)
 */
static inline void __skb_tunnel_rx(struct sk_buff *skb, struct net_device *dev,
				   struct net *net)
{
	skb->dev = dev;

	/*
	 * Clear the hash so that we can recalculate it for the
	 * decapsulated packet, unless we have already determined the hash
	 * over the L4 4-tuple.
	 */
	skb_clear_hash_if_not_l4(skb);
	skb_set_queue_mapping(skb, 0);
	skb_scrub_packet(skb, !net_eq(net, dev_net(dev)));
}

/**
 *	skb_tunnel_rx - prepare skb for rx reinsert
 *	@skb: buffer
 *	@dev: tunnel device
 *	@net: netns for packet i/o
 *
 *	After decapsulation, the packet is going to re-enter (netif_rx()) our
 *	stack, so make some cleanups and perform accounting.
 *	Note: this accounting is not SMP safe.
 */
static inline void skb_tunnel_rx(struct sk_buff *skb, struct net_device *dev,
				 struct net *net)
{
	/* TODO : stats should be SMP safe */
	dev->stats.rx_packets++;
	dev->stats.rx_bytes += skb->len;
	__skb_tunnel_rx(skb, dev, net);
}
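
/* A minimal sketch of the intended call site (hypothetical tunnel driver):
 * after stripping the outer headers, hand the inner packet back to the stack
 * as if it had arrived on the tunnel netdevice:
 *
 *	skb_tunnel_rx(skb, tunnel_dev, dev_net(tunnel_dev));
 *	... set skb->protocol for the inner packet ...
 *	netif_rx(skb);
 */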

static inline u32 dst_tclassid(const struct sk_buff *skb)
{
#ifdef CONFIG_IP_ROUTE_CLASSID
	const struct dst_entry *dst;

	dst = skb_dst(skb);
	if (dst)
		return dst->tclassid;
#endif
	return 0;
}

int dst_discard_out(struct net *net, struct sock *sk, struct sk_buff *skb);
static inline int dst_discard(struct sk_buff *skb)
{
	return dst_discard_out(&init_net, skb->sk, skb);
}
void *dst_alloc(struct dst_ops *ops, struct net_device *dev, int initial_ref,
		int initial_obsolete, unsigned short flags);
void dst_init(struct dst_entry *dst, struct dst_ops *ops,
	      struct net_device *dev, int initial_ref, int initial_obsolete,
	      unsigned short flags);
struct dst_entry *dst_destroy(struct dst_entry *dst);
void dst_dev_put(struct dst_entry *dst);

static inline void dst_confirm(struct dst_entry *dst)
{
}

static inline struct neighbour *dst_neigh_lookup(const struct dst_entry *dst, const void *daddr)
{
	struct neighbour *n = dst->ops->neigh_lookup(dst, NULL, daddr);
	return IS_ERR(n) ? NULL : n;
}

static inline struct neighbour *dst_neigh_lookup_skb(const struct dst_entry *dst,
						     struct sk_buff *skb)
{
	struct neighbour *n = dst->ops->neigh_lookup(dst, skb, NULL);
	return IS_ERR(n) ? NULL : n;
}

static inline void dst_confirm_neigh(const struct dst_entry *dst,
				     const void *daddr)
{
	if (dst->ops->confirm_neigh)
		dst->ops->confirm_neigh(dst, daddr);
}
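
/* Illustrative output-path pattern (simplified from how protocols use these
 * helpers; "daddr" and the error handling are assumptions):
 *
 *	struct neighbour *n = dst_neigh_lookup(dst, daddr);
 *
 *	if (!n)
 *		return -ENOMEM;		// lookup/creation failed
 *	... resolve/queue via the neighbour ...
 *	neigh_release(n);		// drop the reference taken by the lookup
 */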

static inline void dst_link_failure(struct sk_buff *skb)
{
	struct dst_entry *dst = skb_dst(skb);
	if (dst && dst->ops && dst->ops->link_failure)
		dst->ops->link_failure(skb);
}

static inline void dst_set_expires(struct dst_entry *dst, int timeout)
{
	unsigned long expires = jiffies + timeout;

	if (expires == 0)
		expires = 1;

	if (dst->expires == 0 || time_before(expires, dst->expires))
		dst->expires = expires;
}
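
/* Note that @timeout is a jiffies interval and the earliest requested expiry
 * wins. For example, to let a route expire roughly ten seconds from now
 * (assuming a caller that has a lifetime in seconds):
 *
 *	dst_set_expires(dst, 10 * HZ);
 */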

/* Output packet to network from transport.  */
static inline int dst_output(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	return skb_dst(skb)->output(net, sk, skb);
}

/* Input packet from network to transport.  */
static inline int dst_input(struct sk_buff *skb)
{
	return skb_dst(skb)->input(skb);
}

static inline struct dst_entry *dst_check(struct dst_entry *dst, u32 cookie)
{
	if (dst->obsolete)
		dst = dst->ops->check(dst, cookie);
	return dst;
}
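
/* Sketch of revalidating a cached route (e.g. a socket's cached dst; the
 * cookie value is protocol specific and assumed here):
 *
 *	dst = dst_check(dst, cookie);
 *	if (!dst) {
 *		// entry was obsoleted; perform a fresh route lookup
 *	}
 */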

/* Flags for the xfrm_lookup() flags argument. */
enum {
	XFRM_LOOKUP_ICMP = 1 << 0,
	XFRM_LOOKUP_QUEUE = 1 << 1,
	XFRM_LOOKUP_KEEP_DST_REF = 1 << 2,
};

struct flowi;
#ifndef CONFIG_XFRM
static inline struct dst_entry *xfrm_lookup(struct net *net,
					    struct dst_entry *dst_orig,
					    const struct flowi *fl,
					    const struct sock *sk,
					    int flags)
{
	return dst_orig;
}

static inline struct dst_entry *xfrm_lookup_route(struct net *net,
						  struct dst_entry *dst_orig,
						  const struct flowi *fl,
						  const struct sock *sk,
						  int flags)
{
	return dst_orig;
}

static inline struct xfrm_state *dst_xfrm(const struct dst_entry *dst)
{
	return NULL;
}

#else
struct dst_entry *xfrm_lookup(struct net *net, struct dst_entry *dst_orig,
			      const struct flowi *fl, const struct sock *sk,
			      int flags);

struct dst_entry *xfrm_lookup_route(struct net *net, struct dst_entry *dst_orig,
				    const struct flowi *fl, const struct sock *sk,
				    int flags);

/* An skb attached to this dst needs transformation if dst->xfrm is valid */
static inline struct xfrm_state *dst_xfrm(const struct dst_entry *dst)
{
	return dst->xfrm;
}
#endif
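
/* Rough usage sketch (illustrative; real callers build the flowi from the
 * packet or socket): wrap a plain route in any required IPsec transforms.
 * With CONFIG_XFRM disabled the stubs above simply return the original dst.
 *
 *	dst = xfrm_lookup(net, dst, &fl, sk, 0);
 *	if (IS_ERR(dst))
 *		return PTR_ERR(dst);	// policy lookup failed
 */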

#endif /* _NET_DST_H */