/*
 * net/dst.h	Protocol independent destination cache definitions.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 */

#ifndef _NET_DST_H
#define _NET_DST_H

#include <net/dst_ops.h>
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>
#include <linux/rcupdate.h>
#include <linux/bug.h>
#include <linux/jiffies.h>
#include <net/neighbour.h>
#include <asm/processor.h>

#define DST_GC_MIN	(HZ/10)
#define DST_GC_INC	(HZ/2)
#define DST_GC_MAX	(120*HZ)

/* Each dst_entry has a reference count and sits in some parent list(s).
 * When it is removed from its parent list, it is "freed" (dst_free).
 * After this it enters dead state (dst->obsolete > 0) and if its refcnt
 * is zero, it can be destroyed immediately, otherwise it is added
 * to the gc list and the garbage collector periodically checks the refcnt.
 */

struct sk_buff;

struct dst_entry {
	struct rcu_head		rcu_head;
	struct dst_entry	*child;
	struct net_device       *dev;
	struct  dst_ops	        *ops;
	unsigned long		_metrics;
	unsigned long           expires;
	struct dst_entry	*path;
	struct dst_entry	*from;
#ifdef CONFIG_XFRM
	struct xfrm_state	*xfrm;
#else
	void			*__pad1;
#endif
	int			(*input)(struct sk_buff *);
	int			(*output)(struct net *net, struct sock *sk, struct sk_buff *skb);

	unsigned short		flags;
#define DST_HOST		0x0001
#define DST_NOXFRM		0x0002
#define DST_NOPOLICY		0x0004
#define DST_NOHASH		0x0008
#define DST_NOCACHE		0x0010
#define DST_NOCOUNT		0x0020
#define DST_FAKE_RTABLE		0x0040
#define DST_XFRM_TUNNEL		0x0080
#define DST_XFRM_QUEUE		0x0100
#define DST_METADATA		0x0200

	unsigned short		pending_confirm;

	short			error;

	/* A non-zero value of dst->obsolete forces by-hand validation
	 * of the route entry.  Positive values are set by the generic
	 * dst layer to indicate that the entry has been forcefully
	 * destroyed.
	 *
	 * Negative values are used by the implementation layer code to
	 * force invocation of the dst_ops->check() method.
	 */
	short			obsolete;
#define DST_OBSOLETE_NONE	0
#define DST_OBSOLETE_DEAD	2
#define DST_OBSOLETE_FORCE_CHK	-1
#define DST_OBSOLETE_KILL	-2
	unsigned short		header_len;	/* more space at head required */
	unsigned short		trailer_len;	/* space to reserve at tail */
#ifdef CONFIG_IP_ROUTE_CLASSID
	__u32			tclassid;
#else
	__u32			__pad2;
#endif

#ifdef CONFIG_64BIT
	/*
	 * Align __refcnt to a 64-byte boundary
	 * (L1_CACHE_SIZE would be too much)
	 */
	long			__pad_to_align_refcnt[2];
#endif
	/*
	 * __refcnt wants to be on a different cache line from
	 * input/output/ops or performance tanks badly
	 */
	atomic_t		__refcnt;	/* client references	*/
	int			__use;
	unsigned long		lastuse;
	struct lwtunnel_state   *lwtstate;
	union {
		struct dst_entry	*next;
		struct rtable __rcu	*rt_next;
		struct rt6_info		*rt6_next;
		struct dn_route __rcu	*dn_next;
	};
};

u32 *dst_cow_metrics_generic(struct dst_entry *dst, unsigned long old);
extern const u32 dst_default_metrics[];

#define DST_METRICS_READ_ONLY		0x1UL
#define DST_METRICS_FLAGS		0x3UL
#define __DST_METRICS_PTR(Y)	\
	((u32 *)((Y) & ~DST_METRICS_FLAGS))
#define DST_METRICS_PTR(X)	__DST_METRICS_PTR((X)->_metrics)

static inline bool dst_metrics_read_only(const struct dst_entry *dst)
{
	return dst->_metrics & DST_METRICS_READ_ONLY;
}

void __dst_destroy_metrics_generic(struct dst_entry *dst, unsigned long old);

static inline void dst_destroy_metrics_generic(struct dst_entry *dst)
{
	unsigned long val = dst->_metrics;
	if (!(val & DST_METRICS_READ_ONLY))
		__dst_destroy_metrics_generic(dst, val);
}

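/* Return a metrics array that is safe to write to: if the dst still points
 * at shared read-only metrics, ask the protocol to copy them first via
 * ->cow_metrics().  May return NULL if that copy cannot be made.
 */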
static inline u32 *dst_metrics_write_ptr(struct dst_entry *dst)
{
	unsigned long p = dst->_metrics;

	BUG_ON(!p);

	if (p & DST_METRICS_READ_ONLY)
		return dst->ops->cow_metrics(dst, p);
	return __DST_METRICS_PTR(p);
}

/* This may only be invoked before the entry has reached global
 * visibility.
 */
static inline void dst_init_metrics(struct dst_entry *dst,
				    const u32 *src_metrics,
				    bool read_only)
{
	dst->_metrics = ((unsigned long) src_metrics) |
		(read_only ? DST_METRICS_READ_ONLY : 0);
}

static inline void dst_copy_metrics(struct dst_entry *dest, const struct dst_entry *src)
{
	u32 *dst_metrics = dst_metrics_write_ptr(dest);

	if (dst_metrics) {
		u32 *src_metrics = DST_METRICS_PTR(src);

		memcpy(dst_metrics, src_metrics, RTAX_MAX * sizeof(u32));
	}
}

static inline u32 *dst_metrics_ptr(struct dst_entry *dst)
{
	return DST_METRICS_PTR(dst);
}

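/* Raw metric read: @metric is an RTAX_* index, hence the metric - 1 when
 * indexing the zero-based metrics array.  No default or fixup is applied.
 */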
static inline u32
dst_metric_raw(const struct dst_entry *dst, const int metric)
{
	u32 *p = DST_METRICS_PTR(dst);

	return p[metric-1];
}

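/* Like dst_metric_raw(), but warns for metrics that have dedicated helpers
 * applying per-family defaults; use dst_mtu(), dst_metric_advmss() and the
 * hoplimit helpers instead of reading those metrics directly.
 */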
static inline u32
dst_metric(const struct dst_entry *dst, const int metric)
{
	WARN_ON_ONCE(metric == RTAX_HOPLIMIT ||
		     metric == RTAX_ADVMSS ||
		     metric == RTAX_MTU);
	return dst_metric_raw(dst, metric);
}

static inline u32
dst_metric_advmss(const struct dst_entry *dst)
{
	u32 advmss = dst_metric_raw(dst, RTAX_ADVMSS);

	if (!advmss)
		advmss = dst->ops->default_advmss(dst);

	return advmss;
}

static inline void dst_metric_set(struct dst_entry *dst, int metric, u32 val)
{
	u32 *p = dst_metrics_write_ptr(dst);

	if (p)
		p[metric-1] = val;
}

/* Kernel-internal feature bits that are unallocated in user space. */
#define DST_FEATURE_ECN_CA	(1 << 31)

#define DST_FEATURE_MASK	(DST_FEATURE_ECN_CA)
#define DST_FEATURE_ECN_MASK	(DST_FEATURE_ECN_CA | RTAX_FEATURE_ECN)

static inline u32
dst_feature(const struct dst_entry *dst, u32 feature)
{
	return dst_metric(dst, RTAX_FEATURES) & feature;
}

static inline u32 dst_mtu(const struct dst_entry *dst)
{
	return dst->ops->mtu(dst);
}

/* RTT metrics are stored in milliseconds for user ABI, but used as jiffies */
static inline unsigned long dst_metric_rtt(const struct dst_entry *dst, int metric)
{
	return msecs_to_jiffies(dst_metric(dst, metric));
}

static inline u32
dst_allfrag(const struct dst_entry *dst)
{
	int ret = dst_feature(dst,  RTAX_FEATURE_ALLFRAG);
	return ret;
}

static inline int
dst_metric_locked(const struct dst_entry *dst, int metric)
{
	return dst_metric(dst, RTAX_LOCK) & (1<<metric);
}

static inline void dst_hold(struct dst_entry *dst)
{
	/*
	 * If your kernel compilation stops here, please check
	 * the __pad_to_align_refcnt declaration in struct dst_entry
	 */
	BUILD_BUG_ON(offsetof(struct dst_entry, __refcnt) & 63);
	atomic_inc(&dst->__refcnt);
}

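/* Take a reference on @dst and record that it was used at @time (jiffies). */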
static inline void dst_use(struct dst_entry *dst, unsigned long time)
{
	dst_hold(dst);
	dst->__use++;
	dst->lastuse = time;
}

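/* Like dst_use(), but for noref dsts (e.g. held under RCU): update the
 * usage statistics without taking a reference.
 */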
static inline void dst_use_noref(struct dst_entry *dst, unsigned long time)
{
	dst->__use++;
	dst->lastuse = time;
}

static inline struct dst_entry *dst_clone(struct dst_entry *dst)
{
	if (dst)
		atomic_inc(&dst->__refcnt);
	return dst;
}

void dst_release(struct dst_entry *dst);

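/* Release the dst reference encoded in a raw skb->_skb_refdst value, unless
 * it is a noref (SKB_DST_NOREF) pointer that carries no reference.
 */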
static inline void refdst_drop(unsigned long refdst)
{
	if (!(refdst & SKB_DST_NOREF))
		dst_release((struct dst_entry *)(refdst & SKB_DST_PTRMASK));
}

/**
 * skb_dst_drop - drops skb dst
 * @skb: buffer
 *
 * Drops dst reference count if a reference was taken.
 */
static inline void skb_dst_drop(struct sk_buff *skb)
{
	if (skb->_skb_refdst) {
		refdst_drop(skb->_skb_refdst);
		skb->_skb_refdst = 0UL;
	}
}

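/* Copy a raw _skb_refdst value to @nskb, taking an extra reference on the
 * dst whenever the source value is a refcounted (non-noref) one.
 */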
static inline void __skb_dst_copy(struct sk_buff *nskb, unsigned long refdst)
{
	nskb->_skb_refdst = refdst;
	if (!(nskb->_skb_refdst & SKB_DST_NOREF))
		dst_clone(skb_dst(nskb));
}

static inline void skb_dst_copy(struct sk_buff *nskb, const struct sk_buff *oskb)
{
	__skb_dst_copy(nskb, oskb->_skb_refdst);
}

/**
 * skb_dst_force - makes sure skb dst is refcounted
 * @skb: buffer
 *
 * If dst is not yet refcounted, let's do it
 */
static inline void skb_dst_force(struct sk_buff *skb)
{
	if (skb_dst_is_noref(skb)) {
		WARN_ON(!rcu_read_lock_held());
		skb->_skb_refdst &= ~SKB_DST_NOREF;
		dst_clone(skb_dst(skb));
	}
}

/**
 * dst_hold_safe - Take a reference on a dst if possible
 * @dst: pointer to dst entry
 *
 * This helper returns false if it could not safely
 * take a reference on a dst.
 */
static inline bool dst_hold_safe(struct dst_entry *dst)
{
	if (dst->flags & DST_NOCACHE)
		return atomic_inc_not_zero(&dst->__refcnt);
	dst_hold(dst);
	return true;
}

/**
 * skb_dst_force_safe - makes sure skb dst is refcounted
 * @skb: buffer
 *
 * If dst is not yet refcounted and not destroyed, grab a ref on it.
 */
static inline void skb_dst_force_safe(struct sk_buff *skb)
{
	if (skb_dst_is_noref(skb)) {
		struct dst_entry *dst = skb_dst(skb);

		if (!dst_hold_safe(dst))
			dst = NULL;

		skb->_skb_refdst = (unsigned long)dst;
	}
}


/**
 *	__skb_tunnel_rx - prepare skb for rx reinsert
 *	@skb: buffer
 *	@dev: tunnel device
 *	@net: netns for packet i/o
 *
 *	After decapsulation, the packet is going to re-enter (netif_rx()) our
 *	stack, so make some cleanups. (no accounting done)
 */
static inline void __skb_tunnel_rx(struct sk_buff *skb, struct net_device *dev,
				   struct net *net)
{
	skb->dev = dev;

	/*
	 * Clear the hash so that we can recalculate the hash for the
	 * encapsulated packet, unless we have already determined the hash
	 * over the L4 4-tuple.
	 */
	skb_clear_hash_if_not_l4(skb);
	skb_set_queue_mapping(skb, 0);
	skb_scrub_packet(skb, !net_eq(net, dev_net(dev)));
}

/**
 *	skb_tunnel_rx - prepare skb for rx reinsert
 *	@skb: buffer
 *	@dev: tunnel device
 *	@net: netns for packet i/o
 *
 *	After decapsulation, the packet is going to re-enter (netif_rx()) our
 *	stack, so make some cleanups, and perform accounting.
 *	Note: this accounting is not SMP safe.
 */
static inline void skb_tunnel_rx(struct sk_buff *skb, struct net_device *dev,
				 struct net *net)
{
	/* TODO : stats should be SMP safe */
	dev->stats.rx_packets++;
	dev->stats.rx_bytes += skb->len;
	__skb_tunnel_rx(skb, dev, net);
}

static inline u32 dst_tclassid(const struct sk_buff *skb)
{
#ifdef CONFIG_IP_ROUTE_CLASSID
	const struct dst_entry *dst;

	dst = skb_dst(skb);
	if (dst)
		return dst->tclassid;
#endif
	return 0;
}

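/* Packet-dropping handlers: dst_discard_out() simply frees the skb, and
 * dst_discard() is its input-side wrapper, for dsts that must not pass
 * traffic.
 */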
int dst_discard_out(struct net *net, struct sock *sk, struct sk_buff *skb);
static inline int dst_discard(struct sk_buff *skb)
{
	return dst_discard_out(&init_net, skb->sk, skb);
}
void *dst_alloc(struct dst_ops *ops, struct net_device *dev, int initial_ref,
		int initial_obsolete, unsigned short flags);
void dst_init(struct dst_entry *dst, struct dst_ops *ops,
	      struct net_device *dev, int initial_ref, int initial_obsolete,
	      unsigned short flags);
void __dst_free(struct dst_entry *dst);
struct dst_entry *dst_destroy(struct dst_entry *dst);

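/* "Free" a dst that has been unlinked from its parent (see the lifecycle
 * comment near the top of this file): destroy it right away if its refcount
 * is already zero, otherwise hand it to the dst garbage collector via
 * __dst_free().
 */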
static inline void dst_free(struct dst_entry *dst)
{
	if (dst->obsolete > 0)
		return;
	if (!atomic_read(&dst->__refcnt)) {
		dst = dst_destroy(dst);
		if (!dst)
			return;
	}
	__dst_free(dst);
}

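/* RCU callback form of dst_free(), for use with
 * call_rcu(&dst->rcu_head, dst_rcu_free).
 */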
static inline void dst_rcu_free(struct rcu_head *head)
{
	struct dst_entry *dst = container_of(head, struct dst_entry, rcu_head);
	dst_free(dst);
}

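/* Request neighbour confirmation: the flag is consumed by the next
 * dst_neigh_output() on this dst.
 */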
static inline void dst_confirm(struct dst_entry *dst)
{
	dst->pending_confirm = 1;
}

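/* Transmit @skb through neighbour @n: confirm the neighbour if a
 * confirmation is pending on the dst, then use the cached hardware header
 * fast path (neigh_hh_output()) when the neighbour is connected, falling
 * back to n->output() otherwise.
 */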
static inline int dst_neigh_output(struct dst_entry *dst, struct neighbour *n,
				   struct sk_buff *skb)
{
	const struct hh_cache *hh;

	if (dst->pending_confirm) {
		unsigned long now = jiffies;

		dst->pending_confirm = 0;
		/* avoid dirtying neighbour */
		if (n->confirmed != now)
			n->confirmed = now;
	}

	hh = &n->hh;
	if ((n->nud_state & NUD_CONNECTED) && hh->hh_len)
		return neigh_hh_output(hh, skb);
	else
		return n->output(n, skb);
}

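/* Neighbour lookup helpers: resolve the neighbour for @daddr (or for the
 * destination taken from @skb) via the protocol's ->neigh_lookup() op,
 * converting ERR_PTR() failures to NULL.
 */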
static inline struct neighbour *dst_neigh_lookup(const struct dst_entry *dst, const void *daddr)
{
	struct neighbour *n = dst->ops->neigh_lookup(dst, NULL, daddr);
	return IS_ERR(n) ? NULL : n;
}

static inline struct neighbour *dst_neigh_lookup_skb(const struct dst_entry *dst,
						     struct sk_buff *skb)
{
	struct neighbour *n = dst->ops->neigh_lookup(dst, skb, NULL);
	return IS_ERR(n) ? NULL : n;
}

static inline void dst_link_failure(struct sk_buff *skb)
{
	struct dst_entry *dst = skb_dst(skb);
	if (dst && dst->ops && dst->ops->link_failure)
		dst->ops->link_failure(skb);
}

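/* Arm (or shorten) the expiry deadline on @dst.  @timeout is in jiffies;
 * an expires value of 0 means "never expires", hence the bump to 1 when
 * jiffies + timeout happens to wrap to 0.
 */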
static inline void dst_set_expires(struct dst_entry *dst, int timeout)
{
	unsigned long expires = jiffies + timeout;

	if (expires == 0)
		expires = 1;

	if (dst->expires == 0 || time_before(expires, dst->expires))
		dst->expires = expires;
}

/* Output packet to network from transport.  */
static inline int dst_output(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	return skb_dst(skb)->output(net, sk, skb);
}

/* Input packet from network to transport.  */
static inline int dst_input(struct sk_buff *skb)
{
	return skb_dst(skb)->input(skb);
}

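/* Revalidate a cached dst: if it has been marked obsolete, ask the
 * protocol's ->check() whether (dst, cookie) is still usable.  Returns the
 * (possibly updated) dst, or NULL if it must not be used any more.
 */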
static inline struct dst_entry *dst_check(struct dst_entry *dst, u32 cookie)
{
	if (dst->obsolete)
		dst = dst->ops->check(dst, cookie);
	return dst;
}

void dst_subsys_init(void);

/* Flags for the xfrm_lookup() flags argument. */
enum {
	XFRM_LOOKUP_ICMP = 1 << 0,
	XFRM_LOOKUP_QUEUE = 1 << 1,
	XFRM_LOOKUP_KEEP_DST_REF = 1 << 2,
};

struct flowi;
#ifndef CONFIG_XFRM
static inline struct dst_entry *xfrm_lookup(struct net *net,
					    struct dst_entry *dst_orig,
					    const struct flowi *fl,
					    const struct sock *sk,
					    int flags)
{
	return dst_orig;
}

static inline struct dst_entry *xfrm_lookup_route(struct net *net,
						  struct dst_entry *dst_orig,
						  const struct flowi *fl,
						  const struct sock *sk,
						  int flags)
{
	return dst_orig;
}

static inline struct xfrm_state *dst_xfrm(const struct dst_entry *dst)
{
	return NULL;
}

#else
struct dst_entry *xfrm_lookup(struct net *net, struct dst_entry *dst_orig,
			      const struct flowi *fl, const struct sock *sk,
			      int flags);

struct dst_entry *xfrm_lookup_route(struct net *net, struct dst_entry *dst_orig,
				    const struct flowi *fl, const struct sock *sk,
				    int flags);

/* An skb attached to this dst needs transformation if dst->xfrm is valid */
static inline struct xfrm_state *dst_xfrm(const struct dst_entry *dst)
{
	return dst->xfrm;
}
#endif

#endif /* _NET_DST_H */