1 /* SPDX-License-Identifier: GPL-2.0 */
2 #ifndef _NET_XFRM_H
3 #define _NET_XFRM_H
4 
5 #include <linux/compiler.h>
6 #include <linux/xfrm.h>
7 #include <linux/spinlock.h>
8 #include <linux/list.h>
9 #include <linux/skbuff.h>
10 #include <linux/socket.h>
11 #include <linux/pfkeyv2.h>
12 #include <linux/ipsec.h>
13 #include <linux/in6.h>
14 #include <linux/mutex.h>
15 #include <linux/audit.h>
16 #include <linux/slab.h>
17 #include <linux/refcount.h>
18 #include <linux/sockptr.h>
19 
20 #include <net/sock.h>
21 #include <net/dst.h>
22 #include <net/ip.h>
23 #include <net/route.h>
24 #include <net/ipv6.h>
25 #include <net/ip6_fib.h>
26 #include <net/flow.h>
27 #include <net/gro_cells.h>
28 
29 #include <linux/interrupt.h>
30 
31 #ifdef CONFIG_XFRM_STATISTICS
32 #include <net/snmp.h>
33 #endif
34 
35 #define XFRM_PROTO_ESP		50
36 #define XFRM_PROTO_AH		51
37 #define XFRM_PROTO_COMP		108
38 #define XFRM_PROTO_IPIP		4
39 #define XFRM_PROTO_IPV6		41
40 #define XFRM_PROTO_ROUTING	IPPROTO_ROUTING
41 #define XFRM_PROTO_DSTOPTS	IPPROTO_DSTOPTS
42 
43 #define XFRM_ALIGN4(len)	(((len) + 3) & ~3)
44 #define XFRM_ALIGN8(len)	(((len) + 7) & ~7)
45 #define MODULE_ALIAS_XFRM_MODE(family, encap) \
46 	MODULE_ALIAS("xfrm-mode-" __stringify(family) "-" __stringify(encap))
47 #define MODULE_ALIAS_XFRM_TYPE(family, proto) \
48 	MODULE_ALIAS("xfrm-type-" __stringify(family) "-" __stringify(proto))
49 #define MODULE_ALIAS_XFRM_OFFLOAD_TYPE(family, proto) \
50 	MODULE_ALIAS("xfrm-offload-" __stringify(family) "-" __stringify(proto))
51 
52 #ifdef CONFIG_XFRM_STATISTICS
53 #define XFRM_INC_STATS(net, field)	SNMP_INC_STATS((net)->mib.xfrm_statistics, field)
54 #else
55 #define XFRM_INC_STATS(net, field)	((void)(net))
56 #endif
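/* Usage sketch (editor's illustration, not part of the original header): the
 * second argument is one of the LINUX_MIB_XFRM* counters from <linux/snmp.h>,
 * e.g.
 *
 *	XFRM_INC_STATS(net, LINUX_MIB_XFRMINERROR);
 *
 * With CONFIG_XFRM_STATISTICS disabled the macro reduces to ((void)(net)).
 */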
57 
58 
59 /* Organization of SPD aka "XFRM rules"
60    ------------------------------------
61 
62    Basic objects:
63    - policy rule, struct xfrm_policy (=SPD entry)
64    - bundle of transformations, struct dst_entry == struct xfrm_dst (=SA bundle)
65    - instance of a transformer, struct xfrm_state (=SA)
66    - template to clone xfrm_state, struct xfrm_tmpl
67 
68    SPD is plain linear list of xfrm_policy rules, ordered by priority.
69    (To be compatible with existing pfkeyv2 implementations,
70    many rules with priority of 0x7fffffff are allowed to exist and
71    such rules are ordered in an unpredictable way, thanks to bsd folks.)
72 
73    Lookup is plain linear search until the first match with selector.
74 
75    If "action" is "block", then we prohibit the flow, otherwise:
76    if "xfrms_nr" is zero, the flow passes untransformed. Otherwise,
77    the policy entry has a list of up to XFRM_MAX_DEPTH transformations,
78    described by xfrm_tmpl templates. Each template is resolved
79    to a complete xfrm_state (see below) and the bundle of transformations
80    is packed into a dst_entry returned to the requestor.
81 
82    dst -. xfrm  .-> xfrm_state #1
83     |---. child .-> dst -. xfrm .-> xfrm_state #2
84                      |---. child .-> dst -. xfrm .-> xfrm_state #3
85                                       |---. child .-> NULL
86 
87    Bundles are cached at the xfrm_policy struct (field ->bundles).
88 
89 
90    Resolution of xfrm_tmpl
91    -----------------------
92    Template contains:
93    1. ->mode		Mode: transport or tunnel
94    2. ->id.proto	Protocol: AH/ESP/IPCOMP
95    3. ->id.daddr	Remote tunnel endpoint, ignored for transport mode.
96       Q: allow to resolve security gateway?
97    4. ->id.spi          If not zero, static SPI.
98    5. ->saddr		Local tunnel endpoint, ignored for transport mode.
99    6. ->algos		List of allowed algos. Plain bitmask now.
100       Q: ealgos, aalgos, calgos. What a mess...
101    7. ->share		Sharing mode.
102       Q: how to implement private sharing mode? To add struct sock* to
103       flow id?
104 
105    Having this template, we search the SAD for entries with the
106    appropriate mode/proto/algo that are permitted by the selector.
107    If no appropriate entry is found, one is requested from the key manager.
108 
109    PROBLEMS:
110    Q: How to find all the bundles referring to a physical path for
111       PMTU discovery? It seems dst would have to contain a list of all
112       parents... and we would enter an infinite locking-hierarchy disaster.
113       No! It is easier, we will not search for them, let them find us.
114       We add genid to each dst plus pointer to genid of raw IP route,
115       pmtu disc will update pmtu on raw IP route and increase its genid.
116       dst_check() will see this for top level and trigger resyncing
117       metrics. Plus, it will be made via sk->sk_dst_cache. Solved.
118  */
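/* Illustrative sketch (editor's addition, not kernel code): the chain drawn
 * above can be walked with helpers declared later in this header; each hop is
 * a dst whose ->xfrm points at the SA applied at that step, and the last
 * child is the raw route:
 *
 *	struct dst_entry *dst;
 *
 *	for (dst = bundle_top; dst; dst = xfrm_dst_child(dst)) {
 *		if (dst->xfrm)
 *			apply_state(dst->xfrm);	(apply_state() is hypothetical)
 *	}
 *
 * bundle_top is assumed to be the dst_entry returned for the flow;
 * xfrm_dst_path() returns the raw route underneath the whole bundle.
 */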
119 
120 struct xfrm_state_walk {
121 	struct list_head	all;
122 	u8			state;
123 	u8			dying;
124 	u8			proto;
125 	u32			seq;
126 	struct xfrm_address_filter *filter;
127 };
128 
129 enum {
130 	XFRM_DEV_OFFLOAD_IN = 1,
131 	XFRM_DEV_OFFLOAD_OUT,
132 	XFRM_DEV_OFFLOAD_FWD,
133 };
134 
135 enum {
136 	XFRM_DEV_OFFLOAD_UNSPECIFIED,
137 	XFRM_DEV_OFFLOAD_CRYPTO,
138 	XFRM_DEV_OFFLOAD_PACKET,
139 };
140 
141 enum {
142 	XFRM_DEV_OFFLOAD_FLAG_ACQ = 1,
143 };
144 
145 struct xfrm_dev_offload {
146 	struct net_device	*dev;
147 	netdevice_tracker	dev_tracker;
148 	struct net_device	*real_dev;
149 	unsigned long		offload_handle;
150 	u8			dir : 2;
151 	u8			type : 2;
152 	u8			flags : 2;
153 };
154 
155 struct xfrm_mode {
156 	u8 encap;
157 	u8 family;
158 	u8 flags;
159 };
160 
161 /* Flags for xfrm_mode. */
162 enum {
163 	XFRM_MODE_FLAG_TUNNEL = 1,
164 };
165 
166 enum xfrm_replay_mode {
167 	XFRM_REPLAY_MODE_LEGACY,
168 	XFRM_REPLAY_MODE_BMP,
169 	XFRM_REPLAY_MODE_ESN,
170 };
171 
172 /* Full description of state of transformer. */
173 struct xfrm_state {
174 	possible_net_t		xs_net;
175 	union {
176 		struct hlist_node	gclist;
177 		struct hlist_node	bydst;
178 	};
179 	union {
180 		struct hlist_node	dev_gclist;
181 		struct hlist_node	bysrc;
182 	};
183 	struct hlist_node	byspi;
184 	struct hlist_node	byseq;
185 
186 	refcount_t		refcnt;
187 	spinlock_t		lock;
188 
189 	struct xfrm_id		id;
190 	struct xfrm_selector	sel;
191 	struct xfrm_mark	mark;
192 	u32			if_id;
193 	u32			tfcpad;
194 
195 	u32			genid;
196 
197 	/* Key manager bits */
198 	struct xfrm_state_walk	km;
199 
200 	/* Parameters of this state. */
201 	struct {
202 		u32		reqid;
203 		u8		mode;
204 		u8		replay_window;
205 		u8		aalgo, ealgo, calgo;
206 		u8		flags;
207 		u16		family;
208 		xfrm_address_t	saddr;
209 		int		header_len;
210 		int		trailer_len;
211 		u32		extra_flags;
212 		struct xfrm_mark	smark;
213 	} props;
214 
215 	struct xfrm_lifetime_cfg lft;
216 
217 	/* Data for transformer */
218 	struct xfrm_algo_auth	*aalg;
219 	struct xfrm_algo	*ealg;
220 	struct xfrm_algo	*calg;
221 	struct xfrm_algo_aead	*aead;
222 	const char		*geniv;
223 
224 	/* mapping change rate limiting */
225 	__be16 new_mapping_sport;
226 	u32 new_mapping;	/* seconds */
227 	u32 mapping_maxage;	/* seconds for input SA */
228 
229 	/* Data for encapsulator */
230 	struct xfrm_encap_tmpl	*encap;
231 	struct sock __rcu	*encap_sk;
232 
233 	/* Data for care-of address */
234 	xfrm_address_t	*coaddr;
235 
236 	/* IPComp needs an IPIP tunnel for handling uncompressed packets */
237 	struct xfrm_state	*tunnel;
238 
239 	/* If a tunnel, number of users + 1 */
240 	atomic_t		tunnel_users;
241 
242 	/* State for replay detection */
243 	struct xfrm_replay_state replay;
244 	struct xfrm_replay_state_esn *replay_esn;
245 
246 	/* Replay detection state at the time we sent the last notification */
247 	struct xfrm_replay_state preplay;
248 	struct xfrm_replay_state_esn *preplay_esn;
249 
250 	/* replay detection mode */
251 	enum xfrm_replay_mode    repl_mode;
252 	/* internal flag that only holds state for delayed aevent at the
253 	 * moment
254 	*/
255 	u32			xflags;
256 
257 	/* Replay detection notification settings */
258 	u32			replay_maxage;
259 	u32			replay_maxdiff;
260 
261 	/* Replay detection notification timer */
262 	struct timer_list	rtimer;
263 
264 	/* Statistics */
265 	struct xfrm_stats	stats;
266 
267 	struct xfrm_lifetime_cur curlft;
268 	struct hrtimer		mtimer;
269 
270 	struct xfrm_dev_offload xso;
271 
272 	/* used to fix curlft->add_time when changing date */
273 	long		saved_tmo;
274 
275 	/* Last used time */
276 	time64_t		lastused;
277 
278 	struct page_frag xfrag;
279 
280 	/* Reference to data common to all the instances of this
281 	 * transformer. */
282 	const struct xfrm_type	*type;
283 	struct xfrm_mode	inner_mode;
284 	struct xfrm_mode	inner_mode_iaf;
285 	struct xfrm_mode	outer_mode;
286 
287 	const struct xfrm_type_offload	*type_offload;
288 
289 	/* Security context */
290 	struct xfrm_sec_ctx	*security;
291 
292 	/* Private data of this transformer, format is opaque,
293 	 * interpreted by xfrm_type methods. */
294 	void			*data;
295 };
296 
297 static inline struct net *xs_net(struct xfrm_state *x)
298 {
299 	return read_pnet(&x->xs_net);
300 }
301 
302 /* xflags - make enum if more show up */
303 #define XFRM_TIME_DEFER	1
304 #define XFRM_SOFT_EXPIRE 2
305 
306 enum {
307 	XFRM_STATE_VOID,
308 	XFRM_STATE_ACQ,
309 	XFRM_STATE_VALID,
310 	XFRM_STATE_ERROR,
311 	XFRM_STATE_EXPIRED,
312 	XFRM_STATE_DEAD
313 };
314 
315 /* callback structure passed from either netlink or pfkey */
316 struct km_event {
317 	union {
318 		u32 hard;
319 		u32 proto;
320 		u32 byid;
321 		u32 aevent;
322 		u32 type;
323 	} data;
324 
325 	u32	seq;
326 	u32	portid;
327 	u32	event;
328 	struct net *net;
329 };
330 
331 struct xfrm_if_decode_session_result {
332 	struct net *net;
333 	u32 if_id;
334 };
335 
336 struct xfrm_if_cb {
337 	bool (*decode_session)(struct sk_buff *skb,
338 			       unsigned short family,
339 			       struct xfrm_if_decode_session_result *res);
340 };
341 
342 void xfrm_if_register_cb(const struct xfrm_if_cb *ifcb);
343 void xfrm_if_unregister_cb(void);
344 
345 struct xfrm_dst_lookup_params {
346 	struct net *net;
347 	int tos;
348 	int oif;
349 	xfrm_address_t *saddr;
350 	xfrm_address_t *daddr;
351 	u32 mark;
352 	__u8 ipproto;
353 	union flowi_uli uli;
354 };
355 
356 struct net_device;
357 struct xfrm_type;
358 struct xfrm_dst;
359 struct xfrm_policy_afinfo {
360 	struct dst_ops		*dst_ops;
361 	struct dst_entry	*(*dst_lookup)(const struct xfrm_dst_lookup_params *params);
362 	int			(*get_saddr)(xfrm_address_t *saddr,
363 					     const struct xfrm_dst_lookup_params *params);
364 	int			(*fill_dst)(struct xfrm_dst *xdst,
365 					    struct net_device *dev,
366 					    const struct flowi *fl);
367 	struct dst_entry	*(*blackhole_route)(struct net *net, struct dst_entry *orig);
368 };
369 
370 int xfrm_policy_register_afinfo(const struct xfrm_policy_afinfo *afinfo, int family);
371 void xfrm_policy_unregister_afinfo(const struct xfrm_policy_afinfo *afinfo);
372 void km_policy_notify(struct xfrm_policy *xp, int dir,
373 		      const struct km_event *c);
374 void km_state_notify(struct xfrm_state *x, const struct km_event *c);
375 
376 struct xfrm_tmpl;
377 int km_query(struct xfrm_state *x, struct xfrm_tmpl *t,
378 	     struct xfrm_policy *pol);
379 void km_state_expired(struct xfrm_state *x, int hard, u32 portid);
380 int __xfrm_state_delete(struct xfrm_state *x);
381 
382 struct xfrm_state_afinfo {
383 	u8				family;
384 	u8				proto;
385 
386 	const struct xfrm_type_offload *type_offload_esp;
387 
388 	const struct xfrm_type		*type_esp;
389 	const struct xfrm_type		*type_ipip;
390 	const struct xfrm_type		*type_ipip6;
391 	const struct xfrm_type		*type_comp;
392 	const struct xfrm_type		*type_ah;
393 	const struct xfrm_type		*type_routing;
394 	const struct xfrm_type		*type_dstopts;
395 
396 	int			(*output)(struct net *net, struct sock *sk, struct sk_buff *skb);
397 	int			(*transport_finish)(struct sk_buff *skb,
398 						    int async);
399 	void			(*local_error)(struct sk_buff *skb, u32 mtu);
400 };
401 
402 int xfrm_state_register_afinfo(struct xfrm_state_afinfo *afinfo);
403 int xfrm_state_unregister_afinfo(struct xfrm_state_afinfo *afinfo);
404 struct xfrm_state_afinfo *xfrm_state_get_afinfo(unsigned int family);
405 struct xfrm_state_afinfo *xfrm_state_afinfo_get_rcu(unsigned int family);
406 
407 struct xfrm_input_afinfo {
408 	u8			family;
409 	bool			is_ipip;
410 	int			(*callback)(struct sk_buff *skb, u8 protocol,
411 					    int err);
412 };
413 
414 int xfrm_input_register_afinfo(const struct xfrm_input_afinfo *afinfo);
415 int xfrm_input_unregister_afinfo(const struct xfrm_input_afinfo *afinfo);
416 
417 void xfrm_flush_gc(void);
418 void xfrm_state_delete_tunnel(struct xfrm_state *x);
419 
420 struct xfrm_type {
421 	struct module		*owner;
422 	u8			proto;
423 	u8			flags;
424 #define XFRM_TYPE_NON_FRAGMENT	1
425 #define XFRM_TYPE_REPLAY_PROT	2
426 #define XFRM_TYPE_LOCAL_COADDR	4
427 #define XFRM_TYPE_REMOTE_COADDR	8
428 
429 	int			(*init_state)(struct xfrm_state *x,
430 					      struct netlink_ext_ack *extack);
431 	void			(*destructor)(struct xfrm_state *);
432 	int			(*input)(struct xfrm_state *, struct sk_buff *skb);
433 	int			(*output)(struct xfrm_state *, struct sk_buff *pskb);
434 	int			(*reject)(struct xfrm_state *, struct sk_buff *,
435 					  const struct flowi *);
436 };
437 
438 int xfrm_register_type(const struct xfrm_type *type, unsigned short family);
439 void xfrm_unregister_type(const struct xfrm_type *type, unsigned short family);
440 
441 struct xfrm_type_offload {
442 	struct module	*owner;
443 	u8		proto;
444 	void		(*encap)(struct xfrm_state *, struct sk_buff *pskb);
445 	int		(*input_tail)(struct xfrm_state *x, struct sk_buff *skb);
446 	int		(*xmit)(struct xfrm_state *, struct sk_buff *pskb, netdev_features_t features);
447 };
448 
449 int xfrm_register_type_offload(const struct xfrm_type_offload *type, unsigned short family);
450 void xfrm_unregister_type_offload(const struct xfrm_type_offload *type, unsigned short family);
451 
452 static inline int xfrm_af2proto(unsigned int family)
453 {
454 	switch(family) {
455 	case AF_INET:
456 		return IPPROTO_IPIP;
457 	case AF_INET6:
458 		return IPPROTO_IPV6;
459 	default:
460 		return 0;
461 	}
462 }
463 
464 static inline const struct xfrm_mode *xfrm_ip2inner_mode(struct xfrm_state *x, int ipproto)
465 {
466 	if ((ipproto == IPPROTO_IPIP && x->props.family == AF_INET) ||
467 	    (ipproto == IPPROTO_IPV6 && x->props.family == AF_INET6))
468 		return &x->inner_mode;
469 	else
470 		return &x->inner_mode_iaf;
471 }
472 
473 struct xfrm_tmpl {
474 /* id in template is interpreted as:
475  * daddr - destination of tunnel, may be zero for transport mode.
476  * spi   - zero to acquire spi. Not zero if spi is static, then
477  *	   daddr must be fixed too.
478  * proto - AH/ESP/IPCOMP
479  */
480 	struct xfrm_id		id;
481 
482 /* Source address of tunnel. Ignored, if it is not a tunnel. */
483 	xfrm_address_t		saddr;
484 
485 	unsigned short		encap_family;
486 
487 	u32			reqid;
488 
489 /* Mode: transport, tunnel etc. */
490 	u8			mode;
491 
492 /* Sharing mode: unique, this session only, this user only etc. */
493 	u8			share;
494 
495 /* May skip this transformation if no SA is found */
496 	u8			optional;
497 
498 /* Skip aalgos/ealgos/calgos checks. */
499 	u8			allalgs;
500 
501 /* Bit mask of algos allowed for acquisition */
502 	u32			aalgos;
503 	u32			ealgos;
504 	u32			calgos;
505 };
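/* Illustrative sketch (editor's addition): a minimal tunnel-mode ESP template
 * as a key manager might fill it in.  spi == 0 asks for SPI allocation via an
 * acquire; a non-zero spi pins the SPI and then daddr must be fixed as well.
 * remote_gw and local_gw are assumed xfrm_address_t values:
 *
 *	struct xfrm_tmpl t = {
 *		.id.proto	= XFRM_PROTO_ESP,
 *		.mode		= XFRM_MODE_TUNNEL,
 *		.share		= XFRM_SHARE_ANY,
 *		.reqid		= 1,
 *	};
 *	t.id.daddr = remote_gw;
 *	t.saddr    = local_gw;
 */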
506 
507 #define XFRM_MAX_DEPTH		6
508 #define XFRM_MAX_OFFLOAD_DEPTH	1
509 
510 struct xfrm_policy_walk_entry {
511 	struct list_head	all;
512 	u8			dead;
513 };
514 
515 struct xfrm_policy_walk {
516 	struct xfrm_policy_walk_entry walk;
517 	u8 type;
518 	u32 seq;
519 };
520 
521 struct xfrm_policy_queue {
522 	struct sk_buff_head	hold_queue;
523 	struct timer_list	hold_timer;
524 	unsigned long		timeout;
525 };
526 
527 struct xfrm_policy {
528 	possible_net_t		xp_net;
529 	struct hlist_node	bydst;
530 	struct hlist_node	byidx;
531 
532 	/* This lock only affects elements except for entry. */
533 	rwlock_t		lock;
534 	refcount_t		refcnt;
535 	u32			pos;
536 	struct timer_list	timer;
537 
538 	atomic_t		genid;
539 	u32			priority;
540 	u32			index;
541 	u32			if_id;
542 	struct xfrm_mark	mark;
543 	struct xfrm_selector	selector;
544 	struct xfrm_lifetime_cfg lft;
545 	struct xfrm_lifetime_cur curlft;
546 	struct xfrm_policy_walk_entry walk;
547 	struct xfrm_policy_queue polq;
548 	bool                    bydst_reinsert;
549 	u8			type;
550 	u8			action;
551 	u8			flags;
552 	u8			xfrm_nr;
553 	u16			family;
554 	struct xfrm_sec_ctx	*security;
555 	struct xfrm_tmpl       	xfrm_vec[XFRM_MAX_DEPTH];
556 	struct hlist_node	bydst_inexact_list;
557 	struct rcu_head		rcu;
558 
559 	struct xfrm_dev_offload xdo;
560 };
561 
562 static inline struct net *xp_net(const struct xfrm_policy *xp)
563 {
564 	return read_pnet(&xp->xp_net);
565 }
566 
567 struct xfrm_kmaddress {
568 	xfrm_address_t          local;
569 	xfrm_address_t          remote;
570 	u32			reserved;
571 	u16			family;
572 };
573 
574 struct xfrm_migrate {
575 	xfrm_address_t		old_daddr;
576 	xfrm_address_t		old_saddr;
577 	xfrm_address_t		new_daddr;
578 	xfrm_address_t		new_saddr;
579 	u8			proto;
580 	u8			mode;
581 	u16			reserved;
582 	u32			reqid;
583 	u16			old_family;
584 	u16			new_family;
585 };
586 
587 #define XFRM_KM_TIMEOUT                30
588 /* what happened */
589 #define XFRM_REPLAY_UPDATE	XFRM_AE_CR
590 #define XFRM_REPLAY_TIMEOUT	XFRM_AE_CE
591 
592 /* default aevent timeout in units of 100ms */
593 #define XFRM_AE_ETIME			10
594 /* Async Event timer multiplier */
595 #define XFRM_AE_ETH_M			10
596 /* default seq threshold size */
597 #define XFRM_AE_SEQT_SIZE		2
598 
599 struct xfrm_mgr {
600 	struct list_head	list;
601 	int			(*notify)(struct xfrm_state *x, const struct km_event *c);
602 	int			(*acquire)(struct xfrm_state *x, struct xfrm_tmpl *, struct xfrm_policy *xp);
603 	struct xfrm_policy	*(*compile_policy)(struct sock *sk, int opt, u8 *data, int len, int *dir);
604 	int			(*new_mapping)(struct xfrm_state *x, xfrm_address_t *ipaddr, __be16 sport);
605 	int			(*notify_policy)(struct xfrm_policy *x, int dir, const struct km_event *c);
606 	int			(*report)(struct net *net, u8 proto, struct xfrm_selector *sel, xfrm_address_t *addr);
607 	int			(*migrate)(const struct xfrm_selector *sel,
608 					   u8 dir, u8 type,
609 					   const struct xfrm_migrate *m,
610 					   int num_bundles,
611 					   const struct xfrm_kmaddress *k,
612 					   const struct xfrm_encap_tmpl *encap);
613 	bool			(*is_alive)(const struct km_event *c);
614 };
615 
616 void xfrm_register_km(struct xfrm_mgr *km);
617 void xfrm_unregister_km(struct xfrm_mgr *km);
618 
619 struct xfrm_tunnel_skb_cb {
620 	union {
621 		struct inet_skb_parm h4;
622 		struct inet6_skb_parm h6;
623 	} header;
624 
625 	union {
626 		struct ip_tunnel *ip4;
627 		struct ip6_tnl *ip6;
628 	} tunnel;
629 };
630 
631 #define XFRM_TUNNEL_SKB_CB(__skb) ((struct xfrm_tunnel_skb_cb *)&((__skb)->cb[0]))
632 
633 /*
634  * This structure is used for the duration where packets are being
635  * transformed by IPsec.  As soon as the packet leaves IPsec the
636  * area beyond the generic IP part may be overwritten.
637  */
638 struct xfrm_skb_cb {
639 	struct xfrm_tunnel_skb_cb header;
640 
641         /* Sequence number for replay protection. */
642 	union {
643 		struct {
644 			__u32 low;
645 			__u32 hi;
646 		} output;
647 		struct {
648 			__be32 low;
649 			__be32 hi;
650 		} input;
651 	} seq;
652 };
653 
654 #define XFRM_SKB_CB(__skb) ((struct xfrm_skb_cb *)&((__skb)->cb[0]))
655 
656 /*
657  * This structure is used by the afinfo prepare_input/prepare_output functions
658  * to transmit header information to the mode input/output functions.
659  */
660 struct xfrm_mode_skb_cb {
661 	struct xfrm_tunnel_skb_cb header;
662 
663 	/* Copied from header for IPv4, always set to zero and DF for IPv6. */
664 	__be16 id;
665 	__be16 frag_off;
666 
667 	/* IP header length (excluding options or extension headers). */
668 	u8 ihl;
669 
670 	/* TOS for IPv4, class for IPv6. */
671 	u8 tos;
672 
673 	/* TTL for IPv4, hop limit for IPv6. */
674 	u8 ttl;
675 
676 	/* Protocol for IPv4, NH for IPv6. */
677 	u8 protocol;
678 
679 	/* Option length for IPv4, zero for IPv6. */
680 	u8 optlen;
681 
682 	/* Used by IPv6 only, zero for IPv4. */
683 	u8 flow_lbl[3];
684 };
685 
686 #define XFRM_MODE_SKB_CB(__skb) ((struct xfrm_mode_skb_cb *)&((__skb)->cb[0]))
687 
688 /*
689  * This structure is used by the input processing to locate the SPI and
690  * related information.
691  */
692 struct xfrm_spi_skb_cb {
693 	struct xfrm_tunnel_skb_cb header;
694 
695 	unsigned int daddroff;
696 	unsigned int family;
697 	__be32 seq;
698 };
699 
700 #define XFRM_SPI_SKB_CB(__skb) ((struct xfrm_spi_skb_cb *)&((__skb)->cb[0]))
701 
702 #ifdef CONFIG_AUDITSYSCALL
703 static inline struct audit_buffer *xfrm_audit_start(const char *op)
704 {
705 	struct audit_buffer *audit_buf = NULL;
706 
707 	if (audit_enabled == AUDIT_OFF)
708 		return NULL;
709 	audit_buf = audit_log_start(audit_context(), GFP_ATOMIC,
710 				    AUDIT_MAC_IPSEC_EVENT);
711 	if (audit_buf == NULL)
712 		return NULL;
713 	audit_log_format(audit_buf, "op=%s", op);
714 	return audit_buf;
715 }
716 
717 static inline void xfrm_audit_helper_usrinfo(bool task_valid,
718 					     struct audit_buffer *audit_buf)
719 {
720 	const unsigned int auid = from_kuid(&init_user_ns, task_valid ?
721 					    audit_get_loginuid(current) :
722 					    INVALID_UID);
723 	const unsigned int ses = task_valid ? audit_get_sessionid(current) :
724 		AUDIT_SID_UNSET;
725 
726 	audit_log_format(audit_buf, " auid=%u ses=%u", auid, ses);
727 	audit_log_task_context(audit_buf);
728 }
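/* Typical call pattern (editor's sketch; the real callers are in
 * net/xfrm/xfrm_policy.c and net/xfrm/xfrm_state.c): open a record, attach
 * the caller identity, add event-specific fields, then close it:
 *
 *	struct audit_buffer *ab = xfrm_audit_start("SPD-add");
 *
 *	if (ab) {
 *		xfrm_audit_helper_usrinfo(task_valid, ab);
 *		audit_log_format(ab, " res=%u", result);
 *		audit_log_end(ab);
 *	}
 */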
729 
730 void xfrm_audit_policy_add(struct xfrm_policy *xp, int result, bool task_valid);
731 void xfrm_audit_policy_delete(struct xfrm_policy *xp, int result,
732 			      bool task_valid);
733 void xfrm_audit_state_add(struct xfrm_state *x, int result, bool task_valid);
734 void xfrm_audit_state_delete(struct xfrm_state *x, int result, bool task_valid);
735 void xfrm_audit_state_replay_overflow(struct xfrm_state *x,
736 				      struct sk_buff *skb);
737 void xfrm_audit_state_replay(struct xfrm_state *x, struct sk_buff *skb,
738 			     __be32 net_seq);
739 void xfrm_audit_state_notfound_simple(struct sk_buff *skb, u16 family);
740 void xfrm_audit_state_notfound(struct sk_buff *skb, u16 family, __be32 net_spi,
741 			       __be32 net_seq);
742 void xfrm_audit_state_icvfail(struct xfrm_state *x, struct sk_buff *skb,
743 			      u8 proto);
744 #else
745 
746 static inline void xfrm_audit_policy_add(struct xfrm_policy *xp, int result,
747 					 bool task_valid)
748 {
749 }
750 
751 static inline void xfrm_audit_policy_delete(struct xfrm_policy *xp, int result,
752 					    bool task_valid)
753 {
754 }
755 
756 static inline void xfrm_audit_state_add(struct xfrm_state *x, int result,
757 					bool task_valid)
758 {
759 }
760 
761 static inline void xfrm_audit_state_delete(struct xfrm_state *x, int result,
762 					   bool task_valid)
763 {
764 }
765 
766 static inline void xfrm_audit_state_replay_overflow(struct xfrm_state *x,
767 					     struct sk_buff *skb)
768 {
769 }
770 
771 static inline void xfrm_audit_state_replay(struct xfrm_state *x,
772 					   struct sk_buff *skb, __be32 net_seq)
773 {
774 }
775 
776 static inline void xfrm_audit_state_notfound_simple(struct sk_buff *skb,
777 				      u16 family)
778 {
779 }
780 
781 static inline void xfrm_audit_state_notfound(struct sk_buff *skb, u16 family,
782 				      __be32 net_spi, __be32 net_seq)
783 {
784 }
785 
786 static inline void xfrm_audit_state_icvfail(struct xfrm_state *x,
787 				     struct sk_buff *skb, u8 proto)
788 {
789 }
790 #endif /* CONFIG_AUDITSYSCALL */
791 
792 static inline void xfrm_pol_hold(struct xfrm_policy *policy)
793 {
794 	if (likely(policy != NULL))
795 		refcount_inc(&policy->refcnt);
796 }
797 
798 void xfrm_policy_destroy(struct xfrm_policy *policy);
799 
800 static inline void xfrm_pol_put(struct xfrm_policy *policy)
801 {
802 	if (refcount_dec_and_test(&policy->refcnt))
803 		xfrm_policy_destroy(policy);
804 }
805 
806 static inline void xfrm_pols_put(struct xfrm_policy **pols, int npols)
807 {
808 	int i;
809 	for (i = npols - 1; i >= 0; --i)
810 		xfrm_pol_put(pols[i]);
811 }
812 
813 void __xfrm_state_destroy(struct xfrm_state *, bool);
814 
815 static inline void __xfrm_state_put(struct xfrm_state *x)
816 {
817 	refcount_dec(&x->refcnt);
818 }
819 
820 static inline void xfrm_state_put(struct xfrm_state *x)
821 {
822 	if (refcount_dec_and_test(&x->refcnt))
823 		__xfrm_state_destroy(x, false);
824 }
825 
826 static inline void xfrm_state_put_sync(struct xfrm_state *x)
827 {
828 	if (refcount_dec_and_test(&x->refcnt))
829 		__xfrm_state_destroy(x, true);
830 }
831 
832 static inline void xfrm_state_hold(struct xfrm_state *x)
833 {
834 	refcount_inc(&x->refcnt);
835 }
836 
837 static inline bool addr_match(const void *token1, const void *token2,
838 			      unsigned int prefixlen)
839 {
840 	const __be32 *a1 = token1;
841 	const __be32 *a2 = token2;
842 	unsigned int pdw;
843 	unsigned int pbi;
844 
845 	pdw = prefixlen >> 5;	  /* num of whole u32 in prefix */
846 	pbi = prefixlen &  0x1f;  /* num of bits in incomplete u32 in prefix */
847 
848 	if (pdw)
849 		if (memcmp(a1, a2, pdw << 2))
850 			return false;
851 
852 	if (pbi) {
853 		__be32 mask;
854 
855 		mask = htonl((0xffffffff) << (32 - pbi));
856 
857 		if ((a1[pdw] ^ a2[pdw]) & mask)
858 			return false;
859 	}
860 
861 	return true;
862 }
863 
864 static inline bool addr4_match(__be32 a1, __be32 a2, u8 prefixlen)
865 {
866 	/* C99 6.5.7 (3): u32 << 32 is undefined behaviour */
867 	if (sizeof(long) == 4 && prefixlen == 0)
868 		return true;
869 	return !((a1 ^ a2) & htonl(~0UL << (32 - prefixlen)));
870 }
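/* Example (editor's illustration): addr_match() compares whole 32-bit words
 * first and then masks the leftover bits, so prefixlen 72 checks two full
 * words plus the top 8 bits of the third; addr4_match() special-cases
 * prefixlen == 0 because a 32-bit shift of a 32-bit long is undefined.
 * Selector matching uses them roughly like this:
 *
 *	addr_match(&sel->daddr, &fl6->daddr, sel->prefixlen_d);
 *	addr4_match(sel->daddr.a4, fl4->daddr, sel->prefixlen_d);
 */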
871 
872 static __inline__
873 __be16 xfrm_flowi_sport(const struct flowi *fl, const union flowi_uli *uli)
874 {
875 	__be16 port;
876 	switch(fl->flowi_proto) {
877 	case IPPROTO_TCP:
878 	case IPPROTO_UDP:
879 	case IPPROTO_UDPLITE:
880 	case IPPROTO_SCTP:
881 		port = uli->ports.sport;
882 		break;
883 	case IPPROTO_ICMP:
884 	case IPPROTO_ICMPV6:
885 		port = htons(uli->icmpt.type);
886 		break;
887 	case IPPROTO_MH:
888 		port = htons(uli->mht.type);
889 		break;
890 	case IPPROTO_GRE:
891 		port = htons(ntohl(uli->gre_key) >> 16);
892 		break;
893 	default:
894 		port = 0;	/*XXX*/
895 	}
896 	return port;
897 }
898 
899 static __inline__
900 __be16 xfrm_flowi_dport(const struct flowi *fl, const union flowi_uli *uli)
901 {
902 	__be16 port;
903 	switch(fl->flowi_proto) {
904 	case IPPROTO_TCP:
905 	case IPPROTO_UDP:
906 	case IPPROTO_UDPLITE:
907 	case IPPROTO_SCTP:
908 		port = uli->ports.dport;
909 		break;
910 	case IPPROTO_ICMP:
911 	case IPPROTO_ICMPV6:
912 		port = htons(uli->icmpt.code);
913 		break;
914 	case IPPROTO_GRE:
915 		port = htons(ntohl(uli->gre_key) & 0xffff);
916 		break;
917 	default:
918 		port = 0;	/*XXX*/
919 	}
920 	return port;
921 }
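/* Note (editor's addition): for flows without real ports the selector "port"
 * slots are overloaded: ICMP puts the type into the sport slot and the code
 * into the dport slot, MH uses its type, and a GRE key is split across both.
 * A caller would extract them roughly as:
 *
 *	__be16 sport = xfrm_flowi_sport(fl, &fl->u.ip4.uli);
 *	__be16 dport = xfrm_flowi_dport(fl, &fl->u.ip4.uli);
 */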
922 
923 bool xfrm_selector_match(const struct xfrm_selector *sel,
924 			 const struct flowi *fl, unsigned short family);
925 
926 #ifdef CONFIG_SECURITY_NETWORK_XFRM
927 /*	If neither has a context --> match
928  * 	Otherwise, both must have a context and the sids, doi, alg must match
929  */
930 static inline bool xfrm_sec_ctx_match(struct xfrm_sec_ctx *s1, struct xfrm_sec_ctx *s2)
931 {
932 	return ((!s1 && !s2) ||
933 		(s1 && s2 &&
934 		 (s1->ctx_sid == s2->ctx_sid) &&
935 		 (s1->ctx_doi == s2->ctx_doi) &&
936 		 (s1->ctx_alg == s2->ctx_alg)));
937 }
938 #else
939 static inline bool xfrm_sec_ctx_match(struct xfrm_sec_ctx *s1, struct xfrm_sec_ctx *s2)
940 {
941 	return true;
942 }
943 #endif
944 
945 /* A struct encoding a bundle of transformations to apply to some set of flows.
946  *
947  * xdst->child points to the next element of the bundle.
948  * dst->xfrm  points to an instance of a transformer.
949  *
950  * Due to unfortunate limitations of the current routing cache, which we
951  * have no time to fix, it mirrors struct rtable and is bound to the same
952  * routing key, including saddr/daddr. However, we can have many
953  * bundles differing by session id. All the bundles grow from a parent
954  * policy rule.
955  */
956 struct xfrm_dst {
957 	union {
958 		struct dst_entry	dst;
959 		struct rtable		rt;
960 		struct rt6_info		rt6;
961 	} u;
962 	struct dst_entry *route;
963 	struct dst_entry *child;
964 	struct dst_entry *path;
965 	struct xfrm_policy *pols[XFRM_POLICY_TYPE_MAX];
966 	int num_pols, num_xfrms;
967 	u32 xfrm_genid;
968 	u32 policy_genid;
969 	u32 route_mtu_cached;
970 	u32 child_mtu_cached;
971 	u32 route_cookie;
972 	u32 path_cookie;
973 };
974 
975 static inline struct dst_entry *xfrm_dst_path(const struct dst_entry *dst)
976 {
977 #ifdef CONFIG_XFRM
978 	if (dst->xfrm || (dst->flags & DST_XFRM_QUEUE)) {
979 		const struct xfrm_dst *xdst = (const struct xfrm_dst *) dst;
980 
981 		return xdst->path;
982 	}
983 #endif
984 	return (struct dst_entry *) dst;
985 }
986 
987 static inline struct dst_entry *xfrm_dst_child(const struct dst_entry *dst)
988 {
989 #ifdef CONFIG_XFRM
990 	if (dst->xfrm || (dst->flags & DST_XFRM_QUEUE)) {
991 		struct xfrm_dst *xdst = (struct xfrm_dst *) dst;
992 		return xdst->child;
993 	}
994 #endif
995 	return NULL;
996 }
997 
998 #ifdef CONFIG_XFRM
999 static inline void xfrm_dst_set_child(struct xfrm_dst *xdst, struct dst_entry *child)
1000 {
1001 	xdst->child = child;
1002 }
1003 
1004 static inline void xfrm_dst_destroy(struct xfrm_dst *xdst)
1005 {
1006 	xfrm_pols_put(xdst->pols, xdst->num_pols);
1007 	dst_release(xdst->route);
1008 	if (likely(xdst->u.dst.xfrm))
1009 		xfrm_state_put(xdst->u.dst.xfrm);
1010 }
1011 #endif
1012 
1013 void xfrm_dst_ifdown(struct dst_entry *dst, struct net_device *dev);
1014 
1015 struct xfrm_if_parms {
1016 	int link;		/* ifindex of underlying L2 interface */
1017 	u32 if_id;		/* interface identifier */
1018 	bool collect_md;
1019 };
1020 
1021 struct xfrm_if {
1022 	struct xfrm_if __rcu *next;	/* next interface in list */
1023 	struct net_device *dev;		/* virtual device associated with interface */
1024 	struct net *net;		/* netns for packet i/o */
1025 	struct xfrm_if_parms p;		/* interface parms */
1026 
1027 	struct gro_cells gro_cells;
1028 };
1029 
1030 struct xfrm_offload {
1031 	/* Output sequence number for replay protection on offloading. */
1032 	struct {
1033 		__u32 low;
1034 		__u32 hi;
1035 	} seq;
1036 
1037 	__u32			flags;
1038 #define	SA_DELETE_REQ		1
1039 #define	CRYPTO_DONE		2
1040 #define	CRYPTO_NEXT_DONE	4
1041 #define	CRYPTO_FALLBACK		8
1042 #define	XFRM_GSO_SEGMENT	16
1043 #define	XFRM_GRO		32
1044 /* 64 is free */
1045 #define	XFRM_DEV_RESUME		128
1046 #define	XFRM_XMIT		256
1047 
1048 	__u32			status;
1049 #define CRYPTO_SUCCESS				1
1050 #define CRYPTO_GENERIC_ERROR			2
1051 #define CRYPTO_TRANSPORT_AH_AUTH_FAILED		4
1052 #define CRYPTO_TRANSPORT_ESP_AUTH_FAILED	8
1053 #define CRYPTO_TUNNEL_AH_AUTH_FAILED		16
1054 #define CRYPTO_TUNNEL_ESP_AUTH_FAILED		32
1055 #define CRYPTO_INVALID_PACKET_SYNTAX		64
1056 #define CRYPTO_INVALID_PROTOCOL			128
1057 
1058 	/* Used to keep whole l2 header for transport mode GRO */
1059 	__u32			orig_mac_len;
1060 
1061 	__u8			proto;
1062 	__u8			inner_ipproto;
1063 };
1064 
1065 struct sec_path {
1066 	int			len;
1067 	int			olen;
1068 	int			verified_cnt;
1069 
1070 	struct xfrm_state	*xvec[XFRM_MAX_DEPTH];
1071 	struct xfrm_offload	ovec[XFRM_MAX_OFFLOAD_DEPTH];
1072 };
1073 
1074 struct sec_path *secpath_set(struct sk_buff *skb);
1075 
1076 static inline void
1077 secpath_reset(struct sk_buff *skb)
1078 {
1079 #ifdef CONFIG_XFRM
1080 	skb_ext_del(skb, SKB_EXT_SEC_PATH);
1081 #endif
1082 }
1083 
1084 static inline int
1085 xfrm_addr_any(const xfrm_address_t *addr, unsigned short family)
1086 {
1087 	switch (family) {
1088 	case AF_INET:
1089 		return addr->a4 == 0;
1090 	case AF_INET6:
1091 		return ipv6_addr_any(&addr->in6);
1092 	}
1093 	return 0;
1094 }
1095 
1096 static inline int
1097 __xfrm4_state_addr_cmp(const struct xfrm_tmpl *tmpl, const struct xfrm_state *x)
1098 {
1099 	return	(tmpl->saddr.a4 &&
1100 		 tmpl->saddr.a4 != x->props.saddr.a4);
1101 }
1102 
1103 static inline int
1104 __xfrm6_state_addr_cmp(const struct xfrm_tmpl *tmpl, const struct xfrm_state *x)
1105 {
1106 	return	(!ipv6_addr_any((struct in6_addr*)&tmpl->saddr) &&
1107 		 !ipv6_addr_equal((struct in6_addr *)&tmpl->saddr, (struct in6_addr*)&x->props.saddr));
1108 }
1109 
1110 static inline int
1111 xfrm_state_addr_cmp(const struct xfrm_tmpl *tmpl, const struct xfrm_state *x, unsigned short family)
1112 {
1113 	switch (family) {
1114 	case AF_INET:
1115 		return __xfrm4_state_addr_cmp(tmpl, x);
1116 	case AF_INET6:
1117 		return __xfrm6_state_addr_cmp(tmpl, x);
1118 	}
1119 	return !0;
1120 }
1121 
1122 #ifdef CONFIG_XFRM
1123 static inline struct xfrm_state *xfrm_input_state(struct sk_buff *skb)
1124 {
1125 	struct sec_path *sp = skb_sec_path(skb);
1126 
1127 	return sp->xvec[sp->len - 1];
1128 }
1129 #endif
1130 
1131 static inline struct xfrm_offload *xfrm_offload(struct sk_buff *skb)
1132 {
1133 #ifdef CONFIG_XFRM
1134 	struct sec_path *sp = skb_sec_path(skb);
1135 
1136 	if (!sp || !sp->olen || sp->len != sp->olen)
1137 		return NULL;
1138 
1139 	return &sp->ovec[sp->olen - 1];
1140 #else
1141 	return NULL;
1142 #endif
1143 }
1144 
1145 #ifdef CONFIG_XFRM
1146 int __xfrm_policy_check(struct sock *, int dir, struct sk_buff *skb,
1147 			unsigned short family);
1148 
1149 static inline bool __xfrm_check_nopolicy(struct net *net, struct sk_buff *skb,
1150 					 int dir)
1151 {
1152 	if (!net->xfrm.policy_count[dir] && !secpath_exists(skb))
1153 		return net->xfrm.policy_default[dir] == XFRM_USERPOLICY_ACCEPT;
1154 
1155 	return false;
1156 }
1157 
1158 static inline bool __xfrm_check_dev_nopolicy(struct sk_buff *skb,
1159 					     int dir, unsigned short family)
1160 {
1161 	if (dir != XFRM_POLICY_OUT && family == AF_INET) {
1162 		/* same dst may be used for traffic originating from
1163 		 * devices with different policy settings.
1164 		 */
1165 		return IPCB(skb)->flags & IPSKB_NOPOLICY;
1166 	}
1167 	return skb_dst(skb) && (skb_dst(skb)->flags & DST_NOPOLICY);
1168 }
1169 
1170 static inline int __xfrm_policy_check2(struct sock *sk, int dir,
1171 				       struct sk_buff *skb,
1172 				       unsigned int family, int reverse)
1173 {
1174 	struct net *net = dev_net(skb->dev);
1175 	int ndir = dir | (reverse ? XFRM_POLICY_MASK + 1 : 0);
1176 	struct xfrm_offload *xo = xfrm_offload(skb);
1177 	struct xfrm_state *x;
1178 
1179 	if (sk && sk->sk_policy[XFRM_POLICY_IN])
1180 		return __xfrm_policy_check(sk, ndir, skb, family);
1181 
1182 	if (xo) {
1183 		x = xfrm_input_state(skb);
1184 		if (x->xso.type == XFRM_DEV_OFFLOAD_PACKET)
1185 			return (xo->flags & CRYPTO_DONE) &&
1186 			       (xo->status & CRYPTO_SUCCESS);
1187 	}
1188 
1189 	return __xfrm_check_nopolicy(net, skb, dir) ||
1190 	       __xfrm_check_dev_nopolicy(skb, dir, family) ||
1191 	       __xfrm_policy_check(sk, ndir, skb, family);
1192 }
1193 
1194 static inline int xfrm_policy_check(struct sock *sk, int dir, struct sk_buff *skb, unsigned short family)
1195 {
1196 	return __xfrm_policy_check2(sk, dir, skb, family, 0);
1197 }
1198 
1199 static inline int xfrm4_policy_check(struct sock *sk, int dir, struct sk_buff *skb)
1200 {
1201 	return xfrm_policy_check(sk, dir, skb, AF_INET);
1202 }
1203 
1204 static inline int xfrm6_policy_check(struct sock *sk, int dir, struct sk_buff *skb)
1205 {
1206 	return xfrm_policy_check(sk, dir, skb, AF_INET6);
1207 }
1208 
1209 static inline int xfrm4_policy_check_reverse(struct sock *sk, int dir,
1210 					     struct sk_buff *skb)
1211 {
1212 	return __xfrm_policy_check2(sk, dir, skb, AF_INET, 1);
1213 }
1214 
1215 static inline int xfrm6_policy_check_reverse(struct sock *sk, int dir,
1216 					     struct sk_buff *skb)
1217 {
1218 	return __xfrm_policy_check2(sk, dir, skb, AF_INET6, 1);
1219 }
1220 
1221 int __xfrm_decode_session(struct sk_buff *skb, struct flowi *fl,
1222 			  unsigned int family, int reverse);
1223 
1224 static inline int xfrm_decode_session(struct sk_buff *skb, struct flowi *fl,
1225 				      unsigned int family)
1226 {
1227 	return __xfrm_decode_session(skb, fl, family, 0);
1228 }
1229 
1230 static inline int xfrm_decode_session_reverse(struct sk_buff *skb,
1231 					      struct flowi *fl,
1232 					      unsigned int family)
1233 {
1234 	return __xfrm_decode_session(skb, fl, family, 1);
1235 }
1236 
1237 int __xfrm_route_forward(struct sk_buff *skb, unsigned short family);
1238 
1239 static inline int xfrm_route_forward(struct sk_buff *skb, unsigned short family)
1240 {
1241 	struct net *net = dev_net(skb->dev);
1242 
1243 	if (!net->xfrm.policy_count[XFRM_POLICY_OUT] &&
1244 	    net->xfrm.policy_default[XFRM_POLICY_OUT] == XFRM_USERPOLICY_ACCEPT)
1245 		return true;
1246 
1247 	return (skb_dst(skb)->flags & DST_NOXFRM) ||
1248 	       __xfrm_route_forward(skb, family);
1249 }
1250 
1251 static inline int xfrm4_route_forward(struct sk_buff *skb)
1252 {
1253 	return xfrm_route_forward(skb, AF_INET);
1254 }
1255 
1256 static inline int xfrm6_route_forward(struct sk_buff *skb)
1257 {
1258 	return xfrm_route_forward(skb, AF_INET6);
1259 }
1260 
1261 int __xfrm_sk_clone_policy(struct sock *sk, const struct sock *osk);
1262 
1263 static inline int xfrm_sk_clone_policy(struct sock *sk, const struct sock *osk)
1264 {
1265 	if (!sk_fullsock(osk))
1266 		return 0;
1267 	sk->sk_policy[0] = NULL;
1268 	sk->sk_policy[1] = NULL;
1269 	if (unlikely(osk->sk_policy[0] || osk->sk_policy[1]))
1270 		return __xfrm_sk_clone_policy(sk, osk);
1271 	return 0;
1272 }
1273 
1274 int xfrm_policy_delete(struct xfrm_policy *pol, int dir);
1275 
1276 static inline void xfrm_sk_free_policy(struct sock *sk)
1277 {
1278 	struct xfrm_policy *pol;
1279 
1280 	pol = rcu_dereference_protected(sk->sk_policy[0], 1);
1281 	if (unlikely(pol != NULL)) {
1282 		xfrm_policy_delete(pol, XFRM_POLICY_MAX);
1283 		sk->sk_policy[0] = NULL;
1284 	}
1285 	pol = rcu_dereference_protected(sk->sk_policy[1], 1);
1286 	if (unlikely(pol != NULL)) {
1287 		xfrm_policy_delete(pol, XFRM_POLICY_MAX+1);
1288 		sk->sk_policy[1] = NULL;
1289 	}
1290 }
1291 
1292 #else
1293 
1294 static inline void xfrm_sk_free_policy(struct sock *sk) {}
1295 static inline int xfrm_sk_clone_policy(struct sock *sk, const struct sock *osk) { return 0; }
1296 static inline int xfrm6_route_forward(struct sk_buff *skb) { return 1; }
1297 static inline int xfrm4_route_forward(struct sk_buff *skb) { return 1; }
1298 static inline int xfrm6_policy_check(struct sock *sk, int dir, struct sk_buff *skb)
1299 {
1300 	return 1;
1301 }
1302 static inline int xfrm4_policy_check(struct sock *sk, int dir, struct sk_buff *skb)
1303 {
1304 	return 1;
1305 }
1306 static inline int xfrm_policy_check(struct sock *sk, int dir, struct sk_buff *skb, unsigned short family)
1307 {
1308 	return 1;
1309 }
1310 static inline int xfrm_decode_session_reverse(struct sk_buff *skb,
1311 					      struct flowi *fl,
1312 					      unsigned int family)
1313 {
1314 	return -ENOSYS;
1315 }
1316 static inline int xfrm4_policy_check_reverse(struct sock *sk, int dir,
1317 					     struct sk_buff *skb)
1318 {
1319 	return 1;
1320 }
1321 static inline int xfrm6_policy_check_reverse(struct sock *sk, int dir,
1322 					     struct sk_buff *skb)
1323 {
1324 	return 1;
1325 }
1326 #endif
1327 
1328 static __inline__
1329 xfrm_address_t *xfrm_flowi_daddr(const struct flowi *fl, unsigned short family)
1330 {
1331 	switch (family){
1332 	case AF_INET:
1333 		return (xfrm_address_t *)&fl->u.ip4.daddr;
1334 	case AF_INET6:
1335 		return (xfrm_address_t *)&fl->u.ip6.daddr;
1336 	}
1337 	return NULL;
1338 }
1339 
1340 static __inline__
1341 xfrm_address_t *xfrm_flowi_saddr(const struct flowi *fl, unsigned short family)
1342 {
1343 	switch (family){
1344 	case AF_INET:
1345 		return (xfrm_address_t *)&fl->u.ip4.saddr;
1346 	case AF_INET6:
1347 		return (xfrm_address_t *)&fl->u.ip6.saddr;
1348 	}
1349 	return NULL;
1350 }
1351 
1352 static __inline__
1353 void xfrm_flowi_addr_get(const struct flowi *fl,
1354 			 xfrm_address_t *saddr, xfrm_address_t *daddr,
1355 			 unsigned short family)
1356 {
1357 	switch(family) {
1358 	case AF_INET:
1359 		memcpy(&saddr->a4, &fl->u.ip4.saddr, sizeof(saddr->a4));
1360 		memcpy(&daddr->a4, &fl->u.ip4.daddr, sizeof(daddr->a4));
1361 		break;
1362 	case AF_INET6:
1363 		saddr->in6 = fl->u.ip6.saddr;
1364 		daddr->in6 = fl->u.ip6.daddr;
1365 		break;
1366 	}
1367 }
1368 
1369 static __inline__ int
1370 __xfrm4_state_addr_check(const struct xfrm_state *x,
1371 			 const xfrm_address_t *daddr, const xfrm_address_t *saddr)
1372 {
1373 	if (daddr->a4 == x->id.daddr.a4 &&
1374 	    (saddr->a4 == x->props.saddr.a4 || !saddr->a4 || !x->props.saddr.a4))
1375 		return 1;
1376 	return 0;
1377 }
1378 
1379 static __inline__ int
1380 __xfrm6_state_addr_check(const struct xfrm_state *x,
1381 			 const xfrm_address_t *daddr, const xfrm_address_t *saddr)
1382 {
1383 	if (ipv6_addr_equal((struct in6_addr *)daddr, (struct in6_addr *)&x->id.daddr) &&
1384 	    (ipv6_addr_equal((struct in6_addr *)saddr, (struct in6_addr *)&x->props.saddr) ||
1385 	     ipv6_addr_any((struct in6_addr *)saddr) ||
1386 	     ipv6_addr_any((struct in6_addr *)&x->props.saddr)))
1387 		return 1;
1388 	return 0;
1389 }
1390 
1391 static __inline__ int
1392 xfrm_state_addr_check(const struct xfrm_state *x,
1393 		      const xfrm_address_t *daddr, const xfrm_address_t *saddr,
1394 		      unsigned short family)
1395 {
1396 	switch (family) {
1397 	case AF_INET:
1398 		return __xfrm4_state_addr_check(x, daddr, saddr);
1399 	case AF_INET6:
1400 		return __xfrm6_state_addr_check(x, daddr, saddr);
1401 	}
1402 	return 0;
1403 }
1404 
1405 static __inline__ int
1406 xfrm_state_addr_flow_check(const struct xfrm_state *x, const struct flowi *fl,
1407 			   unsigned short family)
1408 {
1409 	switch (family) {
1410 	case AF_INET:
1411 		return __xfrm4_state_addr_check(x,
1412 						(const xfrm_address_t *)&fl->u.ip4.daddr,
1413 						(const xfrm_address_t *)&fl->u.ip4.saddr);
1414 	case AF_INET6:
1415 		return __xfrm6_state_addr_check(x,
1416 						(const xfrm_address_t *)&fl->u.ip6.daddr,
1417 						(const xfrm_address_t *)&fl->u.ip6.saddr);
1418 	}
1419 	return 0;
1420 }
1421 
1422 static inline int xfrm_state_kern(const struct xfrm_state *x)
1423 {
1424 	return atomic_read(&x->tunnel_users);
1425 }
1426 
1427 static inline bool xfrm_id_proto_valid(u8 proto)
1428 {
1429 	switch (proto) {
1430 	case IPPROTO_AH:
1431 	case IPPROTO_ESP:
1432 	case IPPROTO_COMP:
1433 #if IS_ENABLED(CONFIG_IPV6)
1434 	case IPPROTO_ROUTING:
1435 	case IPPROTO_DSTOPTS:
1436 #endif
1437 		return true;
1438 	default:
1439 		return false;
1440 	}
1441 }
1442 
1443 /* IPSEC_PROTO_ANY only matches 3 IPsec protocols, 0 could match all. */
1444 static inline int xfrm_id_proto_match(u8 proto, u8 userproto)
1445 {
1446 	return (!userproto || proto == userproto ||
1447 		(userproto == IPSEC_PROTO_ANY && (proto == IPPROTO_AH ||
1448 						  proto == IPPROTO_ESP ||
1449 						  proto == IPPROTO_COMP)));
1450 }
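/* Usage sketch (editor's illustration): xfrm_state_flush(), declared later in
 * this header, filters states with this helper, so proto == 0 flushes every
 * state while IPSEC_PROTO_ANY flushes only AH, ESP and IPCOMP states:
 *
 *	xfrm_state_flush(net, 0, true, false);
 *	xfrm_state_flush(net, IPSEC_PROTO_ANY, true, false);
 */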
1451 
1452 /*
1453  * xfrm algorithm information
1454  */
1455 struct xfrm_algo_aead_info {
1456 	char *geniv;
1457 	u16 icv_truncbits;
1458 };
1459 
1460 struct xfrm_algo_auth_info {
1461 	u16 icv_truncbits;
1462 	u16 icv_fullbits;
1463 };
1464 
1465 struct xfrm_algo_encr_info {
1466 	char *geniv;
1467 	u16 blockbits;
1468 	u16 defkeybits;
1469 };
1470 
1471 struct xfrm_algo_comp_info {
1472 	u16 threshold;
1473 };
1474 
1475 struct xfrm_algo_desc {
1476 	char *name;
1477 	char *compat;
1478 	u8 available:1;
1479 	u8 pfkey_supported:1;
1480 	union {
1481 		struct xfrm_algo_aead_info aead;
1482 		struct xfrm_algo_auth_info auth;
1483 		struct xfrm_algo_encr_info encr;
1484 		struct xfrm_algo_comp_info comp;
1485 	} uinfo;
1486 	struct sadb_alg desc;
1487 };
1488 
1489 /* XFRM protocol handlers.  */
1490 struct xfrm4_protocol {
1491 	int (*handler)(struct sk_buff *skb);
1492 	int (*input_handler)(struct sk_buff *skb, int nexthdr, __be32 spi,
1493 			     int encap_type);
1494 	int (*cb_handler)(struct sk_buff *skb, int err);
1495 	int (*err_handler)(struct sk_buff *skb, u32 info);
1496 
1497 	struct xfrm4_protocol __rcu *next;
1498 	int priority;
1499 };
1500 
1501 struct xfrm6_protocol {
1502 	int (*handler)(struct sk_buff *skb);
1503 	int (*input_handler)(struct sk_buff *skb, int nexthdr, __be32 spi,
1504 			     int encap_type);
1505 	int (*cb_handler)(struct sk_buff *skb, int err);
1506 	int (*err_handler)(struct sk_buff *skb, struct inet6_skb_parm *opt,
1507 			   u8 type, u8 code, int offset, __be32 info);
1508 
1509 	struct xfrm6_protocol __rcu *next;
1510 	int priority;
1511 };
1512 
1513 /* XFRM tunnel handlers.  */
1514 struct xfrm_tunnel {
1515 	int (*handler)(struct sk_buff *skb);
1516 	int (*cb_handler)(struct sk_buff *skb, int err);
1517 	int (*err_handler)(struct sk_buff *skb, u32 info);
1518 
1519 	struct xfrm_tunnel __rcu *next;
1520 	int priority;
1521 };
1522 
1523 struct xfrm6_tunnel {
1524 	int (*handler)(struct sk_buff *skb);
1525 	int (*cb_handler)(struct sk_buff *skb, int err);
1526 	int (*err_handler)(struct sk_buff *skb, struct inet6_skb_parm *opt,
1527 			   u8 type, u8 code, int offset, __be32 info);
1528 	struct xfrm6_tunnel __rcu *next;
1529 	int priority;
1530 };
1531 
1532 void xfrm_init(void);
1533 void xfrm4_init(void);
1534 int xfrm_state_init(struct net *net);
1535 void xfrm_state_fini(struct net *net);
1536 void xfrm4_state_init(void);
1537 void xfrm4_protocol_init(void);
1538 #ifdef CONFIG_XFRM
1539 int xfrm6_init(void);
1540 void xfrm6_fini(void);
1541 int xfrm6_state_init(void);
1542 void xfrm6_state_fini(void);
1543 int xfrm6_protocol_init(void);
1544 void xfrm6_protocol_fini(void);
1545 #else
1546 static inline int xfrm6_init(void)
1547 {
1548 	return 0;
1549 }
1550 static inline void xfrm6_fini(void)
1551 {
1552 	;
1553 }
1554 #endif
1555 
1556 #ifdef CONFIG_XFRM_STATISTICS
1557 int xfrm_proc_init(struct net *net);
1558 void xfrm_proc_fini(struct net *net);
1559 #endif
1560 
1561 int xfrm_sysctl_init(struct net *net);
1562 #ifdef CONFIG_SYSCTL
1563 void xfrm_sysctl_fini(struct net *net);
1564 #else
1565 static inline void xfrm_sysctl_fini(struct net *net)
1566 {
1567 }
1568 #endif
1569 
1570 void xfrm_state_walk_init(struct xfrm_state_walk *walk, u8 proto,
1571 			  struct xfrm_address_filter *filter);
1572 int xfrm_state_walk(struct net *net, struct xfrm_state_walk *walk,
1573 		    int (*func)(struct xfrm_state *, int, void*), void *);
1574 void xfrm_state_walk_done(struct xfrm_state_walk *walk, struct net *net);
1575 struct xfrm_state *xfrm_state_alloc(struct net *net);
1576 void xfrm_state_free(struct xfrm_state *x);
1577 struct xfrm_state *xfrm_state_find(const xfrm_address_t *daddr,
1578 				   const xfrm_address_t *saddr,
1579 				   const struct flowi *fl,
1580 				   struct xfrm_tmpl *tmpl,
1581 				   struct xfrm_policy *pol, int *err,
1582 				   unsigned short family, u32 if_id);
1583 struct xfrm_state *xfrm_stateonly_find(struct net *net, u32 mark, u32 if_id,
1584 				       xfrm_address_t *daddr,
1585 				       xfrm_address_t *saddr,
1586 				       unsigned short family,
1587 				       u8 mode, u8 proto, u32 reqid);
1588 struct xfrm_state *xfrm_state_lookup_byspi(struct net *net, __be32 spi,
1589 					      unsigned short family);
1590 int xfrm_state_check_expire(struct xfrm_state *x);
1591 #ifdef CONFIG_XFRM_OFFLOAD
1592 static inline void xfrm_dev_state_update_curlft(struct xfrm_state *x)
1593 {
1594 	struct xfrm_dev_offload *xdo = &x->xso;
1595 	struct net_device *dev = READ_ONCE(xdo->dev);
1596 
1597 	if (x->xso.type != XFRM_DEV_OFFLOAD_PACKET)
1598 		return;
1599 
1600 	if (dev && dev->xfrmdev_ops &&
1601 	    dev->xfrmdev_ops->xdo_dev_state_update_curlft)
1602 		dev->xfrmdev_ops->xdo_dev_state_update_curlft(x);
1603 
1604 }
1605 #else
1606 static inline void xfrm_dev_state_update_curlft(struct xfrm_state *x) {}
1607 #endif
1608 void xfrm_state_insert(struct xfrm_state *x);
1609 int xfrm_state_add(struct xfrm_state *x);
1610 int xfrm_state_update(struct xfrm_state *x);
1611 struct xfrm_state *xfrm_state_lookup(struct net *net, u32 mark,
1612 				     const xfrm_address_t *daddr, __be32 spi,
1613 				     u8 proto, unsigned short family);
1614 struct xfrm_state *xfrm_state_lookup_byaddr(struct net *net, u32 mark,
1615 					    const xfrm_address_t *daddr,
1616 					    const xfrm_address_t *saddr,
1617 					    u8 proto,
1618 					    unsigned short family);
1619 #ifdef CONFIG_XFRM_SUB_POLICY
1620 void xfrm_tmpl_sort(struct xfrm_tmpl **dst, struct xfrm_tmpl **src, int n,
1621 		    unsigned short family);
1622 void xfrm_state_sort(struct xfrm_state **dst, struct xfrm_state **src, int n,
1623 		     unsigned short family);
1624 #else
1625 static inline void xfrm_tmpl_sort(struct xfrm_tmpl **d, struct xfrm_tmpl **s,
1626 				  int n, unsigned short family)
1627 {
1628 }
1629 
1630 static inline void xfrm_state_sort(struct xfrm_state **d, struct xfrm_state **s,
1631 				   int n, unsigned short family)
1632 {
1633 }
1634 #endif
1635 
1636 struct xfrmk_sadinfo {
1637 	u32 sadhcnt; /* current hash bkts */
1638 	u32 sadhmcnt; /* max allowed hash bkts */
1639 	u32 sadcnt; /* current running count */
1640 };
1641 
1642 struct xfrmk_spdinfo {
1643 	u32 incnt;
1644 	u32 outcnt;
1645 	u32 fwdcnt;
1646 	u32 inscnt;
1647 	u32 outscnt;
1648 	u32 fwdscnt;
1649 	u32 spdhcnt;
1650 	u32 spdhmcnt;
1651 };
1652 
1653 struct xfrm_state *xfrm_find_acq_byseq(struct net *net, u32 mark, u32 seq);
1654 int xfrm_state_delete(struct xfrm_state *x);
1655 int xfrm_state_flush(struct net *net, u8 proto, bool task_valid, bool sync);
1656 int xfrm_dev_state_flush(struct net *net, struct net_device *dev, bool task_valid);
1657 int xfrm_dev_policy_flush(struct net *net, struct net_device *dev,
1658 			  bool task_valid);
1659 void xfrm_sad_getinfo(struct net *net, struct xfrmk_sadinfo *si);
1660 void xfrm_spd_getinfo(struct net *net, struct xfrmk_spdinfo *si);
1661 u32 xfrm_replay_seqhi(struct xfrm_state *x, __be32 net_seq);
1662 int xfrm_init_replay(struct xfrm_state *x, struct netlink_ext_ack *extack);
1663 u32 xfrm_state_mtu(struct xfrm_state *x, int mtu);
1664 int __xfrm_init_state(struct xfrm_state *x, bool init_replay, bool offload,
1665 		      struct netlink_ext_ack *extack);
1666 int xfrm_init_state(struct xfrm_state *x);
1667 int xfrm_input(struct sk_buff *skb, int nexthdr, __be32 spi, int encap_type);
1668 int xfrm_input_resume(struct sk_buff *skb, int nexthdr);
1669 int xfrm_trans_queue_net(struct net *net, struct sk_buff *skb,
1670 			 int (*finish)(struct net *, struct sock *,
1671 				       struct sk_buff *));
1672 int xfrm_trans_queue(struct sk_buff *skb,
1673 		     int (*finish)(struct net *, struct sock *,
1674 				   struct sk_buff *));
1675 int xfrm_output_resume(struct sock *sk, struct sk_buff *skb, int err);
1676 int xfrm_output(struct sock *sk, struct sk_buff *skb);
1677 
1678 #if IS_ENABLED(CONFIG_NET_PKTGEN)
1679 int pktgen_xfrm_outer_mode_output(struct xfrm_state *x, struct sk_buff *skb);
1680 #endif
1681 
1682 void xfrm_local_error(struct sk_buff *skb, int mtu);
1683 int xfrm4_extract_input(struct xfrm_state *x, struct sk_buff *skb);
1684 int xfrm4_rcv_encap(struct sk_buff *skb, int nexthdr, __be32 spi,
1685 		    int encap_type);
1686 int xfrm4_transport_finish(struct sk_buff *skb, int async);
1687 int xfrm4_rcv(struct sk_buff *skb);
1688 
1689 static inline int xfrm4_rcv_spi(struct sk_buff *skb, int nexthdr, __be32 spi)
1690 {
1691 	XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip4 = NULL;
1692 	XFRM_SPI_SKB_CB(skb)->family = AF_INET;
1693 	XFRM_SPI_SKB_CB(skb)->daddroff = offsetof(struct iphdr, daddr);
1694 	return xfrm_input(skb, nexthdr, spi, 0);
1695 }
1696 
1697 int xfrm4_output(struct net *net, struct sock *sk, struct sk_buff *skb);
1698 int xfrm4_protocol_register(struct xfrm4_protocol *handler, unsigned char protocol);
1699 int xfrm4_protocol_deregister(struct xfrm4_protocol *handler, unsigned char protocol);
1700 int xfrm4_tunnel_register(struct xfrm_tunnel *handler, unsigned short family);
1701 int xfrm4_tunnel_deregister(struct xfrm_tunnel *handler, unsigned short family);
1702 void xfrm4_local_error(struct sk_buff *skb, u32 mtu);
1703 int xfrm6_extract_input(struct xfrm_state *x, struct sk_buff *skb);
1704 int xfrm6_rcv_spi(struct sk_buff *skb, int nexthdr, __be32 spi,
1705 		  struct ip6_tnl *t);
1706 int xfrm6_rcv_encap(struct sk_buff *skb, int nexthdr, __be32 spi,
1707 		    int encap_type);
1708 int xfrm6_transport_finish(struct sk_buff *skb, int async);
1709 int xfrm6_rcv_tnl(struct sk_buff *skb, struct ip6_tnl *t);
1710 int xfrm6_rcv(struct sk_buff *skb);
1711 int xfrm6_input_addr(struct sk_buff *skb, xfrm_address_t *daddr,
1712 		     xfrm_address_t *saddr, u8 proto);
1713 void xfrm6_local_error(struct sk_buff *skb, u32 mtu);
1714 int xfrm6_protocol_register(struct xfrm6_protocol *handler, unsigned char protocol);
1715 int xfrm6_protocol_deregister(struct xfrm6_protocol *handler, unsigned char protocol);
1716 int xfrm6_tunnel_register(struct xfrm6_tunnel *handler, unsigned short family);
1717 int xfrm6_tunnel_deregister(struct xfrm6_tunnel *handler, unsigned short family);
1718 __be32 xfrm6_tunnel_alloc_spi(struct net *net, xfrm_address_t *saddr);
1719 __be32 xfrm6_tunnel_spi_lookup(struct net *net, const xfrm_address_t *saddr);
1720 int xfrm6_output(struct net *net, struct sock *sk, struct sk_buff *skb);
1721 
1722 #ifdef CONFIG_XFRM
1723 void xfrm6_local_rxpmtu(struct sk_buff *skb, u32 mtu);
1724 int xfrm4_udp_encap_rcv(struct sock *sk, struct sk_buff *skb);
1725 int xfrm6_udp_encap_rcv(struct sock *sk, struct sk_buff *skb);
1726 int xfrm_user_policy(struct sock *sk, int optname, sockptr_t optval,
1727 		     int optlen);
1728 #else
1729 static inline int xfrm_user_policy(struct sock *sk, int optname,
1730 				   sockptr_t optval, int optlen)
1731 {
1732 	return -ENOPROTOOPT;
1733 }
1734 #endif
1735 
1736 struct dst_entry *__xfrm_dst_lookup(int family, const struct xfrm_dst_lookup_params *params);
1737 
1738 struct xfrm_policy *xfrm_policy_alloc(struct net *net, gfp_t gfp);
1739 
1740 void xfrm_policy_walk_init(struct xfrm_policy_walk *walk, u8 type);
1741 int xfrm_policy_walk(struct net *net, struct xfrm_policy_walk *walk,
1742 		     int (*func)(struct xfrm_policy *, int, int, void*),
1743 		     void *);
1744 void xfrm_policy_walk_done(struct xfrm_policy_walk *walk, struct net *net);
1745 int xfrm_policy_insert(int dir, struct xfrm_policy *policy, int excl);
1746 struct xfrm_policy *xfrm_policy_bysel_ctx(struct net *net,
1747 					  const struct xfrm_mark *mark,
1748 					  u32 if_id, u8 type, int dir,
1749 					  struct xfrm_selector *sel,
1750 					  struct xfrm_sec_ctx *ctx, int delete,
1751 					  int *err);
1752 struct xfrm_policy *xfrm_policy_byid(struct net *net,
1753 				     const struct xfrm_mark *mark, u32 if_id,
1754 				     u8 type, int dir, u32 id, int delete,
1755 				     int *err);
1756 int xfrm_policy_flush(struct net *net, u8 type, bool task_valid);
1757 void xfrm_policy_hash_rebuild(struct net *net);
1758 u32 xfrm_get_acqseq(void);
1759 int verify_spi_info(u8 proto, u32 min, u32 max, struct netlink_ext_ack *extack);
1760 int xfrm_alloc_spi(struct xfrm_state *x, u32 minspi, u32 maxspi,
1761 		   struct netlink_ext_ack *extack);
1762 struct xfrm_state *xfrm_find_acq(struct net *net, const struct xfrm_mark *mark,
1763 				 u8 mode, u32 reqid, u32 if_id, u8 proto,
1764 				 const xfrm_address_t *daddr,
1765 				 const xfrm_address_t *saddr, int create,
1766 				 unsigned short family);
1767 int xfrm_sk_policy_insert(struct sock *sk, int dir, struct xfrm_policy *pol);
1768 
1769 #ifdef CONFIG_XFRM_MIGRATE
1770 int km_migrate(const struct xfrm_selector *sel, u8 dir, u8 type,
1771 	       const struct xfrm_migrate *m, int num_bundles,
1772 	       const struct xfrm_kmaddress *k,
1773 	       const struct xfrm_encap_tmpl *encap);
1774 struct xfrm_state *xfrm_migrate_state_find(struct xfrm_migrate *m, struct net *net,
1775 						u32 if_id);
1776 struct xfrm_state *xfrm_state_migrate(struct xfrm_state *x,
1777 				      struct xfrm_migrate *m,
1778 				      struct xfrm_encap_tmpl *encap);
1779 int xfrm_migrate(const struct xfrm_selector *sel, u8 dir, u8 type,
1780 		 struct xfrm_migrate *m, int num_bundles,
1781 		 struct xfrm_kmaddress *k, struct net *net,
1782 		 struct xfrm_encap_tmpl *encap, u32 if_id,
1783 		 struct netlink_ext_ack *extack);
1784 #endif
1785 
1786 int km_new_mapping(struct xfrm_state *x, xfrm_address_t *ipaddr, __be16 sport);
1787 void km_policy_expired(struct xfrm_policy *pol, int dir, int hard, u32 portid);
1788 int km_report(struct net *net, u8 proto, struct xfrm_selector *sel,
1789 	      xfrm_address_t *addr);
1790 
1791 void xfrm_input_init(void);
1792 int xfrm_parse_spi(struct sk_buff *skb, u8 nexthdr, __be32 *spi, __be32 *seq);
1793 
1794 void xfrm_probe_algs(void);
1795 int xfrm_count_pfkey_auth_supported(void);
1796 int xfrm_count_pfkey_enc_supported(void);
1797 struct xfrm_algo_desc *xfrm_aalg_get_byidx(unsigned int idx);
1798 struct xfrm_algo_desc *xfrm_ealg_get_byidx(unsigned int idx);
1799 struct xfrm_algo_desc *xfrm_aalg_get_byid(int alg_id);
1800 struct xfrm_algo_desc *xfrm_ealg_get_byid(int alg_id);
1801 struct xfrm_algo_desc *xfrm_calg_get_byid(int alg_id);
1802 struct xfrm_algo_desc *xfrm_aalg_get_byname(const char *name, int probe);
1803 struct xfrm_algo_desc *xfrm_ealg_get_byname(const char *name, int probe);
1804 struct xfrm_algo_desc *xfrm_calg_get_byname(const char *name, int probe);
1805 struct xfrm_algo_desc *xfrm_aead_get_byname(const char *name, int icv_len,
1806 					    int probe);
1807 
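/* Address comparison helpers.  xfrm_addr_equal() dispatches on the
 * address family; anything other than AF_INET6 is treated as IPv4,
 * so only the first 32 bits are compared in the default case.
 */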
1808 static inline bool xfrm6_addr_equal(const xfrm_address_t *a,
1809 				    const xfrm_address_t *b)
1810 {
1811 	return ipv6_addr_equal((const struct in6_addr *)a,
1812 			       (const struct in6_addr *)b);
1813 }
1814 
1815 static inline bool xfrm_addr_equal(const xfrm_address_t *a,
1816 				   const xfrm_address_t *b,
1817 				   sa_family_t family)
1818 {
1819 	switch (family) {
1820 	default:
1821 	case AF_INET:
1822 		return ((__force u32)a->a4 ^ (__force u32)b->a4) == 0;
1823 	case AF_INET6:
1824 		return xfrm6_addr_equal(a, b);
1825 	}
1826 }
1827 
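/* Policy indices carry the direction in their low three bits, so
 * masking with 7 recovers the XFRM_POLICY_* direction value.
 */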
1828 static inline int xfrm_policy_id2dir(u32 index)
1829 {
1830 	return index & 7;
1831 }
1832 
1833 #ifdef CONFIG_XFRM
1834 void xfrm_replay_advance(struct xfrm_state *x, __be32 net_seq);
1835 int xfrm_replay_check(struct xfrm_state *x, struct sk_buff *skb, __be32 net_seq);
1836 void xfrm_replay_notify(struct xfrm_state *x, int event);
1837 int xfrm_replay_overflow(struct xfrm_state *x, struct sk_buff *skb);
1838 int xfrm_replay_recheck(struct xfrm_state *x, struct sk_buff *skb, __be32 net_seq);
1839 
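/* The two helpers below take the RCU read lock and ask netlink whether
 * anyone is subscribed to the relevant xfrm multicast group, so that
 * event messages can be skipped when there are no listeners.
 */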
1840 static inline int xfrm_aevent_is_on(struct net *net)
1841 {
1842 	struct sock *nlsk;
1843 	int ret = 0;
1844 
1845 	rcu_read_lock();
1846 	nlsk = rcu_dereference(net->xfrm.nlsk);
1847 	if (nlsk)
1848 		ret = netlink_has_listeners(nlsk, XFRMNLGRP_AEVENTS);
1849 	rcu_read_unlock();
1850 	return ret;
1851 }
1852 
1853 static inline int xfrm_acquire_is_on(struct net *net)
1854 {
1855 	struct sock *nlsk;
1856 	int ret = 0;
1857 
1858 	rcu_read_lock();
1859 	nlsk = rcu_dereference(net->xfrm.nlsk);
1860 	if (nlsk)
1861 		ret = netlink_has_listeners(nlsk, XFRMNLGRP_ACQUIRE);
1862 	rcu_read_unlock();
1863 
1864 	return ret;
1865 }
1866 #endif
1867 
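/* Sizes of the variable-length algorithm attributes: alg_key_len is
 * given in bits, so (len + 7) / 8 rounds up to the number of key
 * bytes stored immediately after the fixed-size struct.
 */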
1868 static inline unsigned int aead_len(struct xfrm_algo_aead *alg)
1869 {
1870 	return sizeof(*alg) + ((alg->alg_key_len + 7) / 8);
1871 }
1872 
1873 static inline unsigned int xfrm_alg_len(const struct xfrm_algo *alg)
1874 {
1875 	return sizeof(*alg) + ((alg->alg_key_len + 7) / 8);
1876 }
1877 
1878 static inline unsigned int xfrm_alg_auth_len(const struct xfrm_algo_auth *alg)
1879 {
1880 	return sizeof(*alg) + ((alg->alg_key_len + 7) / 8);
1881 }
1882 
1883 static inline unsigned int xfrm_replay_state_esn_len(struct xfrm_replay_state_esn *replay_esn)
1884 {
1885 	return sizeof(*replay_esn) + replay_esn->bmp_len * sizeof(__u32);
1886 }
1887 
1888 #ifdef CONFIG_XFRM_MIGRATE
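/* Duplicate both ESN replay windows of @orig into @x for state
 * migration; returns -ENOMEM if either allocation fails.
 */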
1889 static inline int xfrm_replay_clone(struct xfrm_state *x,
1890 				     struct xfrm_state *orig)
1891 {
1892 
1893 	x->replay_esn = kmemdup(orig->replay_esn,
1894 				xfrm_replay_state_esn_len(orig->replay_esn),
1895 				GFP_KERNEL);
1896 	if (!x->replay_esn)
1897 		return -ENOMEM;
1898 	x->preplay_esn = kmemdup(orig->preplay_esn,
1899 				 xfrm_replay_state_esn_len(orig->preplay_esn),
1900 				 GFP_KERNEL);
1901 	if (!x->preplay_esn)
1902 		return -ENOMEM;
1903 
1904 	return 0;
1905 }
1906 
1907 static inline struct xfrm_algo_aead *xfrm_algo_aead_clone(struct xfrm_algo_aead *orig)
1908 {
1909 	return kmemdup(orig, aead_len(orig), GFP_KERNEL);
1910 }
1911 
1912 
1913 static inline struct xfrm_algo *xfrm_algo_clone(struct xfrm_algo *orig)
1914 {
1915 	return kmemdup(orig, xfrm_alg_len(orig), GFP_KERNEL);
1916 }
1917 
1918 static inline struct xfrm_algo_auth *xfrm_algo_auth_clone(struct xfrm_algo_auth *orig)
1919 {
1920 	return kmemdup(orig, xfrm_alg_auth_len(orig), GFP_KERNEL);
1921 }
1922 
1923 static inline void xfrm_states_put(struct xfrm_state **states, int n)
1924 {
1925 	int i;
1926 	for (i = 0; i < n; i++)
1927 		xfrm_state_put(*(states + i));
1928 }
1929 
1930 static inline void xfrm_states_delete(struct xfrm_state **states, int n)
1931 {
1932 	int i;
1933 	for (i = 0; i < n; i++)
1934 		xfrm_state_delete(*(states + i));
1935 }
1936 #endif
1937 
1938 void __init xfrm_dev_init(void);
1939 
1940 #ifdef CONFIG_XFRM_OFFLOAD
1941 void xfrm_dev_resume(struct sk_buff *skb);
1942 void xfrm_dev_backlog(struct softnet_data *sd);
1943 struct sk_buff *validate_xmit_xfrm(struct sk_buff *skb, netdev_features_t features, bool *again);
1944 int xfrm_dev_state_add(struct net *net, struct xfrm_state *x,
1945 		       struct xfrm_user_offload *xuo,
1946 		       struct netlink_ext_ack *extack);
1947 int xfrm_dev_policy_add(struct net *net, struct xfrm_policy *xp,
1948 			struct xfrm_user_offload *xuo, u8 dir,
1949 			struct netlink_ext_ack *extack);
1950 bool xfrm_dev_offload_ok(struct sk_buff *skb, struct xfrm_state *x);
1951 void xfrm_dev_state_delete(struct xfrm_state *x);
1952 void xfrm_dev_state_free(struct xfrm_state *x);
1953 
1954 static inline void xfrm_dev_state_advance_esn(struct xfrm_state *x)
1955 {
1956 	struct xfrm_dev_offload *xso = &x->xso;
1957 	struct net_device *dev = READ_ONCE(xso->dev);
1958 
1959 	if (dev && dev->xfrmdev_ops->xdo_dev_state_advance_esn)
1960 		dev->xfrmdev_ops->xdo_dev_state_advance_esn(x);
1961 }
1962 
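/* A dst may use hardware offload only when its state has an
 * offload-capable type and it is the innermost bundle entry (the
 * child dst carries no further xfrm); with a device offload handle,
 * the offload device must also match the device of the path route.
 */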
1963 static inline bool xfrm_dst_offload_ok(struct dst_entry *dst)
1964 {
1965 	struct xfrm_state *x = dst->xfrm;
1966 	struct xfrm_dst *xdst;
1967 
1968 	if (!x || !x->type_offload)
1969 		return false;
1970 
1971 	xdst = (struct xfrm_dst *) dst;
1972 	if (!x->xso.offload_handle && !xdst->child->xfrm)
1973 		return true;
1974 	if (x->xso.offload_handle && (x->xso.dev == xfrm_dst_path(dst)->dev) &&
1975 	    !xdst->child->xfrm)
1976 		return true;
1977 
1978 	return false;
1979 }
1980 
1981 static inline void xfrm_dev_policy_delete(struct xfrm_policy *x)
1982 {
1983 	struct xfrm_dev_offload *xdo = &x->xdo;
1984 	struct net_device *dev = xdo->dev;
1985 
1986 	if (dev && dev->xfrmdev_ops && dev->xfrmdev_ops->xdo_dev_policy_delete)
1987 		dev->xfrmdev_ops->xdo_dev_policy_delete(x);
1988 }
1989 
1990 static inline void xfrm_dev_policy_free(struct xfrm_policy *x)
1991 {
1992 	struct xfrm_dev_offload *xdo = &x->xdo;
1993 	struct net_device *dev = xdo->dev;
1994 
1995 	if (dev && dev->xfrmdev_ops) {
1996 		if (dev->xfrmdev_ops->xdo_dev_policy_free)
1997 			dev->xfrmdev_ops->xdo_dev_policy_free(x);
1998 		xdo->dev = NULL;
1999 		netdev_put(dev, &xdo->dev_tracker);
2000 	}
2001 }
2002 #else
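/* !CONFIG_XFRM_OFFLOAD: no-op stubs so callers need not be built
 * conditionally.
 */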
2003 static inline void xfrm_dev_resume(struct sk_buff *skb)
2004 {
2005 }
2006 
2007 static inline void xfrm_dev_backlog(struct softnet_data *sd)
2008 {
2009 }
2010 
2011 static inline struct sk_buff *validate_xmit_xfrm(struct sk_buff *skb, netdev_features_t features, bool *again)
2012 {
2013 	return skb;
2014 }
2015 
2016 static inline int xfrm_dev_state_add(struct net *net, struct xfrm_state *x, struct xfrm_user_offload *xuo, struct netlink_ext_ack *extack)
2017 {
2018 	return 0;
2019 }
2020 
2021 static inline void xfrm_dev_state_delete(struct xfrm_state *x)
2022 {
2023 }
2024 
2025 static inline void xfrm_dev_state_free(struct xfrm_state *x)
2026 {
2027 }
2028 
2029 static inline int xfrm_dev_policy_add(struct net *net, struct xfrm_policy *xp,
2030 				      struct xfrm_user_offload *xuo, u8 dir,
2031 				      struct netlink_ext_ack *extack)
2032 {
2033 	return 0;
2034 }
2035 
2036 static inline void xfrm_dev_policy_delete(struct xfrm_policy *x)
2037 {
2038 }
2039 
2040 static inline void xfrm_dev_policy_free(struct xfrm_policy *x)
2041 {
2042 }
2043 
2044 static inline bool xfrm_dev_offload_ok(struct sk_buff *skb, struct xfrm_state *x)
2045 {
2046 	return false;
2047 }
2048 
2049 static inline void xfrm_dev_state_advance_esn(struct xfrm_state *x)
2050 {
2051 }
2052 
2053 static inline bool xfrm_dst_offload_ok(struct dst_entry *dst)
2054 {
2055 	return false;
2056 }
2057 #endif
2058 
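/* Copy the XFRMA_MARK netlink attribute into @m (zeroing it when the
 * attribute is absent) and return the masked mark value.  A typical
 * caller in a netlink handler might look like (sketch only):
 *
 *	struct xfrm_mark m;
 *	u32 mark = xfrm_mark_get(attrs, &m);
 */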
2059 static inline int xfrm_mark_get(struct nlattr **attrs, struct xfrm_mark *m)
2060 {
2061 	if (attrs[XFRMA_MARK])
2062 		memcpy(m, nla_data(attrs[XFRMA_MARK]), sizeof(struct xfrm_mark));
2063 	else
2064 		m->v = m->m = 0;
2065 
2066 	return m->v & m->m;
2067 }
2068 
2069 static inline int xfrm_mark_put(struct sk_buff *skb, const struct xfrm_mark *m)
2070 {
2071 	int ret = 0;
2072 
2073 	if (m->m | m->v)
2074 		ret = nla_put(skb, XFRMA_MARK, sizeof(struct xfrm_mark), m);
2075 	return ret;
2076 }
2077 
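/* Merge the state's output mark into @mark: bits covered by the mask
 * come from the state, the remaining bits are kept from the packet.
 */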
2078 static inline __u32 xfrm_smark_get(__u32 mark, struct xfrm_state *x)
2079 {
2080 	struct xfrm_mark *m = &x->props.smark;
2081 
2082 	return (m->v & m->m) | (mark & ~m->m);
2083 }
2084 
2085 static inline int xfrm_if_id_put(struct sk_buff *skb, __u32 if_id)
2086 {
2087 	int ret = 0;
2088 
2089 	if (if_id)
2090 		ret = nla_put_u32(skb, XFRMA_IF_ID, if_id);
2091 	return ret;
2092 }
2093 
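/* Return -EINVAL when the packet came in through an IPv4 or IPv6
 * tunnel but the matched state's outer mode does not carry
 * XFRM_MODE_FLAG_TUNNEL.
 */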
2094 static inline int xfrm_tunnel_check(struct sk_buff *skb, struct xfrm_state *x,
2095 				    unsigned int family)
2096 {
2097 	bool tunnel = false;
2098 
2099 	switch (family) {
2100 	case AF_INET:
2101 		if (XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip4)
2102 			tunnel = true;
2103 		break;
2104 	case AF_INET6:
2105 		if (XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip6)
2106 			tunnel = true;
2107 		break;
2108 	}
2109 	if (tunnel && !(x->outer_mode.flags & XFRM_MODE_FLAG_TUNNEL))
2110 		return -EINVAL;
2111 
2112 	return 0;
2113 }
2114 
2115 extern const int xfrm_msg_min[XFRM_NR_MSGTYPES];
2116 extern const struct nla_policy xfrma_policy[XFRMA_MAX+1];
2117 
2118 struct xfrm_translator {
2119 	/* Allocate frag_list and put compat translation there */
2120 	int (*alloc_compat)(struct sk_buff *skb, const struct nlmsghdr *src);
2121 
2122 	/* Allocate nlmsg with 64-bit translation of received 32-bit message */
2123 	struct nlmsghdr *(*rcv_msg_compat)(const struct nlmsghdr *nlh,
2124 			int maxtype, const struct nla_policy *policy,
2125 			struct netlink_ext_ack *extack);
2126 
2127 	/* Translate 32-bit user_policy from sockptr */
2128 	int (*xlate_user_policy_sockptr)(u8 **pdata32, int optlen);
2129 
2130 	struct module *owner;
2131 };
2132 
2133 #if IS_ENABLED(CONFIG_XFRM_USER_COMPAT)
2134 extern int xfrm_register_translator(struct xfrm_translator *xtr);
2135 extern int xfrm_unregister_translator(struct xfrm_translator *xtr);
2136 extern struct xfrm_translator *xfrm_get_translator(void);
2137 extern void xfrm_put_translator(struct xfrm_translator *xtr);
2138 #else
2139 static inline struct xfrm_translator *xfrm_get_translator(void)
2140 {
2141 	return NULL;
2142 }
2143 static inline void xfrm_put_translator(struct xfrm_translator *xtr)
2144 {
2145 }
2146 #endif
2147 
2148 #if IS_ENABLED(CONFIG_IPV6)
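/* True only for IPv6 UDP or raw sockets that have IPV6_DONTFRAG set;
 * other sockets (or no socket at all) never opt out of fragmentation
 * here.
 */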
2149 static inline bool xfrm6_local_dontfrag(const struct sock *sk)
2150 {
2151 	int proto;
2152 
2153 	if (!sk || sk->sk_family != AF_INET6)
2154 		return false;
2155 
2156 	proto = sk->sk_protocol;
2157 	if (proto == IPPROTO_UDP || proto == IPPROTO_RAW)
2158 		return inet6_sk(sk)->dontfrag;
2159 
2160 	return false;
2161 }
2162 #endif
2163 
2164 #if (IS_BUILTIN(CONFIG_XFRM_INTERFACE) && IS_ENABLED(CONFIG_DEBUG_INFO_BTF)) || \
2165     (IS_MODULE(CONFIG_XFRM_INTERFACE) && IS_ENABLED(CONFIG_DEBUG_INFO_BTF_MODULES))
2166 
2167 extern struct metadata_dst __percpu *xfrm_bpf_md_dst;
2168 
2169 int register_xfrm_interface_bpf(void);
2170 
2171 #else
2172 
2173 static inline int register_xfrm_interface_bpf(void)
2174 {
2175 	return 0;
2176 }
2177 
2178 #endif
2179 
2180 #endif	/* _NET_XFRM_H */
2181