1  /* SPDX-License-Identifier: GPL-2.0 */
2  #ifndef _NET_XFRM_H
3  #define _NET_XFRM_H
4  
5  #include <linux/compiler.h>
6  #include <linux/xfrm.h>
7  #include <linux/spinlock.h>
8  #include <linux/list.h>
9  #include <linux/skbuff.h>
10  #include <linux/socket.h>
11  #include <linux/pfkeyv2.h>
12  #include <linux/ipsec.h>
13  #include <linux/in6.h>
14  #include <linux/mutex.h>
15  #include <linux/audit.h>
16  #include <linux/slab.h>
17  #include <linux/refcount.h>
18  #include <linux/sockptr.h>
19  
20  #include <net/sock.h>
21  #include <net/dst.h>
22  #include <net/ip.h>
23  #include <net/route.h>
24  #include <net/ipv6.h>
25  #include <net/ip6_fib.h>
26  #include <net/flow.h>
27  #include <net/gro_cells.h>
28  
29  #include <linux/interrupt.h>
30  
31  #ifdef CONFIG_XFRM_STATISTICS
32  #include <net/snmp.h>
33  #endif
34  
35  #define XFRM_PROTO_ESP		50
36  #define XFRM_PROTO_AH		51
37  #define XFRM_PROTO_COMP		108
38  #define XFRM_PROTO_IPIP		4
39  #define XFRM_PROTO_IPV6		41
40  #define XFRM_PROTO_ROUTING	IPPROTO_ROUTING
41  #define XFRM_PROTO_DSTOPTS	IPPROTO_DSTOPTS
42  
43  #define XFRM_ALIGN4(len)	(((len) + 3) & ~3)
44  #define XFRM_ALIGN8(len)	(((len) + 7) & ~7)
45  #define MODULE_ALIAS_XFRM_MODE(family, encap) \
46  	MODULE_ALIAS("xfrm-mode-" __stringify(family) "-" __stringify(encap))
47  #define MODULE_ALIAS_XFRM_TYPE(family, proto) \
48  	MODULE_ALIAS("xfrm-type-" __stringify(family) "-" __stringify(proto))
49  #define MODULE_ALIAS_XFRM_OFFLOAD_TYPE(family, proto) \
50  	MODULE_ALIAS("xfrm-offload-" __stringify(family) "-" __stringify(proto))
51  
52  #ifdef CONFIG_XFRM_STATISTICS
53  #define XFRM_INC_STATS(net, field)	SNMP_INC_STATS((net)->mib.xfrm_statistics, field)
54  #else
55  #define XFRM_INC_STATS(net, field)	((void)(net))
56  #endif
57  
58  
59  /* Organization of SPD aka "XFRM rules"
60     ------------------------------------
61  
62     Basic objects:
63     - policy rule, struct xfrm_policy (=SPD entry)
64     - bundle of transformations, struct dst_entry == struct xfrm_dst (=SA bundle)
65     - instance of a transformer, struct xfrm_state (=SA)
66     - template to clone xfrm_state, struct xfrm_tmpl
67  
68     SPD is plain linear list of xfrm_policy rules, ordered by priority.
69     (To be compatible with existing pfkeyv2 implementations,
70     many rules with priority of 0x7fffffff are allowed to exist and
71     such rules are ordered in an unpredictable way, thanks to bsd folks.)
72  
73     Lookup is plain linear search until the first match with selector.
74  
75     If "action" is "block", then we prohibit the flow, otherwise:
76     if "xfrm_nr" is zero, the flow passes untransformed. Otherwise, the
77     policy entry has a list of up to XFRM_MAX_DEPTH transformations,
78     described by xfrm_tmpl templates. Each template is resolved to a
79     complete xfrm_state (see below) and we pack the bundle of
80     transformations into a dst_entry returned to the requestor.
81  
82     dst -. xfrm  .-> xfrm_state #1
83      |---. child .-> dst -. xfrm .-> xfrm_state #2
84                       |---. child .-> dst -. xfrm .-> xfrm_state #3
85                                        |---. child .-> NULL
86  
87     Bundles are cached at the xfrm_policy struct (field ->bundles).
88  
89  
90     Resolution of xfrm_tmpl
91     -----------------------
92     Template contains:
93     1. ->mode		Mode: transport or tunnel
94     2. ->id.proto	Protocol: AH/ESP/IPCOMP
95     3. ->id.daddr	Remote tunnel endpoint, ignored for transport mode.
96        Q: allow to resolve security gateway?
97     4. ->id.spi          If not zero, static SPI.
98     5. ->saddr		Local tunnel endpoint, ignored for transport mode.
99     6. ->algos		List of allowed algos. Plain bitmask now.
100        Q: ealgos, aalgos, calgos. What a mess...
101     7. ->share		Sharing mode.
102        Q: how to implement private sharing mode? To add struct sock* to
103        flow id?
104  
105     Having this template, we search the SAD for entries with an
106     appropriate mode/proto/algo that are permitted by the selector.
107     If no appropriate entry is found, one is requested from the key manager.
108  
109     PROBLEMS:
110     Q: How to find all the bundles referring to a physical path for
111        PMTU discovery? Seems, dst should contain list of all parents...
112        and enter to infinite locking hierarchy disaster.
113        No! It is easier, we will not search for them, let them find us.
114        We add genid to each dst plus pointer to genid of raw IP route,
115        pmtu disc will update pmtu on raw IP route and increase its genid.
116        dst_check() will see this for top level and trigger resyncing
117        metrics. Plus, it will be made via sk->sk_dst_cache. Solved.
118   */
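/* A minimal sketch of the lookup model described above, assuming a
 * hypothetical flat list "spd" ordered by ->priority (the in-tree lookup
 * uses hash tables and an inexact tree instead, but the semantics are the
 * same: the first selector match wins):
 *
 *	struct xfrm_policy *pol;
 *
 *	list_for_each_entry(pol, &spd, walk.all) {
 *		if (xfrm_selector_match(&pol->selector, fl, family))
 *			return pol;	// highest-priority matching rule
 *	}
 *	return NULL;			// no rule: default policy applies
 */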
119  
120  struct xfrm_state_walk {
121  	struct list_head	all;
122  	u8			state;
123  	u8			dying;
124  	u8			proto;
125  	u32			seq;
126  	struct xfrm_address_filter *filter;
127  };
128  
129  enum {
130  	XFRM_DEV_OFFLOAD_IN = 1,
131  	XFRM_DEV_OFFLOAD_OUT,
132  	XFRM_DEV_OFFLOAD_FWD,
133  };
134  
135  enum {
136  	XFRM_DEV_OFFLOAD_UNSPECIFIED,
137  	XFRM_DEV_OFFLOAD_CRYPTO,
138  	XFRM_DEV_OFFLOAD_PACKET,
139  };
140  
141  enum {
142  	XFRM_DEV_OFFLOAD_FLAG_ACQ = 1,
143  };
144  
145  struct xfrm_dev_offload {
146  	struct net_device	*dev;
147  	netdevice_tracker	dev_tracker;
148  	struct net_device	*real_dev;
149  	unsigned long		offload_handle;
150  	u8			dir : 2;
151  	u8			type : 2;
152  	u8			flags : 2;
153  };
154  
155  struct xfrm_mode {
156  	u8 encap;
157  	u8 family;
158  	u8 flags;
159  };
160  
161  /* Flags for xfrm_mode. */
162  enum {
163  	XFRM_MODE_FLAG_TUNNEL = 1,
164  };
165  
166  enum xfrm_replay_mode {
167  	XFRM_REPLAY_MODE_LEGACY,
168  	XFRM_REPLAY_MODE_BMP,
169  	XFRM_REPLAY_MODE_ESN,
170  };
171  
172  /* Full description of state of transformer. */
173  struct xfrm_state {
174  	possible_net_t		xs_net;
175  	union {
176  		struct hlist_node	gclist;
177  		struct hlist_node	bydst;
178  	};
179  	union {
180  		struct hlist_node	dev_gclist;
181  		struct hlist_node	bysrc;
182  	};
183  	struct hlist_node	byspi;
184  	struct hlist_node	byseq;
185  
186  	refcount_t		refcnt;
187  	spinlock_t		lock;
188  
189  	struct xfrm_id		id;
190  	struct xfrm_selector	sel;
191  	struct xfrm_mark	mark;
192  	u32			if_id;
193  	u32			tfcpad;
194  
195  	u32			genid;
196  
197  	/* Key manager bits */
198  	struct xfrm_state_walk	km;
199  
200  	/* Parameters of this state. */
201  	struct {
202  		u32		reqid;
203  		u8		mode;
204  		u8		replay_window;
205  		u8		aalgo, ealgo, calgo;
206  		u8		flags;
207  		u16		family;
208  		xfrm_address_t	saddr;
209  		int		header_len;
210  		int		trailer_len;
211  		u32		extra_flags;
212  		struct xfrm_mark	smark;
213  	} props;
214  
215  	struct xfrm_lifetime_cfg lft;
216  
217  	/* Data for transformer */
218  	struct xfrm_algo_auth	*aalg;
219  	struct xfrm_algo	*ealg;
220  	struct xfrm_algo	*calg;
221  	struct xfrm_algo_aead	*aead;
222  	const char		*geniv;
223  
224  	/* mapping change rate limiting */
225  	__be16 new_mapping_sport;
226  	u32 new_mapping;	/* seconds */
227  	u32 mapping_maxage;	/* seconds for input SA */
228  
229  	/* Data for encapsulator */
230  	struct xfrm_encap_tmpl	*encap;
231  	struct sock __rcu	*encap_sk;
232  
233  	/* Data for care-of address */
234  	xfrm_address_t	*coaddr;
235  
236  	/* IPComp needs an IPIP tunnel for handling uncompressed packets */
237  	struct xfrm_state	*tunnel;
238  
239  	/* If a tunnel, number of users + 1 */
240  	atomic_t		tunnel_users;
241  
242  	/* State for replay detection */
243  	struct xfrm_replay_state replay;
244  	struct xfrm_replay_state_esn *replay_esn;
245  
246  	/* Replay detection state at the time we sent the last notification */
247  	struct xfrm_replay_state preplay;
248  	struct xfrm_replay_state_esn *preplay_esn;
249  
250  	/* replay detection mode */
251  	enum xfrm_replay_mode    repl_mode;
252  	/* internal flag that only holds state for delayed aevent at the
253  	 * moment
254  	*/
255  	u32			xflags;
256  
257  	/* Replay detection notification settings */
258  	u32			replay_maxage;
259  	u32			replay_maxdiff;
260  
261  	/* Replay detection notification timer */
262  	struct timer_list	rtimer;
263  
264  	/* Statistics */
265  	struct xfrm_stats	stats;
266  
267  	struct xfrm_lifetime_cur curlft;
268  	struct hrtimer		mtimer;
269  
270  	struct xfrm_dev_offload xso;
271  
272  	/* used to fix curlft->add_time when changing date */
273  	long		saved_tmo;
274  
275  	/* Last used time */
276  	time64_t		lastused;
277  
278  	struct page_frag xfrag;
279  
280  	/* Reference to data common to all the instances of this
281  	 * transformer. */
282  	const struct xfrm_type	*type;
283  	struct xfrm_mode	inner_mode;
284  	struct xfrm_mode	inner_mode_iaf;
285  	struct xfrm_mode	outer_mode;
286  
287  	const struct xfrm_type_offload	*type_offload;
288  
289  	/* Security context */
290  	struct xfrm_sec_ctx	*security;
291  
292  	/* Private data of this transformer, format is opaque,
293  	 * interpreted by xfrm_type methods. */
294  	void			*data;
295  };
296  
297  static inline struct net *xs_net(struct xfrm_state *x)
298  {
299  	return read_pnet(&x->xs_net);
300  }
301  
302  /* xflags - make enum if more show up */
303  #define XFRM_TIME_DEFER	1
304  #define XFRM_SOFT_EXPIRE 2
305  
306  enum {
307  	XFRM_STATE_VOID,
308  	XFRM_STATE_ACQ,
309  	XFRM_STATE_VALID,
310  	XFRM_STATE_ERROR,
311  	XFRM_STATE_EXPIRED,
312  	XFRM_STATE_DEAD
313  };
314  
315  /* callback structure passed from either netlink or pfkey */
316  struct km_event {
317  	union {
318  		u32 hard;
319  		u32 proto;
320  		u32 byid;
321  		u32 aevent;
322  		u32 type;
323  	} data;
324  
325  	u32	seq;
326  	u32	portid;
327  	u32	event;
328  	struct net *net;
329  };
330  
331  struct xfrm_if_decode_session_result {
332  	struct net *net;
333  	u32 if_id;
334  };
335  
336  struct xfrm_if_cb {
337  	bool (*decode_session)(struct sk_buff *skb,
338  			       unsigned short family,
339  			       struct xfrm_if_decode_session_result *res);
340  };
341  
342  void xfrm_if_register_cb(const struct xfrm_if_cb *ifcb);
343  void xfrm_if_unregister_cb(void);
344  
345  struct xfrm_dst_lookup_params {
346  	struct net *net;
347  	int tos;
348  	int oif;
349  	xfrm_address_t *saddr;
350  	xfrm_address_t *daddr;
351  	u32 mark;
352  	__u8 ipproto;
353  	union flowi_uli uli;
354  };
355  
356  struct net_device;
357  struct xfrm_type;
358  struct xfrm_dst;
359  struct xfrm_policy_afinfo {
360  	struct dst_ops		*dst_ops;
361  	struct dst_entry	*(*dst_lookup)(const struct xfrm_dst_lookup_params *params);
362  	int			(*get_saddr)(xfrm_address_t *saddr,
363  					     const struct xfrm_dst_lookup_params *params);
364  	int			(*fill_dst)(struct xfrm_dst *xdst,
365  					    struct net_device *dev,
366  					    const struct flowi *fl);
367  	struct dst_entry	*(*blackhole_route)(struct net *net, struct dst_entry *orig);
368  };
369  
370  int xfrm_policy_register_afinfo(const struct xfrm_policy_afinfo *afinfo, int family);
371  void xfrm_policy_unregister_afinfo(const struct xfrm_policy_afinfo *afinfo);
372  void km_policy_notify(struct xfrm_policy *xp, int dir,
373  		      const struct km_event *c);
374  void km_state_notify(struct xfrm_state *x, const struct km_event *c);
375  
376  struct xfrm_tmpl;
377  int km_query(struct xfrm_state *x, struct xfrm_tmpl *t,
378  	     struct xfrm_policy *pol);
379  void km_state_expired(struct xfrm_state *x, int hard, u32 portid);
380  int __xfrm_state_delete(struct xfrm_state *x);
381  
382  struct xfrm_state_afinfo {
383  	u8				family;
384  	u8				proto;
385  
386  	const struct xfrm_type_offload *type_offload_esp;
387  
388  	const struct xfrm_type		*type_esp;
389  	const struct xfrm_type		*type_ipip;
390  	const struct xfrm_type		*type_ipip6;
391  	const struct xfrm_type		*type_comp;
392  	const struct xfrm_type		*type_ah;
393  	const struct xfrm_type		*type_routing;
394  	const struct xfrm_type		*type_dstopts;
395  
396  	int			(*output)(struct net *net, struct sock *sk, struct sk_buff *skb);
397  	int			(*transport_finish)(struct sk_buff *skb,
398  						    int async);
399  	void			(*local_error)(struct sk_buff *skb, u32 mtu);
400  };
401  
402  int xfrm_state_register_afinfo(struct xfrm_state_afinfo *afinfo);
403  int xfrm_state_unregister_afinfo(struct xfrm_state_afinfo *afinfo);
404  struct xfrm_state_afinfo *xfrm_state_get_afinfo(unsigned int family);
405  struct xfrm_state_afinfo *xfrm_state_afinfo_get_rcu(unsigned int family);
406  
407  struct xfrm_input_afinfo {
408  	u8			family;
409  	bool			is_ipip;
410  	int			(*callback)(struct sk_buff *skb, u8 protocol,
411  					    int err);
412  };
413  
414  int xfrm_input_register_afinfo(const struct xfrm_input_afinfo *afinfo);
415  int xfrm_input_unregister_afinfo(const struct xfrm_input_afinfo *afinfo);
416  
417  void xfrm_flush_gc(void);
418  void xfrm_state_delete_tunnel(struct xfrm_state *x);
419  
420  struct xfrm_type {
421  	struct module		*owner;
422  	u8			proto;
423  	u8			flags;
424  #define XFRM_TYPE_NON_FRAGMENT	1
425  #define XFRM_TYPE_REPLAY_PROT	2
426  #define XFRM_TYPE_LOCAL_COADDR	4
427  #define XFRM_TYPE_REMOTE_COADDR	8
428  
429  	int			(*init_state)(struct xfrm_state *x,
430  					      struct netlink_ext_ack *extack);
431  	void			(*destructor)(struct xfrm_state *);
432  	int			(*input)(struct xfrm_state *, struct sk_buff *skb);
433  	int			(*output)(struct xfrm_state *, struct sk_buff *pskb);
434  	int			(*reject)(struct xfrm_state *, struct sk_buff *,
435  					  const struct flowi *);
436  };
437  
438  int xfrm_register_type(const struct xfrm_type *type, unsigned short family);
439  void xfrm_unregister_type(const struct xfrm_type *type, unsigned short family);
440  
441  struct xfrm_type_offload {
442  	struct module	*owner;
443  	u8		proto;
444  	void		(*encap)(struct xfrm_state *, struct sk_buff *pskb);
445  	int		(*input_tail)(struct xfrm_state *x, struct sk_buff *skb);
446  	int		(*xmit)(struct xfrm_state *, struct sk_buff *pskb, netdev_features_t features);
447  };
448  
449  int xfrm_register_type_offload(const struct xfrm_type_offload *type, unsigned short family);
450  void xfrm_unregister_type_offload(const struct xfrm_type_offload *type, unsigned short family);
451  
452  static inline int xfrm_af2proto(unsigned int family)
453  {
454  	switch(family) {
455  	case AF_INET:
456  		return IPPROTO_IPIP;
457  	case AF_INET6:
458  		return IPPROTO_IPV6;
459  	default:
460  		return 0;
461  	}
462  }
463  
464  static inline const struct xfrm_mode *xfrm_ip2inner_mode(struct xfrm_state *x, int ipproto)
465  {
466  	if ((ipproto == IPPROTO_IPIP && x->props.family == AF_INET) ||
467  	    (ipproto == IPPROTO_IPV6 && x->props.family == AF_INET6))
468  		return &x->inner_mode;
469  	else
470  		return &x->inner_mode_iaf;
471  }
472  
473  struct xfrm_tmpl {
474  /* id in template is interpreted as:
475   * daddr - destination of tunnel, may be zero for transport mode.
476   * spi   - zero to acquire spi. Not zero if spi is static, then
477   *	   daddr must be fixed too.
478   * proto - AH/ESP/IPCOMP
479   */
480  	struct xfrm_id		id;
481  
482  /* Source address of tunnel. Ignored, if it is not a tunnel. */
483  	xfrm_address_t		saddr;
484  
485  	unsigned short		encap_family;
486  
487  	u32			reqid;
488  
489  /* Mode: transport, tunnel etc. */
490  	u8			mode;
491  
492  /* Sharing mode: unique, this session only, this user only etc. */
493  	u8			share;
494  
495  /* May skip this transformation if no SA is found */
496  	u8			optional;
497  
498  /* Skip aalgos/ealgos/calgos checks. */
499  	u8			allalgs;
500  
501  /* Bit mask of algos allowed for acquisition */
502  	u32			aalgos;
503  	u32			ealgos;
504  	u32			calgos;
505  };
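/* Example: a hypothetical template for tunnel-mode ESP between two IPv4
 * endpoints. This is only a sketch; real templates are normally built from
 * userspace requests (netlink XFRMA_TMPL attributes), not open-coded, and
 * "remote_addr"/"local_addr" are placeholders:
 *
 *	struct xfrm_tmpl t = {
 *		.id.proto	= IPPROTO_ESP,
 *		.id.daddr.a4	= remote_addr,	// remote tunnel endpoint
 *		.saddr.a4	= local_addr,	// local tunnel endpoint
 *		.encap_family	= AF_INET,
 *		.reqid		= 1,
 *		.mode		= XFRM_MODE_TUNNEL,
 *		.aalgos		= ~(u32)0,	// any authentication algorithm
 *		.ealgos		= ~(u32)0,	// any encryption algorithm
 *		.calgos		= ~(u32)0,	// any compression algorithm
 *	};
 */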
506  
507  #define XFRM_MAX_DEPTH		6
508  #define XFRM_MAX_OFFLOAD_DEPTH	1
509  
510  struct xfrm_policy_walk_entry {
511  	struct list_head	all;
512  	u8			dead;
513  };
514  
515  struct xfrm_policy_walk {
516  	struct xfrm_policy_walk_entry walk;
517  	u8 type;
518  	u32 seq;
519  };
520  
521  struct xfrm_policy_queue {
522  	struct sk_buff_head	hold_queue;
523  	struct timer_list	hold_timer;
524  	unsigned long		timeout;
525  };
526  
527  struct xfrm_policy {
528  	possible_net_t		xp_net;
529  	struct hlist_node	bydst;
530  	struct hlist_node	byidx;
531  
532  	/* This lock only affects elements except for entry. */
533  	rwlock_t		lock;
534  	refcount_t		refcnt;
535  	u32			pos;
536  	struct timer_list	timer;
537  
538  	atomic_t		genid;
539  	u32			priority;
540  	u32			index;
541  	u32			if_id;
542  	struct xfrm_mark	mark;
543  	struct xfrm_selector	selector;
544  	struct xfrm_lifetime_cfg lft;
545  	struct xfrm_lifetime_cur curlft;
546  	struct xfrm_policy_walk_entry walk;
547  	struct xfrm_policy_queue polq;
548  	bool                    bydst_reinsert;
549  	u8			type;
550  	u8			action;
551  	u8			flags;
552  	u8			xfrm_nr;
553  	u16			family;
554  	struct xfrm_sec_ctx	*security;
555  	struct xfrm_tmpl       	xfrm_vec[XFRM_MAX_DEPTH];
556  	struct hlist_node	bydst_inexact_list;
557  	struct rcu_head		rcu;
558  
559  	struct xfrm_dev_offload xdo;
560  };
561  
562  static inline struct net *xp_net(const struct xfrm_policy *xp)
563  {
564  	return read_pnet(&xp->xp_net);
565  }
566  
567  struct xfrm_kmaddress {
568  	xfrm_address_t          local;
569  	xfrm_address_t          remote;
570  	u32			reserved;
571  	u16			family;
572  };
573  
574  struct xfrm_migrate {
575  	xfrm_address_t		old_daddr;
576  	xfrm_address_t		old_saddr;
577  	xfrm_address_t		new_daddr;
578  	xfrm_address_t		new_saddr;
579  	u8			proto;
580  	u8			mode;
581  	u16			reserved;
582  	u32			reqid;
583  	u16			old_family;
584  	u16			new_family;
585  };
586  
587  #define XFRM_KM_TIMEOUT                30
588  /* what happened */
589  #define XFRM_REPLAY_UPDATE	XFRM_AE_CR
590  #define XFRM_REPLAY_TIMEOUT	XFRM_AE_CE
591  
592  /* default aevent timeout in units of 100ms */
593  #define XFRM_AE_ETIME			10
594  /* Async Event timer multiplier */
595  #define XFRM_AE_ETH_M			10
596  /* default seq threshold size */
597  #define XFRM_AE_SEQT_SIZE		2
598  
599  struct xfrm_mgr {
600  	struct list_head	list;
601  	int			(*notify)(struct xfrm_state *x, const struct km_event *c);
602  	int			(*acquire)(struct xfrm_state *x, struct xfrm_tmpl *, struct xfrm_policy *xp);
603  	struct xfrm_policy	*(*compile_policy)(struct sock *sk, int opt, u8 *data, int len, int *dir);
604  	int			(*new_mapping)(struct xfrm_state *x, xfrm_address_t *ipaddr, __be16 sport);
605  	int			(*notify_policy)(struct xfrm_policy *x, int dir, const struct km_event *c);
606  	int			(*report)(struct net *net, u8 proto, struct xfrm_selector *sel, xfrm_address_t *addr);
607  	int			(*migrate)(const struct xfrm_selector *sel,
608  					   u8 dir, u8 type,
609  					   const struct xfrm_migrate *m,
610  					   int num_bundles,
611  					   const struct xfrm_kmaddress *k,
612  					   const struct xfrm_encap_tmpl *encap);
613  	bool			(*is_alive)(const struct km_event *c);
614  };
615  
616  void xfrm_register_km(struct xfrm_mgr *km);
617  void xfrm_unregister_km(struct xfrm_mgr *km);
618  
619  struct xfrm_tunnel_skb_cb {
620  	union {
621  		struct inet_skb_parm h4;
622  		struct inet6_skb_parm h6;
623  	} header;
624  
625  	union {
626  		struct ip_tunnel *ip4;
627  		struct ip6_tnl *ip6;
628  	} tunnel;
629  };
630  
631  #define XFRM_TUNNEL_SKB_CB(__skb) ((struct xfrm_tunnel_skb_cb *)&((__skb)->cb[0]))
632  
633  /*
634   * This structure is used for the duration where packets are being
635   * transformed by IPsec.  As soon as the packet leaves IPsec the
636   * area beyond the generic IP part may be overwritten.
637   */
638  struct xfrm_skb_cb {
639  	struct xfrm_tunnel_skb_cb header;
640  
641          /* Sequence number for replay protection. */
642  	union {
643  		struct {
644  			__u32 low;
645  			__u32 hi;
646  		} output;
647  		struct {
648  			__be32 low;
649  			__be32 hi;
650  		} input;
651  	} seq;
652  };
653  
654  #define XFRM_SKB_CB(__skb) ((struct xfrm_skb_cb *)&((__skb)->cb[0]))
655  
656  /*
657   * This structure is used by the afinfo prepare_input/prepare_output functions
658   * to transmit header information to the mode input/output functions.
659   */
660  struct xfrm_mode_skb_cb {
661  	struct xfrm_tunnel_skb_cb header;
662  
663  	/* Copied from header for IPv4, always set to zero and DF for IPv6. */
664  	__be16 id;
665  	__be16 frag_off;
666  
667  	/* IP header length (excluding options or extension headers). */
668  	u8 ihl;
669  
670  	/* TOS for IPv4, class for IPv6. */
671  	u8 tos;
672  
673  	/* TTL for IPv4, hop limit for IPv6. */
674  	u8 ttl;
675  
676  	/* Protocol for IPv4, NH for IPv6. */
677  	u8 protocol;
678  
679  	/* Option length for IPv4, zero for IPv6. */
680  	u8 optlen;
681  
682  	/* Used by IPv6 only, zero for IPv4. */
683  	u8 flow_lbl[3];
684  };
685  
686  #define XFRM_MODE_SKB_CB(__skb) ((struct xfrm_mode_skb_cb *)&((__skb)->cb[0]))
687  
688  /*
689   * This structure is used by the input processing to locate the SPI and
690   * related information.
691   */
692  struct xfrm_spi_skb_cb {
693  	struct xfrm_tunnel_skb_cb header;
694  
695  	unsigned int daddroff;
696  	unsigned int family;
697  	__be32 seq;
698  };
699  
700  #define XFRM_SPI_SKB_CB(__skb) ((struct xfrm_spi_skb_cb *)&((__skb)->cb[0]))
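/* Example of how an input path stamps these control blocks before handing
 * the packet to xfrm_input(); this mirrors xfrm4_rcv_spi() further down in
 * this header:
 *
 *	XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip4 = NULL;
 *	XFRM_SPI_SKB_CB(skb)->family = AF_INET;
 *	XFRM_SPI_SKB_CB(skb)->daddroff = offsetof(struct iphdr, daddr);
 *	return xfrm_input(skb, nexthdr, spi, 0);
 */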
701  
702  #ifdef CONFIG_AUDITSYSCALL
703  static inline struct audit_buffer *xfrm_audit_start(const char *op)
704  {
705  	struct audit_buffer *audit_buf = NULL;
706  
707  	if (audit_enabled == AUDIT_OFF)
708  		return NULL;
709  	audit_buf = audit_log_start(audit_context(), GFP_ATOMIC,
710  				    AUDIT_MAC_IPSEC_EVENT);
711  	if (audit_buf == NULL)
712  		return NULL;
713  	audit_log_format(audit_buf, "op=%s", op);
714  	return audit_buf;
715  }
716  
717  static inline void xfrm_audit_helper_usrinfo(bool task_valid,
718  					     struct audit_buffer *audit_buf)
719  {
720  	const unsigned int auid = from_kuid(&init_user_ns, task_valid ?
721  					    audit_get_loginuid(current) :
722  					    INVALID_UID);
723  	const unsigned int ses = task_valid ? audit_get_sessionid(current) :
724  		AUDIT_SID_UNSET;
725  
726  	audit_log_format(audit_buf, " auid=%u ses=%u", auid, ses);
727  	audit_log_task_context(audit_buf);
728  }
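/* Sketch of the usual audit call pattern built on the two helpers above
 * (as used by the xfrm_audit_* implementations, e.g. with op "SAD-add"):
 *
 *	struct audit_buffer *audit_buf = xfrm_audit_start("SAD-add");
 *
 *	if (audit_buf == NULL)
 *		return;
 *	xfrm_audit_helper_usrinfo(task_valid, audit_buf);
 *	// ...log object-specific fields here...
 *	audit_log_end(audit_buf);
 */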
729  
730  void xfrm_audit_policy_add(struct xfrm_policy *xp, int result, bool task_valid);
731  void xfrm_audit_policy_delete(struct xfrm_policy *xp, int result,
732  			      bool task_valid);
733  void xfrm_audit_state_add(struct xfrm_state *x, int result, bool task_valid);
734  void xfrm_audit_state_delete(struct xfrm_state *x, int result, bool task_valid);
735  void xfrm_audit_state_replay_overflow(struct xfrm_state *x,
736  				      struct sk_buff *skb);
737  void xfrm_audit_state_replay(struct xfrm_state *x, struct sk_buff *skb,
738  			     __be32 net_seq);
739  void xfrm_audit_state_notfound_simple(struct sk_buff *skb, u16 family);
740  void xfrm_audit_state_notfound(struct sk_buff *skb, u16 family, __be32 net_spi,
741  			       __be32 net_seq);
742  void xfrm_audit_state_icvfail(struct xfrm_state *x, struct sk_buff *skb,
743  			      u8 proto);
744  #else
745  
746  static inline void xfrm_audit_policy_add(struct xfrm_policy *xp, int result,
747  					 bool task_valid)
748  {
749  }
750  
751  static inline void xfrm_audit_policy_delete(struct xfrm_policy *xp, int result,
752  					    bool task_valid)
753  {
754  }
755  
756  static inline void xfrm_audit_state_add(struct xfrm_state *x, int result,
757  					bool task_valid)
758  {
759  }
760  
761  static inline void xfrm_audit_state_delete(struct xfrm_state *x, int result,
762  					   bool task_valid)
763  {
764  }
765  
766  static inline void xfrm_audit_state_replay_overflow(struct xfrm_state *x,
767  					     struct sk_buff *skb)
768  {
769  }
770  
771  static inline void xfrm_audit_state_replay(struct xfrm_state *x,
772  					   struct sk_buff *skb, __be32 net_seq)
773  {
774  }
775  
776  static inline void xfrm_audit_state_notfound_simple(struct sk_buff *skb,
777  				      u16 family)
778  {
779  }
780  
781  static inline void xfrm_audit_state_notfound(struct sk_buff *skb, u16 family,
782  				      __be32 net_spi, __be32 net_seq)
783  {
784  }
785  
786  static inline void xfrm_audit_state_icvfail(struct xfrm_state *x,
787  				     struct sk_buff *skb, u8 proto)
788  {
789  }
790  #endif /* CONFIG_AUDITSYSCALL */
791  
792  static inline void xfrm_pol_hold(struct xfrm_policy *policy)
793  {
794  	if (likely(policy != NULL))
795  		refcount_inc(&policy->refcnt);
796  }
797  
798  void xfrm_policy_destroy(struct xfrm_policy *policy);
799  
800  static inline void xfrm_pol_put(struct xfrm_policy *policy)
801  {
802  	if (refcount_dec_and_test(&policy->refcnt))
803  		xfrm_policy_destroy(policy);
804  }
805  
806  static inline void xfrm_pols_put(struct xfrm_policy **pols, int npols)
807  {
808  	int i;
809  	for (i = npols - 1; i >= 0; --i)
810  		xfrm_pol_put(pols[i]);
811  }
812  
813  void __xfrm_state_destroy(struct xfrm_state *, bool);
814  
815  static inline void __xfrm_state_put(struct xfrm_state *x)
816  {
817  	refcount_dec(&x->refcnt);
818  }
819  
820  static inline void xfrm_state_put(struct xfrm_state *x)
821  {
822  	if (refcount_dec_and_test(&x->refcnt))
823  		__xfrm_state_destroy(x, false);
824  }
825  
826  static inline void xfrm_state_put_sync(struct xfrm_state *x)
827  {
828  	if (refcount_dec_and_test(&x->refcnt))
829  		__xfrm_state_destroy(x, true);
830  }
831  
832  static inline void xfrm_state_hold(struct xfrm_state *x)
833  {
834  	refcount_inc(&x->refcnt);
835  }
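/* Sketch of the usual lookup-then-release pattern around these reference
 * helpers; xfrm_state_lookup() (declared below) returns its result with a
 * reference already held:
 *
 *	x = xfrm_state_lookup(net, mark, daddr, spi, proto, family);
 *	if (!x)
 *		return -ENOENT;
 *	// ...use x...
 *	xfrm_state_put(x);
 */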
836  
837  static inline bool addr_match(const void *token1, const void *token2,
838  			      unsigned int prefixlen)
839  {
840  	const __be32 *a1 = token1;
841  	const __be32 *a2 = token2;
842  	unsigned int pdw;
843  	unsigned int pbi;
844  
845  	pdw = prefixlen >> 5;	  /* num of whole u32 in prefix */
846  	pbi = prefixlen &  0x1f;  /* num of bits in incomplete u32 in prefix */
847  
848  	if (pdw)
849  		if (memcmp(a1, a2, pdw << 2))
850  			return false;
851  
852  	if (pbi) {
853  		__be32 mask;
854  
855  		mask = htonl((0xffffffff) << (32 - pbi));
856  
857  		if ((a1[pdw] ^ a2[pdw]) & mask)
858  			return false;
859  	}
860  
861  	return true;
862  }
863  
864  static inline bool addr4_match(__be32 a1, __be32 a2, u8 prefixlen)
865  {
866  	/* C99 6.5.7 (3): u32 << 32 is undefined behaviour */
867  	if (sizeof(long) == 4 && prefixlen == 0)
868  		return true;
869  	return !((a1 ^ a2) & htonl(~0UL << (32 - prefixlen)));
870  }
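/* Example of the prefix comparisons used by selector matching (values are
 * purely illustrative; "sel" and "daddr" are assumed to exist in the caller):
 *
 *	// first 8 bits equal (both addresses in 10.0.0.0/8) -> true
 *	bool v4_hit = addr4_match(htonl(0x0a000001), htonl(0x0afffffe), 8);
 *	// compare the first 64 bits of two IPv6 addresses
 *	bool v6_hit = addr_match(&sel->daddr, daddr, 64);
 */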
871  
872  static __inline__
873  __be16 xfrm_flowi_sport(const struct flowi *fl, const union flowi_uli *uli)
874  {
875  	__be16 port;
876  	switch(fl->flowi_proto) {
877  	case IPPROTO_TCP:
878  	case IPPROTO_UDP:
879  	case IPPROTO_UDPLITE:
880  	case IPPROTO_SCTP:
881  		port = uli->ports.sport;
882  		break;
883  	case IPPROTO_ICMP:
884  	case IPPROTO_ICMPV6:
885  		port = htons(uli->icmpt.type);
886  		break;
887  	case IPPROTO_MH:
888  		port = htons(uli->mht.type);
889  		break;
890  	case IPPROTO_GRE:
891  		port = htons(ntohl(uli->gre_key) >> 16);
892  		break;
893  	default:
894  		port = 0;	/*XXX*/
895  	}
896  	return port;
897  }
898  
899  static __inline__
900  __be16 xfrm_flowi_dport(const struct flowi *fl, const union flowi_uli *uli)
901  {
902  	__be16 port;
903  	switch(fl->flowi_proto) {
904  	case IPPROTO_TCP:
905  	case IPPROTO_UDP:
906  	case IPPROTO_UDPLITE:
907  	case IPPROTO_SCTP:
908  		port = uli->ports.dport;
909  		break;
910  	case IPPROTO_ICMP:
911  	case IPPROTO_ICMPV6:
912  		port = htons(uli->icmpt.code);
913  		break;
914  	case IPPROTO_GRE:
915  		port = htons(ntohl(uli->gre_key) & 0xffff);
916  		break;
917  	default:
918  		port = 0;	/*XXX*/
919  	}
920  	return port;
921  }
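/* Example (sketch): pulling the upper-layer ports back out of a decoded
 * flow, assuming "fl" was filled in by xfrm_decode_session() for an IPv4
 * flow:
 *
 *	const union flowi_uli *uli = &fl->u.ip4.uli;
 *	__be16 sport = xfrm_flowi_sport(fl, uli);
 *	__be16 dport = xfrm_flowi_dport(fl, uli);
 */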
922  
923  bool xfrm_selector_match(const struct xfrm_selector *sel,
924  			 const struct flowi *fl, unsigned short family);
925  
926  #ifdef CONFIG_SECURITY_NETWORK_XFRM
927  /*	If neither has a context --> match
928   * 	Otherwise, both must have a context and the sids, doi, alg must match
929   */
930  static inline bool xfrm_sec_ctx_match(struct xfrm_sec_ctx *s1, struct xfrm_sec_ctx *s2)
931  {
932  	return ((!s1 && !s2) ||
933  		(s1 && s2 &&
934  		 (s1->ctx_sid == s2->ctx_sid) &&
935  		 (s1->ctx_doi == s2->ctx_doi) &&
936  		 (s1->ctx_alg == s2->ctx_alg)));
937  }
938  #else
939  static inline bool xfrm_sec_ctx_match(struct xfrm_sec_ctx *s1, struct xfrm_sec_ctx *s2)
940  {
941  	return true;
942  }
943  #endif
944  
945  /* A struct encoding a bundle of transformations to apply to some set of flows.
946   *
947   * xdst->child points to the next element of the bundle.
948   * dst->xfrm  points to an instance of a transformer.
949   *
950   * Due to unfortunate limitations of the current routing cache, which we
951   * have no time to fix, it mirrors struct rtable and is bound to the same
952   * routing key, including saddr and daddr. However, we can have many
953   * bundles differing by session id. All the bundles grow from a parent
954   * policy rule.
955   */
956  struct xfrm_dst {
957  	union {
958  		struct dst_entry	dst;
959  		struct rtable		rt;
960  		struct rt6_info		rt6;
961  	} u;
962  	struct dst_entry *route;
963  	struct dst_entry *child;
964  	struct dst_entry *path;
965  	struct xfrm_policy *pols[XFRM_POLICY_TYPE_MAX];
966  	int num_pols, num_xfrms;
967  	u32 xfrm_genid;
968  	u32 policy_genid;
969  	u32 route_mtu_cached;
970  	u32 child_mtu_cached;
971  	u32 route_cookie;
972  	u32 path_cookie;
973  };
974  
975  static inline struct dst_entry *xfrm_dst_path(const struct dst_entry *dst)
976  {
977  #ifdef CONFIG_XFRM
978  	if (dst->xfrm || (dst->flags & DST_XFRM_QUEUE)) {
979  		const struct xfrm_dst *xdst = (const struct xfrm_dst *) dst;
980  
981  		return xdst->path;
982  	}
983  #endif
984  	return (struct dst_entry *) dst;
985  }
986  
987  static inline struct dst_entry *xfrm_dst_child(const struct dst_entry *dst)
988  {
989  #ifdef CONFIG_XFRM
990  	if (dst->xfrm || (dst->flags & DST_XFRM_QUEUE)) {
991  		struct xfrm_dst *xdst = (struct xfrm_dst *) dst;
992  		return xdst->child;
993  	}
994  #endif
995  	return NULL;
996  }
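/* Example (sketch): walking a resolved bundle from the top, mirroring the
 * dst/child diagram near the top of this file ("xdst" is assumed to be the
 * bundle head obtained from a lookup):
 *
 *	struct dst_entry *dst;
 *
 *	for (dst = &xdst->u.dst; dst; dst = xfrm_dst_child(dst)) {
 *		struct xfrm_state *x = dst->xfrm;	// NULL past the last transform
 *		// ...inspect x...
 *	}
 */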
997  
998  #ifdef CONFIG_XFRM
999  static inline void xfrm_dst_set_child(struct xfrm_dst *xdst, struct dst_entry *child)
1000  {
1001  	xdst->child = child;
1002  }
1003  
1004  static inline void xfrm_dst_destroy(struct xfrm_dst *xdst)
1005  {
1006  	xfrm_pols_put(xdst->pols, xdst->num_pols);
1007  	dst_release(xdst->route);
1008  	if (likely(xdst->u.dst.xfrm))
1009  		xfrm_state_put(xdst->u.dst.xfrm);
1010  }
1011  #endif
1012  
1013  void xfrm_dst_ifdown(struct dst_entry *dst, struct net_device *dev);
1014  
1015  struct xfrm_if_parms {
1016  	int link;		/* ifindex of underlying L2 interface */
1017  	u32 if_id;		/* interface identifier */
1018  	bool collect_md;
1019  };
1020  
1021  struct xfrm_if {
1022  	struct xfrm_if __rcu *next;	/* next interface in list */
1023  	struct net_device *dev;		/* virtual device associated with interface */
1024  	struct net *net;		/* netns for packet i/o */
1025  	struct xfrm_if_parms p;		/* interface parms */
1026  
1027  	struct gro_cells gro_cells;
1028  };
1029  
1030  struct xfrm_offload {
1031  	/* Output sequence number for replay protection on offloading. */
1032  	struct {
1033  		__u32 low;
1034  		__u32 hi;
1035  	} seq;
1036  
1037  	__u32			flags;
1038  #define	SA_DELETE_REQ		1
1039  #define	CRYPTO_DONE		2
1040  #define	CRYPTO_NEXT_DONE	4
1041  #define	CRYPTO_FALLBACK		8
1042  #define	XFRM_GSO_SEGMENT	16
1043  #define	XFRM_GRO		32
1044  /* 64 is free */
1045  #define	XFRM_DEV_RESUME		128
1046  #define	XFRM_XMIT		256
1047  
1048  	__u32			status;
1049  #define CRYPTO_SUCCESS				1
1050  #define CRYPTO_GENERIC_ERROR			2
1051  #define CRYPTO_TRANSPORT_AH_AUTH_FAILED		4
1052  #define CRYPTO_TRANSPORT_ESP_AUTH_FAILED	8
1053  #define CRYPTO_TUNNEL_AH_AUTH_FAILED		16
1054  #define CRYPTO_TUNNEL_ESP_AUTH_FAILED		32
1055  #define CRYPTO_INVALID_PACKET_SYNTAX		64
1056  #define CRYPTO_INVALID_PROTOCOL			128
1057  
1058  	/* Used to keep whole l2 header for transport mode GRO */
1059  	__u32			orig_mac_len;
1060  
1061  	__u8			proto;
1062  	__u8			inner_ipproto;
1063  };
1064  
1065  struct sec_path {
1066  	int			len;
1067  	int			olen;
1068  	int			verified_cnt;
1069  
1070  	struct xfrm_state	*xvec[XFRM_MAX_DEPTH];
1071  	struct xfrm_offload	ovec[XFRM_MAX_OFFLOAD_DEPTH];
1072  };
1073  
1074  struct sec_path *secpath_set(struct sk_buff *skb);
1075  
1076  static inline void
1077  secpath_reset(struct sk_buff *skb)
1078  {
1079  #ifdef CONFIG_XFRM
1080  	skb_ext_del(skb, SKB_EXT_SEC_PATH);
1081  #endif
1082  }
1083  
1084  static inline int
1085  xfrm_addr_any(const xfrm_address_t *addr, unsigned short family)
1086  {
1087  	switch (family) {
1088  	case AF_INET:
1089  		return addr->a4 == 0;
1090  	case AF_INET6:
1091  		return ipv6_addr_any(&addr->in6);
1092  	}
1093  	return 0;
1094  }
1095  
1096  static inline int
1097  __xfrm4_state_addr_cmp(const struct xfrm_tmpl *tmpl, const struct xfrm_state *x)
1098  {
1099  	return	(tmpl->saddr.a4 &&
1100  		 tmpl->saddr.a4 != x->props.saddr.a4);
1101  }
1102  
1103  static inline int
1104  __xfrm6_state_addr_cmp(const struct xfrm_tmpl *tmpl, const struct xfrm_state *x)
1105  {
1106  	return	(!ipv6_addr_any((struct in6_addr*)&tmpl->saddr) &&
1107  		 !ipv6_addr_equal((struct in6_addr *)&tmpl->saddr, (struct in6_addr*)&x->props.saddr));
1108  }
1109  
1110  static inline int
1111  xfrm_state_addr_cmp(const struct xfrm_tmpl *tmpl, const struct xfrm_state *x, unsigned short family)
1112  {
1113  	switch (family) {
1114  	case AF_INET:
1115  		return __xfrm4_state_addr_cmp(tmpl, x);
1116  	case AF_INET6:
1117  		return __xfrm6_state_addr_cmp(tmpl, x);
1118  	}
1119  	return !0;
1120  }
1121  
1122  #ifdef CONFIG_XFRM
1123  static inline struct xfrm_state *xfrm_input_state(struct sk_buff *skb)
1124  {
1125  	struct sec_path *sp = skb_sec_path(skb);
1126  
1127  	return sp->xvec[sp->len - 1];
1128  }
1129  #endif
1130  
1131  static inline struct xfrm_offload *xfrm_offload(struct sk_buff *skb)
1132  {
1133  #ifdef CONFIG_XFRM
1134  	struct sec_path *sp = skb_sec_path(skb);
1135  
1136  	if (!sp || !sp->olen || sp->len != sp->olen)
1137  		return NULL;
1138  
1139  	return &sp->ovec[sp->olen - 1];
1140  #else
1141  	return NULL;
1142  #endif
1143  }
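/* Sketch of a receive-side check on the offload control block before
 * trusting hardware results ("done" is just an illustrative local):
 *
 *	struct xfrm_offload *xo = xfrm_offload(skb);
 *	bool done = xo && (xo->flags & CRYPTO_DONE) &&
 *		    (xo->status & CRYPTO_SUCCESS);
 */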
1144  
1145  #ifdef CONFIG_XFRM
1146  int __xfrm_policy_check(struct sock *, int dir, struct sk_buff *skb,
1147  			unsigned short family);
1148  
1149  static inline bool __xfrm_check_nopolicy(struct net *net, struct sk_buff *skb,
1150  					 int dir)
1151  {
1152  	if (!net->xfrm.policy_count[dir] && !secpath_exists(skb))
1153  		return net->xfrm.policy_default[dir] == XFRM_USERPOLICY_ACCEPT;
1154  
1155  	return false;
1156  }
1157  
1158  static inline bool __xfrm_check_dev_nopolicy(struct sk_buff *skb,
1159  					     int dir, unsigned short family)
1160  {
1161  	if (dir != XFRM_POLICY_OUT && family == AF_INET) {
1162  		/* same dst may be used for traffic originating from
1163  		 * devices with different policy settings.
1164  		 */
1165  		return IPCB(skb)->flags & IPSKB_NOPOLICY;
1166  	}
1167  	return skb_dst(skb) && (skb_dst(skb)->flags & DST_NOPOLICY);
1168  }
1169  
1170  static inline int __xfrm_policy_check2(struct sock *sk, int dir,
1171  				       struct sk_buff *skb,
1172  				       unsigned int family, int reverse)
1173  {
1174  	struct net *net = dev_net(skb->dev);
1175  	int ndir = dir | (reverse ? XFRM_POLICY_MASK + 1 : 0);
1176  	struct xfrm_offload *xo = xfrm_offload(skb);
1177  	struct xfrm_state *x;
1178  
1179  	if (sk && sk->sk_policy[XFRM_POLICY_IN])
1180  		return __xfrm_policy_check(sk, ndir, skb, family);
1181  
1182  	if (xo) {
1183  		x = xfrm_input_state(skb);
1184  		if (x->xso.type == XFRM_DEV_OFFLOAD_PACKET) {
1185  			bool check = (xo->flags & CRYPTO_DONE) &&
1186  				     (xo->status & CRYPTO_SUCCESS);
1187  
1188  			/* The packets here are plain ones and the secpath was
1189  			 * only needed to indicate that the hardware has already
1190  			 * handled them, so no further processing is required.
1191  			 *
1192  			 * Consume the secpath which was set by drivers.
1193  			 */
1194  			secpath_reset(skb);
1195  			return check;
1196  		}
1197  	}
1198  
1199  	return __xfrm_check_nopolicy(net, skb, dir) ||
1200  	       __xfrm_check_dev_nopolicy(skb, dir, family) ||
1201  	       __xfrm_policy_check(sk, ndir, skb, family);
1202  }
1203  
1204  static inline int xfrm_policy_check(struct sock *sk, int dir, struct sk_buff *skb, unsigned short family)
1205  {
1206  	return __xfrm_policy_check2(sk, dir, skb, family, 0);
1207  }
1208  
1209  static inline int xfrm4_policy_check(struct sock *sk, int dir, struct sk_buff *skb)
1210  {
1211  	return xfrm_policy_check(sk, dir, skb, AF_INET);
1212  }
1213  
1214  static inline int xfrm6_policy_check(struct sock *sk, int dir, struct sk_buff *skb)
1215  {
1216  	return xfrm_policy_check(sk, dir, skb, AF_INET6);
1217  }
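/* Example: the canonical inbound use, as in the IPv4/IPv6 transport receive
 * paths, where packets not permitted by the SPD are dropped:
 *
 *	if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
 *		goto drop;
 */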
1218  
1219  static inline int xfrm4_policy_check_reverse(struct sock *sk, int dir,
1220  					     struct sk_buff *skb)
1221  {
1222  	return __xfrm_policy_check2(sk, dir, skb, AF_INET, 1);
1223  }
1224  
1225  static inline int xfrm6_policy_check_reverse(struct sock *sk, int dir,
1226  					     struct sk_buff *skb)
1227  {
1228  	return __xfrm_policy_check2(sk, dir, skb, AF_INET6, 1);
1229  }
1230  
1231  int __xfrm_decode_session(struct sk_buff *skb, struct flowi *fl,
1232  			  unsigned int family, int reverse);
1233  
1234  static inline int xfrm_decode_session(struct sk_buff *skb, struct flowi *fl,
1235  				      unsigned int family)
1236  {
1237  	return __xfrm_decode_session(skb, fl, family, 0);
1238  }
1239  
1240  static inline int xfrm_decode_session_reverse(struct sk_buff *skb,
1241  					      struct flowi *fl,
1242  					      unsigned int family)
1243  {
1244  	return __xfrm_decode_session(skb, fl, family, 1);
1245  }
1246  
1247  int __xfrm_route_forward(struct sk_buff *skb, unsigned short family);
1248  
1249  static inline int xfrm_route_forward(struct sk_buff *skb, unsigned short family)
1250  {
1251  	struct net *net = dev_net(skb->dev);
1252  
1253  	if (!net->xfrm.policy_count[XFRM_POLICY_OUT] &&
1254  	    net->xfrm.policy_default[XFRM_POLICY_OUT] == XFRM_USERPOLICY_ACCEPT)
1255  		return true;
1256  
1257  	return (skb_dst(skb)->flags & DST_NOXFRM) ||
1258  	       __xfrm_route_forward(skb, family);
1259  }
1260  
1261  static inline int xfrm4_route_forward(struct sk_buff *skb)
1262  {
1263  	return xfrm_route_forward(skb, AF_INET);
1264  }
1265  
1266  static inline int xfrm6_route_forward(struct sk_buff *skb)
1267  {
1268  	return xfrm_route_forward(skb, AF_INET6);
1269  }
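/* Example: the forwarding-path use, as in ip_forward()/ip6_forward():
 *
 *	if (!xfrm4_route_forward(skb))
 *		goto drop;
 */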
1270  
1271  int __xfrm_sk_clone_policy(struct sock *sk, const struct sock *osk);
1272  
1273  static inline int xfrm_sk_clone_policy(struct sock *sk, const struct sock *osk)
1274  {
1275  	if (!sk_fullsock(osk))
1276  		return 0;
1277  	sk->sk_policy[0] = NULL;
1278  	sk->sk_policy[1] = NULL;
1279  	if (unlikely(osk->sk_policy[0] || osk->sk_policy[1]))
1280  		return __xfrm_sk_clone_policy(sk, osk);
1281  	return 0;
1282  }
1283  
1284  int xfrm_policy_delete(struct xfrm_policy *pol, int dir);
1285  
1286  static inline void xfrm_sk_free_policy(struct sock *sk)
1287  {
1288  	struct xfrm_policy *pol;
1289  
1290  	pol = rcu_dereference_protected(sk->sk_policy[0], 1);
1291  	if (unlikely(pol != NULL)) {
1292  		xfrm_policy_delete(pol, XFRM_POLICY_MAX);
1293  		sk->sk_policy[0] = NULL;
1294  	}
1295  	pol = rcu_dereference_protected(sk->sk_policy[1], 1);
1296  	if (unlikely(pol != NULL)) {
1297  		xfrm_policy_delete(pol, XFRM_POLICY_MAX+1);
1298  		sk->sk_policy[1] = NULL;
1299  	}
1300  }
1301  
1302  #else
1303  
1304  static inline void xfrm_sk_free_policy(struct sock *sk) {}
1305  static inline int xfrm_sk_clone_policy(struct sock *sk, const struct sock *osk) { return 0; }
1306  static inline int xfrm6_route_forward(struct sk_buff *skb) { return 1; }
1307  static inline int xfrm4_route_forward(struct sk_buff *skb) { return 1; }
1308  static inline int xfrm6_policy_check(struct sock *sk, int dir, struct sk_buff *skb)
1309  {
1310  	return 1;
1311  }
1312  static inline int xfrm4_policy_check(struct sock *sk, int dir, struct sk_buff *skb)
1313  {
1314  	return 1;
1315  }
1316  static inline int xfrm_policy_check(struct sock *sk, int dir, struct sk_buff *skb, unsigned short family)
1317  {
1318  	return 1;
1319  }
1320  static inline int xfrm_decode_session_reverse(struct sk_buff *skb,
1321  					      struct flowi *fl,
1322  					      unsigned int family)
1323  {
1324  	return -ENOSYS;
1325  }
1326  static inline int xfrm4_policy_check_reverse(struct sock *sk, int dir,
1327  					     struct sk_buff *skb)
1328  {
1329  	return 1;
1330  }
1331  static inline int xfrm6_policy_check_reverse(struct sock *sk, int dir,
1332  					     struct sk_buff *skb)
1333  {
1334  	return 1;
1335  }
1336  #endif
1337  
1338  static __inline__
1339  xfrm_address_t *xfrm_flowi_daddr(const struct flowi *fl, unsigned short family)
1340  {
1341  	switch (family){
1342  	case AF_INET:
1343  		return (xfrm_address_t *)&fl->u.ip4.daddr;
1344  	case AF_INET6:
1345  		return (xfrm_address_t *)&fl->u.ip6.daddr;
1346  	}
1347  	return NULL;
1348  }
1349  
1350  static __inline__
1351  xfrm_address_t *xfrm_flowi_saddr(const struct flowi *fl, unsigned short family)
1352  {
1353  	switch (family){
1354  	case AF_INET:
1355  		return (xfrm_address_t *)&fl->u.ip4.saddr;
1356  	case AF_INET6:
1357  		return (xfrm_address_t *)&fl->u.ip6.saddr;
1358  	}
1359  	return NULL;
1360  }
1361  
1362  static __inline__
1363  void xfrm_flowi_addr_get(const struct flowi *fl,
1364  			 xfrm_address_t *saddr, xfrm_address_t *daddr,
1365  			 unsigned short family)
1366  {
1367  	switch(family) {
1368  	case AF_INET:
1369  		memcpy(&saddr->a4, &fl->u.ip4.saddr, sizeof(saddr->a4));
1370  		memcpy(&daddr->a4, &fl->u.ip4.daddr, sizeof(daddr->a4));
1371  		break;
1372  	case AF_INET6:
1373  		saddr->in6 = fl->u.ip6.saddr;
1374  		daddr->in6 = fl->u.ip6.daddr;
1375  		break;
1376  	}
1377  }
1378  
1379  static __inline__ int
1380  __xfrm4_state_addr_check(const struct xfrm_state *x,
1381  			 const xfrm_address_t *daddr, const xfrm_address_t *saddr)
1382  {
1383  	if (daddr->a4 == x->id.daddr.a4 &&
1384  	    (saddr->a4 == x->props.saddr.a4 || !saddr->a4 || !x->props.saddr.a4))
1385  		return 1;
1386  	return 0;
1387  }
1388  
1389  static __inline__ int
1390  __xfrm6_state_addr_check(const struct xfrm_state *x,
1391  			 const xfrm_address_t *daddr, const xfrm_address_t *saddr)
1392  {
1393  	if (ipv6_addr_equal((struct in6_addr *)daddr, (struct in6_addr *)&x->id.daddr) &&
1394  	    (ipv6_addr_equal((struct in6_addr *)saddr, (struct in6_addr *)&x->props.saddr) ||
1395  	     ipv6_addr_any((struct in6_addr *)saddr) ||
1396  	     ipv6_addr_any((struct in6_addr *)&x->props.saddr)))
1397  		return 1;
1398  	return 0;
1399  }
1400  
1401  static __inline__ int
1402  xfrm_state_addr_check(const struct xfrm_state *x,
1403  		      const xfrm_address_t *daddr, const xfrm_address_t *saddr,
1404  		      unsigned short family)
1405  {
1406  	switch (family) {
1407  	case AF_INET:
1408  		return __xfrm4_state_addr_check(x, daddr, saddr);
1409  	case AF_INET6:
1410  		return __xfrm6_state_addr_check(x, daddr, saddr);
1411  	}
1412  	return 0;
1413  }
1414  
1415  static __inline__ int
1416  xfrm_state_addr_flow_check(const struct xfrm_state *x, const struct flowi *fl,
1417  			   unsigned short family)
1418  {
1419  	switch (family) {
1420  	case AF_INET:
1421  		return __xfrm4_state_addr_check(x,
1422  						(const xfrm_address_t *)&fl->u.ip4.daddr,
1423  						(const xfrm_address_t *)&fl->u.ip4.saddr);
1424  	case AF_INET6:
1425  		return __xfrm6_state_addr_check(x,
1426  						(const xfrm_address_t *)&fl->u.ip6.daddr,
1427  						(const xfrm_address_t *)&fl->u.ip6.saddr);
1428  	}
1429  	return 0;
1430  }
1431  
1432  static inline int xfrm_state_kern(const struct xfrm_state *x)
1433  {
1434  	return atomic_read(&x->tunnel_users);
1435  }
1436  
1437  static inline bool xfrm_id_proto_valid(u8 proto)
1438  {
1439  	switch (proto) {
1440  	case IPPROTO_AH:
1441  	case IPPROTO_ESP:
1442  	case IPPROTO_COMP:
1443  #if IS_ENABLED(CONFIG_IPV6)
1444  	case IPPROTO_ROUTING:
1445  	case IPPROTO_DSTOPTS:
1446  #endif
1447  		return true;
1448  	default:
1449  		return false;
1450  	}
1451  }
1452  
1453  /* IPSEC_PROTO_ANY only matches 3 IPsec protocols, 0 could match all. */
1454  static inline int xfrm_id_proto_match(u8 proto, u8 userproto)
1455  {
1456  	return (!userproto || proto == userproto ||
1457  		(userproto == IPSEC_PROTO_ANY && (proto == IPPROTO_AH ||
1458  						  proto == IPPROTO_ESP ||
1459  						  proto == IPPROTO_COMP)));
1460  }
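/* Worked examples of the matching rule above:
 *
 *	xfrm_id_proto_match(IPPROTO_ESP, IPSEC_PROTO_ANY)	-> true
 *	xfrm_id_proto_match(IPPROTO_COMP, 0)			-> true  (0 is a full wildcard)
 *	xfrm_id_proto_match(IPPROTO_ROUTING, IPSEC_PROTO_ANY)	-> false
 */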
1461  
1462  /*
1463   * xfrm algorithm information
1464   */
1465  struct xfrm_algo_aead_info {
1466  	char *geniv;
1467  	u16 icv_truncbits;
1468  };
1469  
1470  struct xfrm_algo_auth_info {
1471  	u16 icv_truncbits;
1472  	u16 icv_fullbits;
1473  };
1474  
1475  struct xfrm_algo_encr_info {
1476  	char *geniv;
1477  	u16 blockbits;
1478  	u16 defkeybits;
1479  };
1480  
1481  struct xfrm_algo_comp_info {
1482  	u16 threshold;
1483  };
1484  
1485  struct xfrm_algo_desc {
1486  	char *name;
1487  	char *compat;
1488  	u8 available:1;
1489  	u8 pfkey_supported:1;
1490  	union {
1491  		struct xfrm_algo_aead_info aead;
1492  		struct xfrm_algo_auth_info auth;
1493  		struct xfrm_algo_encr_info encr;
1494  		struct xfrm_algo_comp_info comp;
1495  	} uinfo;
1496  	struct sadb_alg desc;
1497  };
1498  
1499  /* XFRM protocol handlers.  */
1500  struct xfrm4_protocol {
1501  	int (*handler)(struct sk_buff *skb);
1502  	int (*input_handler)(struct sk_buff *skb, int nexthdr, __be32 spi,
1503  			     int encap_type);
1504  	int (*cb_handler)(struct sk_buff *skb, int err);
1505  	int (*err_handler)(struct sk_buff *skb, u32 info);
1506  
1507  	struct xfrm4_protocol __rcu *next;
1508  	int priority;
1509  };
1510  
1511  struct xfrm6_protocol {
1512  	int (*handler)(struct sk_buff *skb);
1513  	int (*input_handler)(struct sk_buff *skb, int nexthdr, __be32 spi,
1514  			     int encap_type);
1515  	int (*cb_handler)(struct sk_buff *skb, int err);
1516  	int (*err_handler)(struct sk_buff *skb, struct inet6_skb_parm *opt,
1517  			   u8 type, u8 code, int offset, __be32 info);
1518  
1519  	struct xfrm6_protocol __rcu *next;
1520  	int priority;
1521  };
1522  
1523  /* XFRM tunnel handlers.  */
1524  struct xfrm_tunnel {
1525  	int (*handler)(struct sk_buff *skb);
1526  	int (*cb_handler)(struct sk_buff *skb, int err);
1527  	int (*err_handler)(struct sk_buff *skb, u32 info);
1528  
1529  	struct xfrm_tunnel __rcu *next;
1530  	int priority;
1531  };
1532  
1533  struct xfrm6_tunnel {
1534  	int (*handler)(struct sk_buff *skb);
1535  	int (*cb_handler)(struct sk_buff *skb, int err);
1536  	int (*err_handler)(struct sk_buff *skb, struct inet6_skb_parm *opt,
1537  			   u8 type, u8 code, int offset, __be32 info);
1538  	struct xfrm6_tunnel __rcu *next;
1539  	int priority;
1540  };
1541  
1542  void xfrm_init(void);
1543  void xfrm4_init(void);
1544  int xfrm_state_init(struct net *net);
1545  void xfrm_state_fini(struct net *net);
1546  void xfrm4_state_init(void);
1547  void xfrm4_protocol_init(void);
1548  #ifdef CONFIG_XFRM
1549  int xfrm6_init(void);
1550  void xfrm6_fini(void);
1551  int xfrm6_state_init(void);
1552  void xfrm6_state_fini(void);
1553  int xfrm6_protocol_init(void);
1554  void xfrm6_protocol_fini(void);
1555  #else
1556  static inline int xfrm6_init(void)
1557  {
1558  	return 0;
1559  }
1560  static inline void xfrm6_fini(void)
1561  {
1562  	;
1563  }
1564  #endif
1565  
1566  #ifdef CONFIG_XFRM_STATISTICS
1567  int xfrm_proc_init(struct net *net);
1568  void xfrm_proc_fini(struct net *net);
1569  #endif
1570  
1571  int xfrm_sysctl_init(struct net *net);
1572  #ifdef CONFIG_SYSCTL
1573  void xfrm_sysctl_fini(struct net *net);
1574  #else
1575  static inline void xfrm_sysctl_fini(struct net *net)
1576  {
1577  }
1578  #endif
1579  
1580  void xfrm_state_walk_init(struct xfrm_state_walk *walk, u8 proto,
1581  			  struct xfrm_address_filter *filter);
1582  int xfrm_state_walk(struct net *net, struct xfrm_state_walk *walk,
1583  		    int (*func)(struct xfrm_state *, int, void*), void *);
1584  void xfrm_state_walk_done(struct xfrm_state_walk *walk, struct net *net);
1585  struct xfrm_state *xfrm_state_alloc(struct net *net);
1586  void xfrm_state_free(struct xfrm_state *x);
1587  struct xfrm_state *xfrm_state_find(const xfrm_address_t *daddr,
1588  				   const xfrm_address_t *saddr,
1589  				   const struct flowi *fl,
1590  				   struct xfrm_tmpl *tmpl,
1591  				   struct xfrm_policy *pol, int *err,
1592  				   unsigned short family, u32 if_id);
1593  struct xfrm_state *xfrm_stateonly_find(struct net *net, u32 mark, u32 if_id,
1594  				       xfrm_address_t *daddr,
1595  				       xfrm_address_t *saddr,
1596  				       unsigned short family,
1597  				       u8 mode, u8 proto, u32 reqid);
1598  struct xfrm_state *xfrm_state_lookup_byspi(struct net *net, __be32 spi,
1599  					      unsigned short family);
1600  int xfrm_state_check_expire(struct xfrm_state *x);
1601  #ifdef CONFIG_XFRM_OFFLOAD
1602  static inline void xfrm_dev_state_update_curlft(struct xfrm_state *x)
1603  {
1604  	struct xfrm_dev_offload *xdo = &x->xso;
1605  	struct net_device *dev = READ_ONCE(xdo->dev);
1606  
1607  	if (x->xso.type != XFRM_DEV_OFFLOAD_PACKET)
1608  		return;
1609  
1610  	if (dev && dev->xfrmdev_ops &&
1611  	    dev->xfrmdev_ops->xdo_dev_state_update_curlft)
1612  		dev->xfrmdev_ops->xdo_dev_state_update_curlft(x);
1613  
1614  }
1615  #else
1616  static inline void xfrm_dev_state_update_curlft(struct xfrm_state *x) {}
1617  #endif
1618  void xfrm_state_insert(struct xfrm_state *x);
1619  int xfrm_state_add(struct xfrm_state *x);
1620  int xfrm_state_update(struct xfrm_state *x);
1621  struct xfrm_state *xfrm_state_lookup(struct net *net, u32 mark,
1622  				     const xfrm_address_t *daddr, __be32 spi,
1623  				     u8 proto, unsigned short family);
1624  struct xfrm_state *xfrm_state_lookup_byaddr(struct net *net, u32 mark,
1625  					    const xfrm_address_t *daddr,
1626  					    const xfrm_address_t *saddr,
1627  					    u8 proto,
1628  					    unsigned short family);
1629  #ifdef CONFIG_XFRM_SUB_POLICY
1630  void xfrm_tmpl_sort(struct xfrm_tmpl **dst, struct xfrm_tmpl **src, int n,
1631  		    unsigned short family);
1632  void xfrm_state_sort(struct xfrm_state **dst, struct xfrm_state **src, int n,
1633  		     unsigned short family);
1634  #else
1635  static inline void xfrm_tmpl_sort(struct xfrm_tmpl **d, struct xfrm_tmpl **s,
1636  				  int n, unsigned short family)
1637  {
1638  }
1639  
1640  static inline void xfrm_state_sort(struct xfrm_state **d, struct xfrm_state **s,
1641  				   int n, unsigned short family)
1642  {
1643  }
1644  #endif
1645  
1646  struct xfrmk_sadinfo {
1647  	u32 sadhcnt; /* current hash bkts */
1648  	u32 sadhmcnt; /* max allowed hash bkts */
1649  	u32 sadcnt; /* current running count */
1650  };
1651  
1652  struct xfrmk_spdinfo {
1653  	u32 incnt;
1654  	u32 outcnt;
1655  	u32 fwdcnt;
1656  	u32 inscnt;
1657  	u32 outscnt;
1658  	u32 fwdscnt;
1659  	u32 spdhcnt;
1660  	u32 spdhmcnt;
1661  };
1662  
1663  struct xfrm_state *xfrm_find_acq_byseq(struct net *net, u32 mark, u32 seq);
1664  int xfrm_state_delete(struct xfrm_state *x);
1665  int xfrm_state_flush(struct net *net, u8 proto, bool task_valid, bool sync);
1666  int xfrm_dev_state_flush(struct net *net, struct net_device *dev, bool task_valid);
1667  int xfrm_dev_policy_flush(struct net *net, struct net_device *dev,
1668  			  bool task_valid);
1669  void xfrm_sad_getinfo(struct net *net, struct xfrmk_sadinfo *si);
1670  void xfrm_spd_getinfo(struct net *net, struct xfrmk_spdinfo *si);
1671  u32 xfrm_replay_seqhi(struct xfrm_state *x, __be32 net_seq);
1672  int xfrm_init_replay(struct xfrm_state *x, struct netlink_ext_ack *extack);
1673  u32 xfrm_state_mtu(struct xfrm_state *x, int mtu);
1674  int __xfrm_init_state(struct xfrm_state *x, bool init_replay, bool offload,
1675  		      struct netlink_ext_ack *extack);
1676  int xfrm_init_state(struct xfrm_state *x);
1677  int xfrm_input(struct sk_buff *skb, int nexthdr, __be32 spi, int encap_type);
1678  int xfrm_input_resume(struct sk_buff *skb, int nexthdr);
1679  int xfrm_trans_queue_net(struct net *net, struct sk_buff *skb,
1680  			 int (*finish)(struct net *, struct sock *,
1681  				       struct sk_buff *));
1682  int xfrm_trans_queue(struct sk_buff *skb,
1683  		     int (*finish)(struct net *, struct sock *,
1684  				   struct sk_buff *));
1685  int xfrm_output_resume(struct sock *sk, struct sk_buff *skb, int err);
1686  int xfrm_output(struct sock *sk, struct sk_buff *skb);
1687  
1688  #if IS_ENABLED(CONFIG_NET_PKTGEN)
1689  int pktgen_xfrm_outer_mode_output(struct xfrm_state *x, struct sk_buff *skb);
1690  #endif
1691  
1692  void xfrm_local_error(struct sk_buff *skb, int mtu);
1693  int xfrm4_extract_input(struct xfrm_state *x, struct sk_buff *skb);
1694  int xfrm4_rcv_encap(struct sk_buff *skb, int nexthdr, __be32 spi,
1695  		    int encap_type);
1696  int xfrm4_transport_finish(struct sk_buff *skb, int async);
1697  int xfrm4_rcv(struct sk_buff *skb);
1698  
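/*
 * Feed an IPv4 packet into the xfrm input path with a caller-supplied SPI,
 * after recording the address family and the destination-address offset the
 * SPI lookup will use.
 */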
1699  static inline int xfrm4_rcv_spi(struct sk_buff *skb, int nexthdr, __be32 spi)
1700  {
1701  	XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip4 = NULL;
1702  	XFRM_SPI_SKB_CB(skb)->family = AF_INET;
1703  	XFRM_SPI_SKB_CB(skb)->daddroff = offsetof(struct iphdr, daddr);
1704  	return xfrm_input(skb, nexthdr, spi, 0);
1705  }
1706  
1707  int xfrm4_output(struct net *net, struct sock *sk, struct sk_buff *skb);
1708  int xfrm4_protocol_register(struct xfrm4_protocol *handler, unsigned char protocol);
1709  int xfrm4_protocol_deregister(struct xfrm4_protocol *handler, unsigned char protocol);
1710  int xfrm4_tunnel_register(struct xfrm_tunnel *handler, unsigned short family);
1711  int xfrm4_tunnel_deregister(struct xfrm_tunnel *handler, unsigned short family);
1712  void xfrm4_local_error(struct sk_buff *skb, u32 mtu);
1713  int xfrm6_extract_input(struct xfrm_state *x, struct sk_buff *skb);
1714  int xfrm6_rcv_spi(struct sk_buff *skb, int nexthdr, __be32 spi,
1715  		  struct ip6_tnl *t);
1716  int xfrm6_rcv_encap(struct sk_buff *skb, int nexthdr, __be32 spi,
1717  		    int encap_type);
1718  int xfrm6_transport_finish(struct sk_buff *skb, int async);
1719  int xfrm6_rcv_tnl(struct sk_buff *skb, struct ip6_tnl *t);
1720  int xfrm6_rcv(struct sk_buff *skb);
1721  int xfrm6_input_addr(struct sk_buff *skb, xfrm_address_t *daddr,
1722  		     xfrm_address_t *saddr, u8 proto);
1723  void xfrm6_local_error(struct sk_buff *skb, u32 mtu);
1724  int xfrm6_protocol_register(struct xfrm6_protocol *handler, unsigned char protocol);
1725  int xfrm6_protocol_deregister(struct xfrm6_protocol *handler, unsigned char protocol);
1726  int xfrm6_tunnel_register(struct xfrm6_tunnel *handler, unsigned short family);
1727  int xfrm6_tunnel_deregister(struct xfrm6_tunnel *handler, unsigned short family);
1728  __be32 xfrm6_tunnel_alloc_spi(struct net *net, xfrm_address_t *saddr);
1729  __be32 xfrm6_tunnel_spi_lookup(struct net *net, const xfrm_address_t *saddr);
1730  int xfrm6_output(struct net *net, struct sock *sk, struct sk_buff *skb);
1731  
1732  #ifdef CONFIG_XFRM
1733  void xfrm6_local_rxpmtu(struct sk_buff *skb, u32 mtu);
1734  int xfrm4_udp_encap_rcv(struct sock *sk, struct sk_buff *skb);
1735  int xfrm6_udp_encap_rcv(struct sock *sk, struct sk_buff *skb);
1736  int xfrm_user_policy(struct sock *sk, int optname, sockptr_t optval,
1737  		     int optlen);
1738  #else
1739  static inline int xfrm_user_policy(struct sock *sk, int optname,
1740  				   sockptr_t optval, int optlen)
1741  {
1742  	return -ENOPROTOOPT;
1743  }
1744  #endif
1745  
1746  struct dst_entry *__xfrm_dst_lookup(int family, const struct xfrm_dst_lookup_params *params);
1747  
1748  struct xfrm_policy *xfrm_policy_alloc(struct net *net, gfp_t gfp);
1749  
1750  void xfrm_policy_walk_init(struct xfrm_policy_walk *walk, u8 type);
1751  int xfrm_policy_walk(struct net *net, struct xfrm_policy_walk *walk,
1752  		     int (*func)(struct xfrm_policy *, int, int, void*),
1753  		     void *);
1754  void xfrm_policy_walk_done(struct xfrm_policy_walk *walk, struct net *net);
1755  int xfrm_policy_insert(int dir, struct xfrm_policy *policy, int excl);
1756  struct xfrm_policy *xfrm_policy_bysel_ctx(struct net *net,
1757  					  const struct xfrm_mark *mark,
1758  					  u32 if_id, u8 type, int dir,
1759  					  struct xfrm_selector *sel,
1760  					  struct xfrm_sec_ctx *ctx, int delete,
1761  					  int *err);
1762  struct xfrm_policy *xfrm_policy_byid(struct net *net,
1763  				     const struct xfrm_mark *mark, u32 if_id,
1764  				     u8 type, int dir, u32 id, int delete,
1765  				     int *err);
1766  int xfrm_policy_flush(struct net *net, u8 type, bool task_valid);
1767  void xfrm_policy_hash_rebuild(struct net *net);
1768  u32 xfrm_get_acqseq(void);
1769  int verify_spi_info(u8 proto, u32 min, u32 max, struct netlink_ext_ack *extack);
1770  int xfrm_alloc_spi(struct xfrm_state *x, u32 minspi, u32 maxspi,
1771  		   struct netlink_ext_ack *extack);
1772  struct xfrm_state *xfrm_find_acq(struct net *net, const struct xfrm_mark *mark,
1773  				 u8 mode, u32 reqid, u32 if_id, u8 proto,
1774  				 const xfrm_address_t *daddr,
1775  				 const xfrm_address_t *saddr, int create,
1776  				 unsigned short family);
1777  int xfrm_sk_policy_insert(struct sock *sk, int dir, struct xfrm_policy *pol);
1778  
1779  #ifdef CONFIG_XFRM_MIGRATE
1780  int km_migrate(const struct xfrm_selector *sel, u8 dir, u8 type,
1781  	       const struct xfrm_migrate *m, int num_bundles,
1782  	       const struct xfrm_kmaddress *k,
1783  	       const struct xfrm_encap_tmpl *encap);
1784  struct xfrm_state *xfrm_migrate_state_find(struct xfrm_migrate *m, struct net *net,
1785  						u32 if_id);
1786  struct xfrm_state *xfrm_state_migrate(struct xfrm_state *x,
1787  				      struct xfrm_migrate *m,
1788  				      struct xfrm_encap_tmpl *encap);
1789  int xfrm_migrate(const struct xfrm_selector *sel, u8 dir, u8 type,
1790  		 struct xfrm_migrate *m, int num_bundles,
1791  		 struct xfrm_kmaddress *k, struct net *net,
1792  		 struct xfrm_encap_tmpl *encap, u32 if_id,
1793  		 struct netlink_ext_ack *extack);
1794  #endif
1795  
1796  int km_new_mapping(struct xfrm_state *x, xfrm_address_t *ipaddr, __be16 sport);
1797  void km_policy_expired(struct xfrm_policy *pol, int dir, int hard, u32 portid);
1798  int km_report(struct net *net, u8 proto, struct xfrm_selector *sel,
1799  	      xfrm_address_t *addr);
1800  
1801  void xfrm_input_init(void);
1802  int xfrm_parse_spi(struct sk_buff *skb, u8 nexthdr, __be32 *spi, __be32 *seq);
1803  
1804  void xfrm_probe_algs(void);
1805  int xfrm_count_pfkey_auth_supported(void);
1806  int xfrm_count_pfkey_enc_supported(void);
1807  struct xfrm_algo_desc *xfrm_aalg_get_byidx(unsigned int idx);
1808  struct xfrm_algo_desc *xfrm_ealg_get_byidx(unsigned int idx);
1809  struct xfrm_algo_desc *xfrm_aalg_get_byid(int alg_id);
1810  struct xfrm_algo_desc *xfrm_ealg_get_byid(int alg_id);
1811  struct xfrm_algo_desc *xfrm_calg_get_byid(int alg_id);
1812  struct xfrm_algo_desc *xfrm_aalg_get_byname(const char *name, int probe);
1813  struct xfrm_algo_desc *xfrm_ealg_get_byname(const char *name, int probe);
1814  struct xfrm_algo_desc *xfrm_calg_get_byname(const char *name, int probe);
1815  struct xfrm_algo_desc *xfrm_aead_get_byname(const char *name, int icv_len,
1816  					    int probe);
1817  
1818  static inline bool xfrm6_addr_equal(const xfrm_address_t *a,
1819  				    const xfrm_address_t *b)
1820  {
1821  	return ipv6_addr_equal((const struct in6_addr *)a,
1822  			       (const struct in6_addr *)b);
1823  }
1824  
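/* Compare two xfrm addresses; any family other than AF_INET6 is treated as IPv4. */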
1825  static inline bool xfrm_addr_equal(const xfrm_address_t *a,
1826  				   const xfrm_address_t *b,
1827  				   sa_family_t family)
1828  {
1829  	switch (family) {
1830  	default:
1831  	case AF_INET:
1832  		return ((__force u32)a->a4 ^ (__force u32)b->a4) == 0;
1833  	case AF_INET6:
1834  		return xfrm6_addr_equal(a, b);
1835  	}
1836  }
1837  
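/* Policy indexes encode the policy's direction in their low three bits. */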
1838  static inline int xfrm_policy_id2dir(u32 index)
1839  {
1840  	return index & 7;
1841  }
1842  
1843  #ifdef CONFIG_XFRM
1844  void xfrm_replay_advance(struct xfrm_state *x, __be32 net_seq);
1845  int xfrm_replay_check(struct xfrm_state *x, struct sk_buff *skb, __be32 net_seq);
1846  void xfrm_replay_notify(struct xfrm_state *x, int event);
1847  int xfrm_replay_overflow(struct xfrm_state *x, struct sk_buff *skb);
1848  int xfrm_replay_recheck(struct xfrm_state *x, struct sk_buff *skb, __be32 net_seq);
1849  
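/*
 * The two helpers below report whether anyone is listening on the
 * corresponding xfrm netlink multicast group, so callers can avoid building
 * notifications that nobody would receive.
 */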
1850  static inline int xfrm_aevent_is_on(struct net *net)
1851  {
1852  	struct sock *nlsk;
1853  	int ret = 0;
1854  
1855  	rcu_read_lock();
1856  	nlsk = rcu_dereference(net->xfrm.nlsk);
1857  	if (nlsk)
1858  		ret = netlink_has_listeners(nlsk, XFRMNLGRP_AEVENTS);
1859  	rcu_read_unlock();
1860  	return ret;
1861  }
1862  
1863  static inline int xfrm_acquire_is_on(struct net *net)
1864  {
1865  	struct sock *nlsk;
1866  	int ret = 0;
1867  
1868  	rcu_read_lock();
1869  	nlsk = rcu_dereference(net->xfrm.nlsk);
1870  	if (nlsk)
1871  		ret = netlink_has_listeners(nlsk, XFRMNLGRP_ACQUIRE);
1872  	rcu_read_unlock();
1873  
1874  	return ret;
1875  }
1876  #endif
1877  
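/*
 * Netlink sizes of the variable-length algorithm attributes: the structure
 * plus the key material, with alg_key_len given in bits and rounded up to
 * whole bytes.
 */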
1878  static inline unsigned int aead_len(struct xfrm_algo_aead *alg)
1879  {
1880  	return sizeof(*alg) + ((alg->alg_key_len + 7) / 8);
1881  }
1882  
1883  static inline unsigned int xfrm_alg_len(const struct xfrm_algo *alg)
1884  {
1885  	return sizeof(*alg) + ((alg->alg_key_len + 7) / 8);
1886  }
1887  
1888  static inline unsigned int xfrm_alg_auth_len(const struct xfrm_algo_auth *alg)
1889  {
1890  	return sizeof(*alg) + ((alg->alg_key_len + 7) / 8);
1891  }
1892  
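/* Size of an ESN replay state including its bitmap of bmp_len 32-bit words. */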
1893  static inline unsigned int xfrm_replay_state_esn_len(struct xfrm_replay_state_esn *replay_esn)
1894  {
1895  	return sizeof(*replay_esn) + replay_esn->bmp_len * sizeof(__u32);
1896  }
1897  
1898  #ifdef CONFIG_XFRM_MIGRATE
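/* Duplicate orig's ESN replay windows into x when cloning a state for migration. */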
1899  static inline int xfrm_replay_clone(struct xfrm_state *x,
1900  				     struct xfrm_state *orig)
1901  {
1902  
1903  	x->replay_esn = kmemdup(orig->replay_esn,
1904  				xfrm_replay_state_esn_len(orig->replay_esn),
1905  				GFP_KERNEL);
1906  	if (!x->replay_esn)
1907  		return -ENOMEM;
1908  	x->preplay_esn = kmemdup(orig->preplay_esn,
1909  				 xfrm_replay_state_esn_len(orig->preplay_esn),
1910  				 GFP_KERNEL);
1911  	if (!x->preplay_esn)
1912  		return -ENOMEM;
1913  
1914  	return 0;
1915  }
1916  
1917  static inline struct xfrm_algo_aead *xfrm_algo_aead_clone(struct xfrm_algo_aead *orig)
1918  {
1919  	return kmemdup(orig, aead_len(orig), GFP_KERNEL);
1920  }
1921  
1922  
1923  static inline struct xfrm_algo *xfrm_algo_clone(struct xfrm_algo *orig)
1924  {
1925  	return kmemdup(orig, xfrm_alg_len(orig), GFP_KERNEL);
1926  }
1927  
1928  static inline struct xfrm_algo_auth *xfrm_algo_auth_clone(struct xfrm_algo_auth *orig)
1929  {
1930  	return kmemdup(orig, xfrm_alg_auth_len(orig), GFP_KERNEL);
1931  }
1932  
1933  static inline void xfrm_states_put(struct xfrm_state **states, int n)
1934  {
1935  	int i;
1936  	for (i = 0; i < n; i++)
1937  		xfrm_state_put(*(states + i));
1938  }
1939  
1940  static inline void xfrm_states_delete(struct xfrm_state **states, int n)
1941  {
1942  	int i;
1943  	for (i = 0; i < n; i++)
1944  		xfrm_state_delete(*(states + i));
1945  }
1946  #endif
1947  
1948  void __init xfrm_dev_init(void);
1949  
1950  #ifdef CONFIG_XFRM_OFFLOAD
1951  void xfrm_dev_resume(struct sk_buff *skb);
1952  void xfrm_dev_backlog(struct softnet_data *sd);
1953  struct sk_buff *validate_xmit_xfrm(struct sk_buff *skb, netdev_features_t features, bool *again);
1954  int xfrm_dev_state_add(struct net *net, struct xfrm_state *x,
1955  		       struct xfrm_user_offload *xuo,
1956  		       struct netlink_ext_ack *extack);
1957  int xfrm_dev_policy_add(struct net *net, struct xfrm_policy *xp,
1958  			struct xfrm_user_offload *xuo, u8 dir,
1959  			struct netlink_ext_ack *extack);
1960  bool xfrm_dev_offload_ok(struct sk_buff *skb, struct xfrm_state *x);
1961  void xfrm_dev_state_delete(struct xfrm_state *x);
1962  void xfrm_dev_state_free(struct xfrm_state *x);
1963  
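/* Let the offloading device know that the state's ESN sequence numbers advanced. */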
1964  static inline void xfrm_dev_state_advance_esn(struct xfrm_state *x)
1965  {
1966  	struct xfrm_dev_offload *xso = &x->xso;
1967  	struct net_device *dev = READ_ONCE(xso->dev);
1968  
1969  	if (dev && dev->xfrmdev_ops->xdo_dev_state_advance_esn)
1970  		dev->xfrmdev_ops->xdo_dev_state_advance_esn(x);
1971  }
1972  
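/*
 * True when this dst's xfrm state can be handled by the offloading device:
 * the state's type supports offload, nothing below it in the bundle needs a
 * transform, and any offload handle refers to the device the packet will
 * leave through.
 */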
1973  static inline bool xfrm_dst_offload_ok(struct dst_entry *dst)
1974  {
1975  	struct xfrm_state *x = dst->xfrm;
1976  	struct xfrm_dst *xdst;
1977  
1978  	if (!x || !x->type_offload)
1979  		return false;
1980  
1981  	xdst = (struct xfrm_dst *) dst;
1982  	if (!x->xso.offload_handle && !xdst->child->xfrm)
1983  		return true;
1984  	if (x->xso.offload_handle && (x->xso.dev == xfrm_dst_path(dst)->dev) &&
1985  	    !xdst->child->xfrm)
1986  		return true;
1987  
1988  	return false;
1989  }
1990  
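/*
 * Notify the offloading device that a policy is being deleted; the _free
 * variant additionally drops the reference the offload holds on the device.
 */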
1991  static inline void xfrm_dev_policy_delete(struct xfrm_policy *x)
1992  {
1993  	struct xfrm_dev_offload *xdo = &x->xdo;
1994  	struct net_device *dev = xdo->dev;
1995  
1996  	if (dev && dev->xfrmdev_ops && dev->xfrmdev_ops->xdo_dev_policy_delete)
1997  		dev->xfrmdev_ops->xdo_dev_policy_delete(x);
1998  }
1999  
2000  static inline void xfrm_dev_policy_free(struct xfrm_policy *x)
2001  {
2002  	struct xfrm_dev_offload *xdo = &x->xdo;
2003  	struct net_device *dev = xdo->dev;
2004  
2005  	if (dev && dev->xfrmdev_ops) {
2006  		if (dev->xfrmdev_ops->xdo_dev_policy_free)
2007  			dev->xfrmdev_ops->xdo_dev_policy_free(x);
2008  		xdo->dev = NULL;
2009  		netdev_put(dev, &xdo->dev_tracker);
2010  	}
2011  }
2012  #else
2013  static inline void xfrm_dev_resume(struct sk_buff *skb)
2014  {
2015  }
2016  
2017  static inline void xfrm_dev_backlog(struct softnet_data *sd)
2018  {
2019  }
2020  
2021  static inline struct sk_buff *validate_xmit_xfrm(struct sk_buff *skb, netdev_features_t features, bool *again)
2022  {
2023  	return skb;
2024  }
2025  
2026  static inline int xfrm_dev_state_add(struct net *net, struct xfrm_state *x, struct xfrm_user_offload *xuo, struct netlink_ext_ack *extack)
2027  {
2028  	return 0;
2029  }
2030  
2031  static inline void xfrm_dev_state_delete(struct xfrm_state *x)
2032  {
2033  }
2034  
2035  static inline void xfrm_dev_state_free(struct xfrm_state *x)
2036  {
2037  }
2038  
2039  static inline int xfrm_dev_policy_add(struct net *net, struct xfrm_policy *xp,
2040  				      struct xfrm_user_offload *xuo, u8 dir,
2041  				      struct netlink_ext_ack *extack)
2042  {
2043  	return 0;
2044  }
2045  
2046  static inline void xfrm_dev_policy_delete(struct xfrm_policy *x)
2047  {
2048  }
2049  
2050  static inline void xfrm_dev_policy_free(struct xfrm_policy *x)
2051  {
2052  }
2053  
2054  static inline bool xfrm_dev_offload_ok(struct sk_buff *skb, struct xfrm_state *x)
2055  {
2056  	return false;
2057  }
2058  
2059  static inline void xfrm_dev_state_advance_esn(struct xfrm_state *x)
2060  {
2061  }
2062  
2063  static inline bool xfrm_dst_offload_ok(struct dst_entry *dst)
2064  {
2065  	return false;
2066  }
2067  #endif
2068  
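/*
 * Copy the mark from an XFRMA_MARK attribute into *m (zeroed when the
 * attribute is absent) and return the masked value, e.g.
 * "struct xfrm_mark m; xfrm_mark_get(attrs, &m);" in a netlink handler.
 */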
2069  static inline int xfrm_mark_get(struct nlattr **attrs, struct xfrm_mark *m)
2070  {
2071  	if (attrs[XFRMA_MARK])
2072  		memcpy(m, nla_data(attrs[XFRMA_MARK]), sizeof(struct xfrm_mark));
2073  	else
2074  		m->v = m->m = 0;
2075  
2076  	return m->v & m->m;
2077  }
2078  
2079  static inline int xfrm_mark_put(struct sk_buff *skb, const struct xfrm_mark *m)
2080  {
2081  	int ret = 0;
2082  
2083  	if (m->m | m->v)
2084  		ret = nla_put(skb, XFRMA_MARK, sizeof(struct xfrm_mark), m);
2085  	return ret;
2086  }
2087  
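/*
 * Combine the state's output mark with the original mark: bits covered by
 * the smark mask come from the state, the remaining bits from @mark.
 */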
2088  static inline __u32 xfrm_smark_get(__u32 mark, struct xfrm_state *x)
2089  {
2090  	struct xfrm_mark *m = &x->props.smark;
2091  
2092  	return (m->v & m->m) | (mark & ~m->m);
2093  }
2094  
2095  static inline int xfrm_if_id_put(struct sk_buff *skb, __u32 if_id)
2096  {
2097  	int ret = 0;
2098  
2099  	if (if_id)
2100  		ret = nla_put_u32(skb, XFRMA_IF_ID, if_id);
2101  	return ret;
2102  }
2103  
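/*
 * Reject packets that arrived through an IP tunnel but matched a state whose
 * outer mode is not a tunnel mode.
 */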
2104  static inline int xfrm_tunnel_check(struct sk_buff *skb, struct xfrm_state *x,
2105  				    unsigned int family)
2106  {
2107  	bool tunnel = false;
2108  
2109  	switch(family) {
2110  	case AF_INET:
2111  		if (XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip4)
2112  			tunnel = true;
2113  		break;
2114  	case AF_INET6:
2115  		if (XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip6)
2116  			tunnel = true;
2117  		break;
2118  	}
2119  	if (tunnel && !(x->outer_mode.flags & XFRM_MODE_FLAG_TUNNEL))
2120  		return -EINVAL;
2121  
2122  	return 0;
2123  }
2124  
2125  extern const int xfrm_msg_min[XFRM_NR_MSGTYPES];
2126  extern const struct nla_policy xfrma_policy[XFRMA_MAX+1];
2127  
2128  struct xfrm_translator {
2129  	/* Allocate frag_list and put compat translation there */
2130  	int (*alloc_compat)(struct sk_buff *skb, const struct nlmsghdr *src);
2131  
2132  	/* Allocate nlmsg with 64-bit translation of received 32-bit message */
2133  	struct nlmsghdr *(*rcv_msg_compat)(const struct nlmsghdr *nlh,
2134  			int maxtype, const struct nla_policy *policy,
2135  			struct netlink_ext_ack *extack);
2136  
2137  	/* Translate 32-bit user_policy from sockptr */
2138  	int (*xlate_user_policy_sockptr)(u8 **pdata32, int optlen);
2139  
2140  	struct module *owner;
2141  };
2142  
2143  #if IS_ENABLED(CONFIG_XFRM_USER_COMPAT)
2144  extern int xfrm_register_translator(struct xfrm_translator *xtr);
2145  extern int xfrm_unregister_translator(struct xfrm_translator *xtr);
2146  extern struct xfrm_translator *xfrm_get_translator(void);
2147  extern void xfrm_put_translator(struct xfrm_translator *xtr);
2148  #else
2149  static inline struct xfrm_translator *xfrm_get_translator(void)
2150  {
2151  	return NULL;
2152  }
2153  static inline void xfrm_put_translator(struct xfrm_translator *xtr)
2154  {
2155  }
2156  #endif
2157  
2158  #if IS_ENABLED(CONFIG_IPV6)
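/* True when an IPv6 UDP or raw socket has requested IPV6_DONTFRAG behaviour. */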
2159  static inline bool xfrm6_local_dontfrag(const struct sock *sk)
2160  {
2161  	int proto;
2162  
2163  	if (!sk || sk->sk_family != AF_INET6)
2164  		return false;
2165  
2166  	proto = sk->sk_protocol;
2167  	if (proto == IPPROTO_UDP || proto == IPPROTO_RAW)
2168  		return inet6_sk(sk)->dontfrag;
2169  
2170  	return false;
2171  }
2172  #endif
2173  
2174  #if (IS_BUILTIN(CONFIG_XFRM_INTERFACE) && IS_ENABLED(CONFIG_DEBUG_INFO_BTF)) || \
2175      (IS_MODULE(CONFIG_XFRM_INTERFACE) && IS_ENABLED(CONFIG_DEBUG_INFO_BTF_MODULES))
2176  
2177  extern struct metadata_dst __percpu *xfrm_bpf_md_dst;
2178  
2179  int register_xfrm_interface_bpf(void);
2180  
2181  #else
2182  
2183  static inline int register_xfrm_interface_bpf(void)
2184  {
2185  	return 0;
2186  }
2187  
2188  #endif
2189  
2190  #endif	/* _NET_XFRM_H */
2191