xref: /openbmc/linux/include/net/xfrm.h (revision 95e9fd10)
1 #ifndef _NET_XFRM_H
2 #define _NET_XFRM_H
3 
4 #include <linux/compiler.h>
5 #include <linux/xfrm.h>
6 #include <linux/spinlock.h>
7 #include <linux/list.h>
8 #include <linux/skbuff.h>
9 #include <linux/socket.h>
10 #include <linux/pfkeyv2.h>
11 #include <linux/ipsec.h>
12 #include <linux/in6.h>
13 #include <linux/mutex.h>
14 #include <linux/audit.h>
15 #include <linux/slab.h>
16 
17 #include <net/sock.h>
18 #include <net/dst.h>
19 #include <net/ip.h>
20 #include <net/route.h>
21 #include <net/ipv6.h>
22 #include <net/ip6_fib.h>
23 #include <net/flow.h>
24 
25 #include <linux/interrupt.h>
26 
27 #ifdef CONFIG_XFRM_STATISTICS
28 #include <net/snmp.h>
29 #endif
30 
31 #define XFRM_PROTO_ESP		50
32 #define XFRM_PROTO_AH		51
33 #define XFRM_PROTO_COMP		108
34 #define XFRM_PROTO_IPIP		4
35 #define XFRM_PROTO_IPV6		41
36 #define XFRM_PROTO_ROUTING	IPPROTO_ROUTING
37 #define XFRM_PROTO_DSTOPTS	IPPROTO_DSTOPTS
38 
39 #define XFRM_ALIGN4(len)	(((len) + 3) & ~3)
40 #define XFRM_ALIGN8(len)	(((len) + 7) & ~7)
41 #define MODULE_ALIAS_XFRM_MODE(family, encap) \
42 	MODULE_ALIAS("xfrm-mode-" __stringify(family) "-" __stringify(encap))
43 #define MODULE_ALIAS_XFRM_TYPE(family, proto) \
44 	MODULE_ALIAS("xfrm-type-" __stringify(family) "-" __stringify(proto))
45 
46 #ifdef CONFIG_XFRM_STATISTICS
47 #define XFRM_INC_STATS(net, field)	SNMP_INC_STATS((net)->mib.xfrm_statistics, field)
48 #define XFRM_INC_STATS_BH(net, field)	SNMP_INC_STATS_BH((net)->mib.xfrm_statistics, field)
49 #define XFRM_INC_STATS_USER(net, field)	SNMP_INC_STATS_USER((net)->mib.xfrm_statistics, field)
50 #else
51 #define XFRM_INC_STATS(net, field)	((void)(net))
52 #define XFRM_INC_STATS_BH(net, field)	((void)(net))
53 #define XFRM_INC_STATS_USER(net, field)	((void)(net))
54 #endif
55 
56 extern struct mutex xfrm_cfg_mutex;
57 
58 /* Organization of SPD aka "XFRM rules"
59    ------------------------------------
60 
61    Basic objects:
62    - policy rule, struct xfrm_policy (=SPD entry)
63    - bundle of transformations, struct dst_entry == struct xfrm_dst (=SA bundle)
64    - instance of a transformer, struct xfrm_state (=SA)
65    - template to clone xfrm_state, struct xfrm_tmpl
66 
67    The SPD is a plain linear list of xfrm_policy rules, ordered by priority.
68    (To be compatible with existing pfkeyv2 implementations, many rules
69    with a priority of 0x7fffffff are allowed to exist, and such rules are
70    ordered in an unpredictable way, thanks to the BSD folks.)
71 
72    Lookup is a plain linear search until the first selector match.
73 
74    If "action" is "block", we prohibit the flow; otherwise:
75    if "xfrm_nr" is zero, the flow passes untransformed.  Otherwise, the
76    policy entry has a list of up to XFRM_MAX_DEPTH transformations,
77    described by xfrm_tmpl templates.  Each template is resolved to a
78    complete xfrm_state (see below) and we pack the bundle of
79    transformations into a dst_entry returned to the requestor.
80 
81    dst -* xfrm  *-> xfrm_state #1
82     |---* child *-> dst -* xfrm *-> xfrm_state #2
83                      |---* child *-> dst -* xfrm *-> xfrm_state #3
84                                       |---* child *-> NULL
85 
86    Bundles are cached in the xfrm_policy struct (field ->bundles).
87 
88 
89    Resolution of xfrm_tmpl
90    -----------------------
91    Template contains:
92    1. ->mode		Mode: transport or tunnel
93    2. ->id.proto	Protocol: AH/ESP/IPCOMP
94    3. ->id.daddr	Remote tunnel endpoint, ignored for transport mode.
95       Q: allow to resolve security gateway?
96    4. ->id.spi          If not zero, static SPI.
97    5. ->saddr		Local tunnel endpoint, ignored for transport mode.
98    6. ->algos		List of allowed algos. Plain bitmask now.
99       Q: ealgos, aalgos, calgos. What a mess...
100    7. ->share		Sharing mode.
101       Q: how to implement private sharing mode? To add struct sock* to
102       flow id?
103 
104    Given this template, we search the SAD for entries with the appropriate
105    mode/proto/algo that are permitted by the selector.
106    If no appropriate entry is found, one is requested from the key manager.
107 
108    PROBLEMS:
109    Q: How to find all the bundles referring to a physical path for
110       PMTU discovery? It seems dst should contain a list of all parents...
111       and we would enter an infinite locking-hierarchy disaster.
112       No! It is easier: we will not search for them, let them find us.
113       We add a genid to each dst plus a pointer to the genid of the raw IP
114       route; PMTU discovery will update the PMTU on the raw IP route and
115       increase its genid.  dst_check() will see this at the top level and
116       trigger a resync of the metrics, done via sk->sk_dst_cache. Solved.
117  */
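
/*
 * Illustrative sketch of the lookup described above (the real code lives
 * in net/xfrm/xfrm_policy.c and uses per-netns hashed tables); "spd_list"
 * and policy_matches() are hypothetical stand-ins for the per-netns
 * policy list and the selector/flow comparison:
 *
 *	struct xfrm_policy *pol, *found = NULL;
 *
 *	list_for_each_entry(pol, &spd_list, walk.all) {
 *		if (policy_matches(&pol->selector, fl)) {
 *			found = pol;
 *			break;		(list is priority ordered)
 *		}
 *	}
 *
 *	No match                            -> default policy applies.
 *	found->action == XFRM_POLICY_BLOCK  -> prohibit the flow.
 *	found->xfrm_nr == 0                 -> pass untransformed.
 *	Otherwise, resolve the templates in found->xfrm_vec[0..xfrm_nr-1]
 *	to xfrm_state entries and build the dst/xfrm bundle handed back
 *	to the requestor.
 */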
118 
119 struct xfrm_state_walk {
120 	struct list_head	all;
121 	u8			state;
122 	union {
123 		u8		dying;
124 		u8		proto;
125 	};
126 	u32			seq;
127 };
128 
129 /* Full description of state of transformer. */
130 struct xfrm_state {
131 #ifdef CONFIG_NET_NS
132 	struct net		*xs_net;
133 #endif
134 	union {
135 		struct hlist_node	gclist;
136 		struct hlist_node	bydst;
137 	};
138 	struct hlist_node	bysrc;
139 	struct hlist_node	byspi;
140 
141 	atomic_t		refcnt;
142 	spinlock_t		lock;
143 
144 	struct xfrm_id		id;
145 	struct xfrm_selector	sel;
146 	struct xfrm_mark	mark;
147 	u32			tfcpad;
148 
149 	u32			genid;
150 
151 	/* Key manager bits */
152 	struct xfrm_state_walk	km;
153 
154 	/* Parameters of this state. */
155 	struct {
156 		u32		reqid;
157 		u8		mode;
158 		u8		replay_window;
159 		u8		aalgo, ealgo, calgo;
160 		u8		flags;
161 		u16		family;
162 		xfrm_address_t	saddr;
163 		int		header_len;
164 		int		trailer_len;
165 	} props;
166 
167 	struct xfrm_lifetime_cfg lft;
168 
169 	/* Data for transformer */
170 	struct xfrm_algo_auth	*aalg;
171 	struct xfrm_algo	*ealg;
172 	struct xfrm_algo	*calg;
173 	struct xfrm_algo_aead	*aead;
174 
175 	/* Data for encapsulator */
176 	struct xfrm_encap_tmpl	*encap;
177 
178 	/* Data for care-of address */
179 	xfrm_address_t	*coaddr;
180 
181 	/* IPComp needs an IPIP tunnel for handling uncompressed packets */
182 	struct xfrm_state	*tunnel;
183 
184 	/* If a tunnel, number of users + 1 */
185 	atomic_t		tunnel_users;
186 
187 	/* State for replay detection */
188 	struct xfrm_replay_state replay;
189 	struct xfrm_replay_state_esn *replay_esn;
190 
191 	/* Replay detection state at the time we sent the last notification */
192 	struct xfrm_replay_state preplay;
193 	struct xfrm_replay_state_esn *preplay_esn;
194 
195 	/* The functions for replay detection. */
196 	struct xfrm_replay	*repl;
197 
198 	/* internal flag that only holds state for delayed aevent at the
199 	 * moment
200 	*/
201 	u32			xflags;
202 
203 	/* Replay detection notification settings */
204 	u32			replay_maxage;
205 	u32			replay_maxdiff;
206 
207 	/* Replay detection notification timer */
208 	struct timer_list	rtimer;
209 
210 	/* Statistics */
211 	struct xfrm_stats	stats;
212 
213 	struct xfrm_lifetime_cur curlft;
214 	struct tasklet_hrtimer	mtimer;
215 
216 	/* used to fix curlft->add_time when changing date */
217 	long		saved_tmo;
218 
219 	/* Last used time */
220 	unsigned long		lastused;
221 
222 	/* Reference to data common to all the instances of this
223 	 * transformer. */
224 	const struct xfrm_type	*type;
225 	struct xfrm_mode	*inner_mode;
226 	struct xfrm_mode	*inner_mode_iaf;
227 	struct xfrm_mode	*outer_mode;
228 
229 	/* Security context */
230 	struct xfrm_sec_ctx	*security;
231 
232 	/* Private data of this transformer, format is opaque,
233 	 * interpreted by xfrm_type methods. */
234 	void			*data;
235 };
236 
237 static inline struct net *xs_net(struct xfrm_state *x)
238 {
239 	return read_pnet(&x->xs_net);
240 }
241 
242 /* xflags - make enum if more show up */
243 #define XFRM_TIME_DEFER	1
244 #define XFRM_SOFT_EXPIRE 2
245 
246 enum {
247 	XFRM_STATE_VOID,
248 	XFRM_STATE_ACQ,
249 	XFRM_STATE_VALID,
250 	XFRM_STATE_ERROR,
251 	XFRM_STATE_EXPIRED,
252 	XFRM_STATE_DEAD
253 };
254 
255 /* callback structure passed from either netlink or pfkey */
256 struct km_event {
257 	union {
258 		u32 hard;
259 		u32 proto;
260 		u32 byid;
261 		u32 aevent;
262 		u32 type;
263 	} data;
264 
265 	u32	seq;
266 	u32	pid;
267 	u32	event;
268 	struct net *net;
269 };
270 
271 struct xfrm_replay {
272 	void	(*advance)(struct xfrm_state *x, __be32 net_seq);
273 	int	(*check)(struct xfrm_state *x,
274 			 struct sk_buff *skb,
275 			 __be32 net_seq);
276 	void	(*notify)(struct xfrm_state *x, int event);
277 	int	(*overflow)(struct xfrm_state *x, struct sk_buff *skb);
278 };
279 
280 struct net_device;
281 struct xfrm_type;
282 struct xfrm_dst;
283 struct xfrm_policy_afinfo {
284 	unsigned short		family;
285 	struct dst_ops		*dst_ops;
286 	void			(*garbage_collect)(struct net *net);
287 	struct dst_entry	*(*dst_lookup)(struct net *net, int tos,
288 					       const xfrm_address_t *saddr,
289 					       const xfrm_address_t *daddr);
290 	int			(*get_saddr)(struct net *net, xfrm_address_t *saddr, xfrm_address_t *daddr);
291 	void			(*decode_session)(struct sk_buff *skb,
292 						  struct flowi *fl,
293 						  int reverse);
294 	int			(*get_tos)(const struct flowi *fl);
295 	void			(*init_dst)(struct net *net,
296 					    struct xfrm_dst *dst);
297 	int			(*init_path)(struct xfrm_dst *path,
298 					     struct dst_entry *dst,
299 					     int nfheader_len);
300 	int			(*fill_dst)(struct xfrm_dst *xdst,
301 					    struct net_device *dev,
302 					    const struct flowi *fl);
303 	struct dst_entry	*(*blackhole_route)(struct net *net, struct dst_entry *orig);
304 };
305 
306 extern int xfrm_policy_register_afinfo(struct xfrm_policy_afinfo *afinfo);
307 extern int xfrm_policy_unregister_afinfo(struct xfrm_policy_afinfo *afinfo);
308 extern void km_policy_notify(struct xfrm_policy *xp, int dir, const struct km_event *c);
309 extern void km_state_notify(struct xfrm_state *x, const struct km_event *c);
310 
311 struct xfrm_tmpl;
312 extern int km_query(struct xfrm_state *x, struct xfrm_tmpl *t, struct xfrm_policy *pol);
313 extern void km_state_expired(struct xfrm_state *x, int hard, u32 pid);
314 extern int __xfrm_state_delete(struct xfrm_state *x);
315 
316 struct xfrm_state_afinfo {
317 	unsigned int		family;
318 	unsigned int		proto;
319 	__be16			eth_proto;
320 	struct module		*owner;
321 	const struct xfrm_type	*type_map[IPPROTO_MAX];
322 	struct xfrm_mode	*mode_map[XFRM_MODE_MAX];
323 	int			(*init_flags)(struct xfrm_state *x);
324 	void			(*init_tempsel)(struct xfrm_selector *sel,
325 						const struct flowi *fl);
326 	void			(*init_temprop)(struct xfrm_state *x,
327 						const struct xfrm_tmpl *tmpl,
328 						const xfrm_address_t *daddr,
329 						const xfrm_address_t *saddr);
330 	int			(*tmpl_sort)(struct xfrm_tmpl **dst, struct xfrm_tmpl **src, int n);
331 	int			(*state_sort)(struct xfrm_state **dst, struct xfrm_state **src, int n);
332 	int			(*output)(struct sk_buff *skb);
333 	int			(*output_finish)(struct sk_buff *skb);
334 	int			(*extract_input)(struct xfrm_state *x,
335 						 struct sk_buff *skb);
336 	int			(*extract_output)(struct xfrm_state *x,
337 						  struct sk_buff *skb);
338 	int			(*transport_finish)(struct sk_buff *skb,
339 						    int async);
340 };
341 
342 extern int xfrm_state_register_afinfo(struct xfrm_state_afinfo *afinfo);
343 extern int xfrm_state_unregister_afinfo(struct xfrm_state_afinfo *afinfo);
344 
345 extern void xfrm_state_delete_tunnel(struct xfrm_state *x);
346 
347 struct xfrm_type {
348 	char			*description;
349 	struct module		*owner;
350 	u8			proto;
351 	u8			flags;
352 #define XFRM_TYPE_NON_FRAGMENT	1
353 #define XFRM_TYPE_REPLAY_PROT	2
354 #define XFRM_TYPE_LOCAL_COADDR	4
355 #define XFRM_TYPE_REMOTE_COADDR	8
356 
357 	int			(*init_state)(struct xfrm_state *x);
358 	void			(*destructor)(struct xfrm_state *);
359 	int			(*input)(struct xfrm_state *, struct sk_buff *skb);
360 	int			(*output)(struct xfrm_state *, struct sk_buff *pskb);
361 	int			(*reject)(struct xfrm_state *, struct sk_buff *,
362 					  const struct flowi *);
363 	int			(*hdr_offset)(struct xfrm_state *, struct sk_buff *, u8 **);
364 	/* Estimate maximal size of result of transformation of a dgram */
365 	u32			(*get_mtu)(struct xfrm_state *, int size);
366 };
367 
368 extern int xfrm_register_type(const struct xfrm_type *type, unsigned short family);
369 extern int xfrm_unregister_type(const struct xfrm_type *type, unsigned short family);
370 
371 struct xfrm_mode {
372 	/*
373 	 * Remove encapsulation header.
374 	 *
375 	 * The IP header will be moved over the top of the encapsulation
376 	 * header.
377 	 *
378 	 * On entry, the transport header shall point to where the IP header
379 	 * should be and the network header shall be set to where the IP
380 	 * header currently is.  skb->data shall point to the start of the
381 	 * payload.
382 	 */
383 	int (*input2)(struct xfrm_state *x, struct sk_buff *skb);
384 
385 	/*
386 	 * This is the actual input entry point.
387 	 *
388 	 * For transport mode and equivalent this would be identical to
389 	 * input2 (which does not need to be set).  While tunnel mode
390 	 * and equivalent would set this to the tunnel encapsulation function
391 	 * xfrm_prepare_input that would in turn call input2.
392 	 */
393 	int (*input)(struct xfrm_state *x, struct sk_buff *skb);
394 
395 	/*
396 	 * Add encapsulation header.
397 	 *
398 	 * On exit, the transport header will be set to the start of the
399 	 * encapsulation header to be filled in by x->type->output and
400 	 * the mac header will be set to the nextheader (protocol for
401 	 * IPv4) field of the extension header directly preceding the
402 	 * encapsulation header, or in its absence, that of the top IP
403 	 * header.  The value of the network header will always point
404 	 * to the top IP header while skb->data will point to the payload.
405 	 */
406 	int (*output2)(struct xfrm_state *x,struct sk_buff *skb);
407 	int (*output2)(struct xfrm_state *x, struct sk_buff *skb);
408 	/*
409 	 * This is the actual output entry point.
410 	 *
411 	 * For transport mode and equivalent this would be identical to
412 	 * output2 (which does not need to be set).  While tunnel mode
413 	 * and equivalent would set this to a tunnel encapsulation function
414 	 * (xfrm4_prepare_output or xfrm6_prepare_output) that would in turn
415 	 * call output2.
416 	 */
417 	int (*output)(struct xfrm_state *x, struct sk_buff *skb);
418 
419 	struct xfrm_state_afinfo *afinfo;
420 	struct module *owner;
421 	unsigned int encap;
422 	int flags;
423 };
424 
425 /* Flags for xfrm_mode. */
426 enum {
427 	XFRM_MODE_FLAG_TUNNEL = 1,
428 };
429 
430 extern int xfrm_register_mode(struct xfrm_mode *mode, int family);
431 extern int xfrm_unregister_mode(struct xfrm_mode *mode, int family);
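
/*
 * Rough illustration of how the hooks in struct xfrm_mode fit together
 * (a sketch only -- real implementations live in the per-family
 * xfrm*_mode_*.c files; the example_tunnel_* names are hypothetical).
 * A transport-like mode sets ->input/->output directly, while a
 * tunnel-like mode points ->input/->output at the generic prepare
 * helpers, which in turn call ->input2/->output2:
 *
 *	static struct xfrm_mode example_tunnel_mode = {
 *		.input2		= example_tunnel_input,		(strip outer header)
 *		.input		= xfrm_prepare_input,
 *		.output2	= example_tunnel_output,	(add outer header)
 *		.output		= xfrm4_prepare_output,
 *		.owner		= THIS_MODULE,
 *		.encap		= XFRM_MODE_TUNNEL,
 *		.flags		= XFRM_MODE_FLAG_TUNNEL,
 *	};
 *
 *	err = xfrm_register_mode(&example_tunnel_mode, AF_INET);
 */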
432 
433 static inline int xfrm_af2proto(unsigned int family)
434 {
435 	switch(family) {
436 	case AF_INET:
437 		return IPPROTO_IPIP;
438 	case AF_INET6:
439 		return IPPROTO_IPV6;
440 	default:
441 		return 0;
442 	}
443 }
444 
445 static inline struct xfrm_mode *xfrm_ip2inner_mode(struct xfrm_state *x, int ipproto)
446 {
447 	if ((ipproto == IPPROTO_IPIP && x->props.family == AF_INET) ||
448 	    (ipproto == IPPROTO_IPV6 && x->props.family == AF_INET6))
449 		return x->inner_mode;
450 	else
451 		return x->inner_mode_iaf;
452 }
453 
454 struct xfrm_tmpl {
455 /* id in template is interpreted as:
456  * daddr - destination of tunnel, may be zero for transport mode.
457  * spi   - zero to acquire spi. Not zero if spi is static, then
458  *	   daddr must be fixed too.
459  * proto - AH/ESP/IPCOMP
460  */
461 	struct xfrm_id		id;
462 
463 /* Source address of tunnel. Ignored, if it is not a tunnel. */
464 	xfrm_address_t		saddr;
465 
466 	unsigned short		encap_family;
467 
468 	u32			reqid;
469 
470 /* Mode: transport, tunnel etc. */
471 	u8			mode;
472 
473 /* Sharing mode: unique, this session only, this user only etc. */
474 	u8			share;
475 
476 /* May skip this transformation if no SA is found */
477 	u8			optional;
478 
479 /* Skip aalgos/ealgos/calgos checks. */
480 	u8			allalgs;
481 
482 /* Bit mask of algos allowed for acquisition */
483 	u32			aalgos;
484 	u32			ealgos;
485 	u32			calgos;
486 };
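
/*
 * Example (illustrative only, field values are placeholders): a template
 * requesting tunnel-mode ESP to a fixed security gateway, with the SPI
 * left to be acquired and SAs shared by reqid:
 *
 *	struct xfrm_tmpl t = {
 *		.id.proto	= IPPROTO_ESP,
 *		.id.daddr.a4	= <gateway address>,
 *		.saddr.a4	= <local address>,
 *		.encap_family	= AF_INET,
 *		.reqid		= 1,
 *		.mode		= XFRM_MODE_TUNNEL,
 *		.share		= XFRM_SHARE_ANY,
 *	};
 */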
487 
488 #define XFRM_MAX_DEPTH		6
489 
490 struct xfrm_policy_walk_entry {
491 	struct list_head	all;
492 	u8			dead;
493 };
494 
495 struct xfrm_policy_walk {
496 	struct xfrm_policy_walk_entry walk;
497 	u8 type;
498 	u32 seq;
499 };
500 
501 struct xfrm_policy {
502 #ifdef CONFIG_NET_NS
503 	struct net		*xp_net;
504 #endif
505 	struct hlist_node	bydst;
506 	struct hlist_node	byidx;
507 
508 	/* This lock only affects elements except for entry. */
509 	rwlock_t		lock;
510 	atomic_t		refcnt;
511 	struct timer_list	timer;
512 
513 	struct flow_cache_object flo;
514 	atomic_t		genid;
515 	u32			priority;
516 	u32			index;
517 	struct xfrm_mark	mark;
518 	struct xfrm_selector	selector;
519 	struct xfrm_lifetime_cfg lft;
520 	struct xfrm_lifetime_cur curlft;
521 	struct xfrm_policy_walk_entry walk;
522 	u8			type;
523 	u8			action;
524 	u8			flags;
525 	u8			xfrm_nr;
526 	u16			family;
527 	struct xfrm_sec_ctx	*security;
528 	struct xfrm_tmpl       	xfrm_vec[XFRM_MAX_DEPTH];
529 };
530 
531 static inline struct net *xp_net(const struct xfrm_policy *xp)
532 {
533 	return read_pnet(&xp->xp_net);
534 }
535 
536 struct xfrm_kmaddress {
537 	xfrm_address_t          local;
538 	xfrm_address_t          remote;
539 	u32			reserved;
540 	u16			family;
541 };
542 
543 struct xfrm_migrate {
544 	xfrm_address_t		old_daddr;
545 	xfrm_address_t		old_saddr;
546 	xfrm_address_t		new_daddr;
547 	xfrm_address_t		new_saddr;
548 	u8			proto;
549 	u8			mode;
550 	u16			reserved;
551 	u32			reqid;
552 	u16			old_family;
553 	u16			new_family;
554 };
555 
556 #define XFRM_KM_TIMEOUT                30
557 /* which seqno */
558 #define XFRM_REPLAY_SEQ		1
559 #define XFRM_REPLAY_OSEQ	2
560 #define XFRM_REPLAY_SEQ_MASK	3
561 /* what happened */
562 #define XFRM_REPLAY_UPDATE	XFRM_AE_CR
563 #define XFRM_REPLAY_TIMEOUT	XFRM_AE_CE
564 
565 /* default aevent timeout in units of 100ms */
566 #define XFRM_AE_ETIME			10
567 /* Async Event timer multiplier */
568 #define XFRM_AE_ETH_M			10
569 /* default seq threshold size */
570 #define XFRM_AE_SEQT_SIZE		2
571 
572 struct xfrm_mgr {
573 	struct list_head	list;
574 	char			*id;
575 	int			(*notify)(struct xfrm_state *x, const struct km_event *c);
576 	int			(*acquire)(struct xfrm_state *x, struct xfrm_tmpl *, struct xfrm_policy *xp, int dir);
577 	struct xfrm_policy	*(*compile_policy)(struct sock *sk, int opt, u8 *data, int len, int *dir);
578 	int			(*new_mapping)(struct xfrm_state *x, xfrm_address_t *ipaddr, __be16 sport);
579 	int			(*notify_policy)(struct xfrm_policy *x, int dir, const struct km_event *c);
580 	int			(*report)(struct net *net, u8 proto, struct xfrm_selector *sel, xfrm_address_t *addr);
581 	int			(*migrate)(const struct xfrm_selector *sel,
582 					   u8 dir, u8 type,
583 					   const struct xfrm_migrate *m,
584 					   int num_bundles,
585 					   const struct xfrm_kmaddress *k);
586 };
587 
588 extern int xfrm_register_km(struct xfrm_mgr *km);
589 extern int xfrm_unregister_km(struct xfrm_mgr *km);
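
/*
 * A key manager (in practice af_key for PF_KEYv2 and xfrm_user for
 * netlink) fills in the callbacks it supports and registers the table.
 * Sketch only; the example_km_* names are hypothetical:
 *
 *	static struct xfrm_mgr example_km = {
 *		.id		= "example",
 *		.notify		= example_km_state_notify,
 *		.acquire	= example_km_acquire,
 *		.compile_policy	= example_km_compile_policy,
 *		.new_mapping	= example_km_new_mapping,
 *		.notify_policy	= example_km_policy_notify,
 *		.report		= example_km_report,
 *	};
 *
 *	xfrm_register_km(&example_km);
 *	...
 *	xfrm_unregister_km(&example_km);
 */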
590 
591 /*
592  * This structure is used while packets are being transformed by
593  * IPsec.  As soon as the packet leaves IPsec, the area beyond the
594  * generic IP part may be overwritten.
595  */
596 struct xfrm_skb_cb {
597 	union {
598 		struct inet_skb_parm h4;
599 		struct inet6_skb_parm h6;
600 	} header;
601 
602 	/* Sequence number for replay protection. */
603 	union {
604 		struct {
605 			__u32 low;
606 			__u32 hi;
607 		} output;
608 		struct {
609 			__be32 low;
610 			__be32 hi;
611 		} input;
612 	} seq;
613 };
614 
615 #define XFRM_SKB_CB(__skb) ((struct xfrm_skb_cb *)&((__skb)->cb[0]))
616 
617 /*
618  * This structure is used by the afinfo prepare_input/prepare_output functions
619  * to transmit header information to the mode input/output functions.
620  */
621 struct xfrm_mode_skb_cb {
622 	union {
623 		struct inet_skb_parm h4;
624 		struct inet6_skb_parm h6;
625 	} header;
626 
627 	/* Copied from header for IPv4, always set to zero and DF for IPv6. */
628 	__be16 id;
629 	__be16 frag_off;
630 
631 	/* IP header length (excluding options or extension headers). */
632 	u8 ihl;
633 
634 	/* TOS for IPv4, class for IPv6. */
635 	u8 tos;
636 
637 	/* TTL for IPv4, hop limit for IPv6. */
638 	u8 ttl;
639 
640 	/* Protocol for IPv4, NH for IPv6. */
641 	u8 protocol;
642 
643 	/* Option length for IPv4, zero for IPv6. */
644 	u8 optlen;
645 
646 	/* Used by IPv6 only, zero for IPv4. */
647 	u8 flow_lbl[3];
648 };
649 
650 #define XFRM_MODE_SKB_CB(__skb) ((struct xfrm_mode_skb_cb *)&((__skb)->cb[0]))
651 
652 /*
653  * This structure is used by the input processing to locate the SPI and
654  * related information.
655  */
656 struct xfrm_spi_skb_cb {
657 	union {
658 		struct inet_skb_parm h4;
659 		struct inet6_skb_parm h6;
660 	} header;
661 
662 	unsigned int daddroff;
663 	unsigned int family;
664 };
665 
666 #define XFRM_SPI_SKB_CB(__skb) ((struct xfrm_spi_skb_cb *)&((__skb)->cb[0]))
667 
668 /* Audit Information */
669 struct xfrm_audit {
670 	u32	secid;
671 	uid_t	loginuid;
672 	u32	sessionid;
673 };
674 
675 #ifdef CONFIG_AUDITSYSCALL
676 static inline struct audit_buffer *xfrm_audit_start(const char *op)
677 {
678 	struct audit_buffer *audit_buf = NULL;
679 
680 	if (audit_enabled == 0)
681 		return NULL;
682 	audit_buf = audit_log_start(current->audit_context, GFP_ATOMIC,
683 				    AUDIT_MAC_IPSEC_EVENT);
684 	if (audit_buf == NULL)
685 		return NULL;
686 	audit_log_format(audit_buf, "op=%s", op);
687 	return audit_buf;
688 }
689 
690 static inline void xfrm_audit_helper_usrinfo(uid_t auid, u32 ses, u32 secid,
691 					     struct audit_buffer *audit_buf)
692 {
693 	char *secctx;
694 	u32 secctx_len;
695 
696 	audit_log_format(audit_buf, " auid=%u ses=%u", auid, ses);
697 	if (secid != 0 &&
698 	    security_secid_to_secctx(secid, &secctx, &secctx_len) == 0) {
699 		audit_log_format(audit_buf, " subj=%s", secctx);
700 		security_release_secctx(secctx, secctx_len);
701 	} else
702 		audit_log_task_context(audit_buf);
703 }
704 
705 extern void xfrm_audit_policy_add(struct xfrm_policy *xp, int result,
706 				  u32 auid, u32 ses, u32 secid);
707 extern void xfrm_audit_policy_delete(struct xfrm_policy *xp, int result,
708 				  u32 auid, u32 ses, u32 secid);
709 extern void xfrm_audit_state_add(struct xfrm_state *x, int result,
710 				 u32 auid, u32 ses, u32 secid);
711 extern void xfrm_audit_state_delete(struct xfrm_state *x, int result,
712 				    u32 auid, u32 ses, u32 secid);
713 extern void xfrm_audit_state_replay_overflow(struct xfrm_state *x,
714 					     struct sk_buff *skb);
715 extern void xfrm_audit_state_replay(struct xfrm_state *x,
716 				    struct sk_buff *skb, __be32 net_seq);
717 extern void xfrm_audit_state_notfound_simple(struct sk_buff *skb, u16 family);
718 extern void xfrm_audit_state_notfound(struct sk_buff *skb, u16 family,
719 				      __be32 net_spi, __be32 net_seq);
720 extern void xfrm_audit_state_icvfail(struct xfrm_state *x,
721 				     struct sk_buff *skb, u8 proto);
722 #else
723 
724 static inline void xfrm_audit_policy_add(struct xfrm_policy *xp, int result,
725 				  u32 auid, u32 ses, u32 secid)
726 {
727 }
728 
729 static inline void xfrm_audit_policy_delete(struct xfrm_policy *xp, int result,
730 				  u32 auid, u32 ses, u32 secid)
731 {
732 }
733 
734 static inline void xfrm_audit_state_add(struct xfrm_state *x, int result,
735 				 u32 auid, u32 ses, u32 secid)
736 {
737 }
738 
739 static inline void xfrm_audit_state_delete(struct xfrm_state *x, int result,
740 				    u32 auid, u32 ses, u32 secid)
741 {
742 }
743 
744 static inline void xfrm_audit_state_replay_overflow(struct xfrm_state *x,
745 					     struct sk_buff *skb)
746 {
747 }
748 
749 static inline void xfrm_audit_state_replay(struct xfrm_state *x,
750 					   struct sk_buff *skb, __be32 net_seq)
751 {
752 }
753 
754 static inline void xfrm_audit_state_notfound_simple(struct sk_buff *skb,
755 				      u16 family)
756 {
757 }
758 
759 static inline void xfrm_audit_state_notfound(struct sk_buff *skb, u16 family,
760 				      __be32 net_spi, __be32 net_seq)
761 {
762 }
763 
764 static inline void xfrm_audit_state_icvfail(struct xfrm_state *x,
765 				     struct sk_buff *skb, u8 proto)
766 {
767 }
768 #endif /* CONFIG_AUDITSYSCALL */
769 
770 static inline void xfrm_pol_hold(struct xfrm_policy *policy)
771 {
772 	if (likely(policy != NULL))
773 		atomic_inc(&policy->refcnt);
774 }
775 
776 extern void xfrm_policy_destroy(struct xfrm_policy *policy);
777 
778 static inline void xfrm_pol_put(struct xfrm_policy *policy)
779 {
780 	if (atomic_dec_and_test(&policy->refcnt))
781 		xfrm_policy_destroy(policy);
782 }
783 
784 static inline void xfrm_pols_put(struct xfrm_policy **pols, int npols)
785 {
786 	int i;
787 	for (i = npols - 1; i >= 0; --i)
788 		xfrm_pol_put(pols[i]);
789 }
790 
791 extern void __xfrm_state_destroy(struct xfrm_state *);
792 
793 static inline void __xfrm_state_put(struct xfrm_state *x)
794 {
795 	atomic_dec(&x->refcnt);
796 }
797 
798 static inline void xfrm_state_put(struct xfrm_state *x)
799 {
800 	if (atomic_dec_and_test(&x->refcnt))
801 		__xfrm_state_destroy(x);
802 }
803 
804 static inline void xfrm_state_hold(struct xfrm_state *x)
805 {
806 	atomic_inc(&x->refcnt);
807 }
808 
809 static inline bool addr_match(const void *token1, const void *token2,
810 			      int prefixlen)
811 {
812 	const __be32 *a1 = token1;
813 	const __be32 *a2 = token2;
814 	int pdw;
815 	int pbi;
816 
817 	pdw = prefixlen >> 5;	  /* num of whole u32 in prefix */
818 	pbi = prefixlen &  0x1f;  /* num of bits in incomplete u32 in prefix */
819 
820 	if (pdw)
821 		if (memcmp(a1, a2, pdw << 2))
822 			return false;
823 
824 	if (pbi) {
825 		__be32 mask;
826 
827 		mask = htonl((0xffffffff) << (32 - pbi));
828 
829 		if ((a1[pdw] ^ a2[pdw]) & mask)
830 			return false;
831 	}
832 
833 	return true;
834 }
835 
836 static inline bool addr4_match(__be32 a1, __be32 a2, u8 prefixlen)
837 {
838 	/* C99 6.5.7 (3): u32 << 32 is undefined behaviour */
839 	if (prefixlen == 0)
840 		return true;
841 	return !((a1 ^ a2) & htonl(0xFFFFFFFFu << (32 - prefixlen)));
842 }
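
/*
 * Example (illustrative only): with prefixlen 24 only the top 24 bits are
 * compared, so 192.168.1.17 and 192.168.1.200 match while 192.168.2.1
 * does not; prefixlen 0 matches everything, which is why the undefined
 * 32-bit shift is special-cased above.
 *
 *	addr4_match(htonl(0xc0a80111), htonl(0xc0a801c8), 24)	-> true
 *	addr4_match(htonl(0xc0a80111), htonl(0xc0a80201), 24)	-> false
 */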
843 
844 static __inline__
845 __be16 xfrm_flowi_sport(const struct flowi *fl, const union flowi_uli *uli)
846 {
847 	__be16 port;
848 	switch(fl->flowi_proto) {
849 	case IPPROTO_TCP:
850 	case IPPROTO_UDP:
851 	case IPPROTO_UDPLITE:
852 	case IPPROTO_SCTP:
853 		port = uli->ports.sport;
854 		break;
855 	case IPPROTO_ICMP:
856 	case IPPROTO_ICMPV6:
857 		port = htons(uli->icmpt.type);
858 		break;
859 	case IPPROTO_MH:
860 		port = htons(uli->mht.type);
861 		break;
862 	case IPPROTO_GRE:
863 		port = htons(ntohl(uli->gre_key) >> 16);
864 		break;
865 	default:
866 		port = 0;	/*XXX*/
867 	}
868 	return port;
869 }
870 
871 static __inline__
872 __be16 xfrm_flowi_dport(const struct flowi *fl, const union flowi_uli *uli)
873 {
874 	__be16 port;
875 	switch(fl->flowi_proto) {
876 	case IPPROTO_TCP:
877 	case IPPROTO_UDP:
878 	case IPPROTO_UDPLITE:
879 	case IPPROTO_SCTP:
880 		port = uli->ports.dport;
881 		break;
882 	case IPPROTO_ICMP:
883 	case IPPROTO_ICMPV6:
884 		port = htons(uli->icmpt.code);
885 		break;
886 	case IPPROTO_GRE:
887 		port = htons(ntohl(uli->gre_key) & 0xffff);
888 		break;
889 	default:
890 		port = 0;	/*XXX*/
891 	}
892 	return port;
893 }
894 
895 extern bool xfrm_selector_match(const struct xfrm_selector *sel,
896 				const struct flowi *fl,
897 				unsigned short family);
898 
899 #ifdef CONFIG_SECURITY_NETWORK_XFRM
900 /*	If neither has a context --> match
901  * 	Otherwise, both must have a context and the sids, doi, alg must match
902  */
903 static inline bool xfrm_sec_ctx_match(struct xfrm_sec_ctx *s1, struct xfrm_sec_ctx *s2)
904 {
905 	return ((!s1 && !s2) ||
906 		(s1 && s2 &&
907 		 (s1->ctx_sid == s2->ctx_sid) &&
908 		 (s1->ctx_doi == s2->ctx_doi) &&
909 		 (s1->ctx_alg == s2->ctx_alg)));
910 }
911 #else
912 static inline bool xfrm_sec_ctx_match(struct xfrm_sec_ctx *s1, struct xfrm_sec_ctx *s2)
913 {
914 	return true;
915 }
916 #endif
917 
918 /* A struct encoding a bundle of transformations to apply to some set of flows.
919  *
920  * dst->child points to the next element of the bundle.
921  * dst->xfrm  points to an instance of a transformer.
922  *
923  * Due to unfortunate limitations of the current routing cache, which we
924  * have no time to fix, it mirrors struct rtable and is bound to the same
925  * routing key, including saddr and daddr. However, we can have many
926  * bundles differing by session id. All the bundles grow from a parent
927  * policy rule.
928  */
929 struct xfrm_dst {
930 	union {
931 		struct dst_entry	dst;
932 		struct rtable		rt;
933 		struct rt6_info		rt6;
934 	} u;
935 	struct dst_entry *route;
936 	struct flow_cache_object flo;
937 	struct xfrm_policy *pols[XFRM_POLICY_TYPE_MAX];
938 	int num_pols, num_xfrms;
939 #ifdef CONFIG_XFRM_SUB_POLICY
940 	struct flowi *origin;
941 	struct xfrm_selector *partner;
942 #endif
943 	u32 xfrm_genid;
944 	u32 policy_genid;
945 	u32 route_mtu_cached;
946 	u32 child_mtu_cached;
947 	u32 route_cookie;
948 	u32 path_cookie;
949 };
950 
951 #ifdef CONFIG_XFRM
952 static inline void xfrm_dst_destroy(struct xfrm_dst *xdst)
953 {
954 	xfrm_pols_put(xdst->pols, xdst->num_pols);
955 	dst_release(xdst->route);
956 	if (likely(xdst->u.dst.xfrm))
957 		xfrm_state_put(xdst->u.dst.xfrm);
958 #ifdef CONFIG_XFRM_SUB_POLICY
959 	kfree(xdst->origin);
960 	xdst->origin = NULL;
961 	kfree(xdst->partner);
962 	xdst->partner = NULL;
963 #endif
964 }
965 #endif
966 
967 extern void xfrm_dst_ifdown(struct dst_entry *dst, struct net_device *dev);
968 
969 struct sec_path {
970 	atomic_t		refcnt;
971 	int			len;
972 	struct xfrm_state	*xvec[XFRM_MAX_DEPTH];
973 };
974 
975 static inline int secpath_exists(struct sk_buff *skb)
976 {
977 #ifdef CONFIG_XFRM
978 	return skb->sp != NULL;
979 #else
980 	return 0;
981 #endif
982 }
983 
984 static inline struct sec_path *
985 secpath_get(struct sec_path *sp)
986 {
987 	if (sp)
988 		atomic_inc(&sp->refcnt);
989 	return sp;
990 }
991 
992 extern void __secpath_destroy(struct sec_path *sp);
993 
994 static inline void
995 secpath_put(struct sec_path *sp)
996 {
997 	if (sp && atomic_dec_and_test(&sp->refcnt))
998 		__secpath_destroy(sp);
999 }
1000 
1001 extern struct sec_path *secpath_dup(struct sec_path *src);
1002 
1003 static inline void
1004 secpath_reset(struct sk_buff *skb)
1005 {
1006 #ifdef CONFIG_XFRM
1007 	secpath_put(skb->sp);
1008 	skb->sp = NULL;
1009 #endif
1010 }
1011 
1012 static inline int
1013 xfrm_addr_any(const xfrm_address_t *addr, unsigned short family)
1014 {
1015 	switch (family) {
1016 	case AF_INET:
1017 		return addr->a4 == 0;
1018 	case AF_INET6:
1019 		return ipv6_addr_any((struct in6_addr *)&addr->a6);
1020 	}
1021 	return 0;
1022 }
1023 
1024 static inline int
1025 __xfrm4_state_addr_cmp(const struct xfrm_tmpl *tmpl, const struct xfrm_state *x)
1026 {
1027 	return	(tmpl->saddr.a4 &&
1028 		 tmpl->saddr.a4 != x->props.saddr.a4);
1029 }
1030 
1031 static inline int
1032 __xfrm6_state_addr_cmp(const struct xfrm_tmpl *tmpl, const struct xfrm_state *x)
1033 {
1034 	return	(!ipv6_addr_any((struct in6_addr*)&tmpl->saddr) &&
1035 		 ipv6_addr_cmp((struct in6_addr *)&tmpl->saddr, (struct in6_addr*)&x->props.saddr));
1036 }
1037 
1038 static inline int
1039 xfrm_state_addr_cmp(const struct xfrm_tmpl *tmpl, const struct xfrm_state *x, unsigned short family)
1040 {
1041 	switch (family) {
1042 	case AF_INET:
1043 		return __xfrm4_state_addr_cmp(tmpl, x);
1044 	case AF_INET6:
1045 		return __xfrm6_state_addr_cmp(tmpl, x);
1046 	}
1047 	return !0;
1048 }
1049 
1050 #ifdef CONFIG_XFRM
1051 extern int __xfrm_policy_check(struct sock *, int dir, struct sk_buff *skb, unsigned short family);
1052 
1053 static inline int __xfrm_policy_check2(struct sock *sk, int dir,
1054 				       struct sk_buff *skb,
1055 				       unsigned int family, int reverse)
1056 {
1057 	struct net *net = dev_net(skb->dev);
1058 	int ndir = dir | (reverse ? XFRM_POLICY_MASK + 1 : 0);
1059 
1060 	if (sk && sk->sk_policy[XFRM_POLICY_IN])
1061 		return __xfrm_policy_check(sk, ndir, skb, family);
1062 
1063 	return	(!net->xfrm.policy_count[dir] && !skb->sp) ||
1064 		(skb_dst(skb)->flags & DST_NOPOLICY) ||
1065 		__xfrm_policy_check(sk, ndir, skb, family);
1066 }
1067 
1068 static inline int xfrm_policy_check(struct sock *sk, int dir, struct sk_buff *skb, unsigned short family)
1069 {
1070 	return __xfrm_policy_check2(sk, dir, skb, family, 0);
1071 }
1072 
1073 static inline int xfrm4_policy_check(struct sock *sk, int dir, struct sk_buff *skb)
1074 {
1075 	return xfrm_policy_check(sk, dir, skb, AF_INET);
1076 }
1077 
1078 static inline int xfrm6_policy_check(struct sock *sk, int dir, struct sk_buff *skb)
1079 {
1080 	return xfrm_policy_check(sk, dir, skb, AF_INET6);
1081 }
1082 
1083 static inline int xfrm4_policy_check_reverse(struct sock *sk, int dir,
1084 					     struct sk_buff *skb)
1085 {
1086 	return __xfrm_policy_check2(sk, dir, skb, AF_INET, 1);
1087 }
1088 
1089 static inline int xfrm6_policy_check_reverse(struct sock *sk, int dir,
1090 					     struct sk_buff *skb)
1091 {
1092 	return __xfrm_policy_check2(sk, dir, skb, AF_INET6, 1);
1093 }
1094 
1095 extern int __xfrm_decode_session(struct sk_buff *skb, struct flowi *fl,
1096 				 unsigned int family, int reverse);
1097 
1098 static inline int xfrm_decode_session(struct sk_buff *skb, struct flowi *fl,
1099 				      unsigned int family)
1100 {
1101 	return __xfrm_decode_session(skb, fl, family, 0);
1102 }
1103 
1104 static inline int xfrm_decode_session_reverse(struct sk_buff *skb,
1105 					      struct flowi *fl,
1106 					      unsigned int family)
1107 {
1108 	return __xfrm_decode_session(skb, fl, family, 1);
1109 }
1110 
1111 extern int __xfrm_route_forward(struct sk_buff *skb, unsigned short family);
1112 
1113 static inline int xfrm_route_forward(struct sk_buff *skb, unsigned short family)
1114 {
1115 	struct net *net = dev_net(skb->dev);
1116 
1117 	return	!net->xfrm.policy_count[XFRM_POLICY_OUT] ||
1118 		(skb_dst(skb)->flags & DST_NOXFRM) ||
1119 		__xfrm_route_forward(skb, family);
1120 }
1121 
1122 static inline int xfrm4_route_forward(struct sk_buff *skb)
1123 {
1124 	return xfrm_route_forward(skb, AF_INET);
1125 }
1126 
1127 static inline int xfrm6_route_forward(struct sk_buff *skb)
1128 {
1129 	return xfrm_route_forward(skb, AF_INET6);
1130 }
1131 
1132 extern int __xfrm_sk_clone_policy(struct sock *sk);
1133 
1134 static inline int xfrm_sk_clone_policy(struct sock *sk)
1135 {
1136 	if (unlikely(sk->sk_policy[0] || sk->sk_policy[1]))
1137 		return __xfrm_sk_clone_policy(sk);
1138 	return 0;
1139 }
1140 
1141 extern int xfrm_policy_delete(struct xfrm_policy *pol, int dir);
1142 
1143 static inline void xfrm_sk_free_policy(struct sock *sk)
1144 {
1145 	if (unlikely(sk->sk_policy[0] != NULL)) {
1146 		xfrm_policy_delete(sk->sk_policy[0], XFRM_POLICY_MAX);
1147 		sk->sk_policy[0] = NULL;
1148 	}
1149 	if (unlikely(sk->sk_policy[1] != NULL)) {
1150 		xfrm_policy_delete(sk->sk_policy[1], XFRM_POLICY_MAX+1);
1151 		sk->sk_policy[1] = NULL;
1152 	}
1153 }
1154 
1155 #else
1156 
1157 static inline void xfrm_sk_free_policy(struct sock *sk) {}
1158 static inline int xfrm_sk_clone_policy(struct sock *sk) { return 0; }
1159 static inline int xfrm6_route_forward(struct sk_buff *skb) { return 1; }
1160 static inline int xfrm4_route_forward(struct sk_buff *skb) { return 1; }
1161 static inline int xfrm6_policy_check(struct sock *sk, int dir, struct sk_buff *skb)
1162 {
1163 	return 1;
1164 }
1165 static inline int xfrm4_policy_check(struct sock *sk, int dir, struct sk_buff *skb)
1166 {
1167 	return 1;
1168 }
1169 static inline int xfrm_policy_check(struct sock *sk, int dir, struct sk_buff *skb, unsigned short family)
1170 {
1171 	return 1;
1172 }
1173 static inline int xfrm_decode_session_reverse(struct sk_buff *skb,
1174 					      struct flowi *fl,
1175 					      unsigned int family)
1176 {
1177 	return -ENOSYS;
1178 }
1179 static inline int xfrm4_policy_check_reverse(struct sock *sk, int dir,
1180 					     struct sk_buff *skb)
1181 {
1182 	return 1;
1183 }
1184 static inline int xfrm6_policy_check_reverse(struct sock *sk, int dir,
1185 					     struct sk_buff *skb)
1186 {
1187 	return 1;
1188 }
1189 #endif
1190 
1191 static __inline__
1192 xfrm_address_t *xfrm_flowi_daddr(const struct flowi *fl, unsigned short family)
1193 {
1194 	switch (family){
1195 	case AF_INET:
1196 		return (xfrm_address_t *)&fl->u.ip4.daddr;
1197 	case AF_INET6:
1198 		return (xfrm_address_t *)&fl->u.ip6.daddr;
1199 	}
1200 	return NULL;
1201 }
1202 
1203 static __inline__
1204 xfrm_address_t *xfrm_flowi_saddr(const struct flowi *fl, unsigned short family)
1205 {
1206 	switch (family){
1207 	case AF_INET:
1208 		return (xfrm_address_t *)&fl->u.ip4.saddr;
1209 	case AF_INET6:
1210 		return (xfrm_address_t *)&fl->u.ip6.saddr;
1211 	}
1212 	return NULL;
1213 }
1214 
1215 static __inline__
1216 void xfrm_flowi_addr_get(const struct flowi *fl,
1217 			 xfrm_address_t *saddr, xfrm_address_t *daddr,
1218 			 unsigned short family)
1219 {
1220 	switch(family) {
1221 	case AF_INET:
1222 		memcpy(&saddr->a4, &fl->u.ip4.saddr, sizeof(saddr->a4));
1223 		memcpy(&daddr->a4, &fl->u.ip4.daddr, sizeof(daddr->a4));
1224 		break;
1225 	case AF_INET6:
1226 		*(struct in6_addr *)saddr->a6 = fl->u.ip6.saddr;
1227 		*(struct in6_addr *)daddr->a6 = fl->u.ip6.daddr;
1228 		break;
1229 	}
1230 }
1231 
1232 static __inline__ int
1233 __xfrm4_state_addr_check(const struct xfrm_state *x,
1234 			 const xfrm_address_t *daddr, const xfrm_address_t *saddr)
1235 {
1236 	if (daddr->a4 == x->id.daddr.a4 &&
1237 	    (saddr->a4 == x->props.saddr.a4 || !saddr->a4 || !x->props.saddr.a4))
1238 		return 1;
1239 	return 0;
1240 }
1241 
1242 static __inline__ int
1243 __xfrm6_state_addr_check(const struct xfrm_state *x,
1244 			 const xfrm_address_t *daddr, const xfrm_address_t *saddr)
1245 {
1246 	if (!ipv6_addr_cmp((struct in6_addr *)daddr, (struct in6_addr *)&x->id.daddr) &&
1247 	    (!ipv6_addr_cmp((struct in6_addr *)saddr, (struct in6_addr *)&x->props.saddr)||
1248 	     ipv6_addr_any((struct in6_addr *)saddr) ||
1249 	     ipv6_addr_any((struct in6_addr *)&x->props.saddr)))
1250 		return 1;
1251 	return 0;
1252 }
1253 
1254 static __inline__ int
1255 xfrm_state_addr_check(const struct xfrm_state *x,
1256 		      const xfrm_address_t *daddr, const xfrm_address_t *saddr,
1257 		      unsigned short family)
1258 {
1259 	switch (family) {
1260 	case AF_INET:
1261 		return __xfrm4_state_addr_check(x, daddr, saddr);
1262 	case AF_INET6:
1263 		return __xfrm6_state_addr_check(x, daddr, saddr);
1264 	}
1265 	return 0;
1266 }
1267 
1268 static __inline__ int
1269 xfrm_state_addr_flow_check(const struct xfrm_state *x, const struct flowi *fl,
1270 			   unsigned short family)
1271 {
1272 	switch (family) {
1273 	case AF_INET:
1274 		return __xfrm4_state_addr_check(x,
1275 						(const xfrm_address_t *)&fl->u.ip4.daddr,
1276 						(const xfrm_address_t *)&fl->u.ip4.saddr);
1277 	case AF_INET6:
1278 		return __xfrm6_state_addr_check(x,
1279 						(const xfrm_address_t *)&fl->u.ip6.daddr,
1280 						(const xfrm_address_t *)&fl->u.ip6.saddr);
1281 	}
1282 	return 0;
1283 }
1284 
1285 static inline int xfrm_state_kern(const struct xfrm_state *x)
1286 {
1287 	return atomic_read(&x->tunnel_users);
1288 }
1289 
1290 static inline int xfrm_id_proto_match(u8 proto, u8 userproto)
1291 {
1292 	return (!userproto || proto == userproto ||
1293 		(userproto == IPSEC_PROTO_ANY && (proto == IPPROTO_AH ||
1294 						  proto == IPPROTO_ESP ||
1295 						  proto == IPPROTO_COMP)));
1296 }
1297 
1298 /*
1299  * xfrm algorithm information
1300  */
1301 struct xfrm_algo_aead_info {
1302 	u16 icv_truncbits;
1303 };
1304 
1305 struct xfrm_algo_auth_info {
1306 	u16 icv_truncbits;
1307 	u16 icv_fullbits;
1308 };
1309 
1310 struct xfrm_algo_encr_info {
1311 	u16 blockbits;
1312 	u16 defkeybits;
1313 };
1314 
1315 struct xfrm_algo_comp_info {
1316 	u16 threshold;
1317 };
1318 
1319 struct xfrm_algo_desc {
1320 	char *name;
1321 	char *compat;
1322 	u8 available:1;
1323 	union {
1324 		struct xfrm_algo_aead_info aead;
1325 		struct xfrm_algo_auth_info auth;
1326 		struct xfrm_algo_encr_info encr;
1327 		struct xfrm_algo_comp_info comp;
1328 	} uinfo;
1329 	struct sadb_alg desc;
1330 };
1331 
1332 /* XFRM tunnel handlers.  */
1333 struct xfrm_tunnel {
1334 	int (*handler)(struct sk_buff *skb);
1335 	int (*err_handler)(struct sk_buff *skb, u32 info);
1336 
1337 	struct xfrm_tunnel __rcu *next;
1338 	int priority;
1339 };
1340 
1341 struct xfrm6_tunnel {
1342 	int (*handler)(struct sk_buff *skb);
1343 	int (*err_handler)(struct sk_buff *skb, struct inet6_skb_parm *opt,
1344 			   u8 type, u8 code, int offset, __be32 info);
1345 	struct xfrm6_tunnel __rcu *next;
1346 	int priority;
1347 };
1348 
1349 extern void xfrm_init(void);
1350 extern void xfrm4_init(int rt_hash_size);
1351 extern int xfrm_state_init(struct net *net);
1352 extern void xfrm_state_fini(struct net *net);
1353 extern void xfrm4_state_init(void);
1354 #ifdef CONFIG_XFRM
1355 extern int xfrm6_init(void);
1356 extern void xfrm6_fini(void);
1357 extern int xfrm6_state_init(void);
1358 extern void xfrm6_state_fini(void);
1359 #else
1360 static inline int xfrm6_init(void)
1361 {
1362 	return 0;
1363 }
1364 static inline void xfrm6_fini(void)
1365 {
1366 	;
1367 }
1368 #endif
1369 
1370 #ifdef CONFIG_XFRM_STATISTICS
1371 extern int xfrm_proc_init(struct net *net);
1372 extern void xfrm_proc_fini(struct net *net);
1373 #endif
1374 
1375 extern int xfrm_sysctl_init(struct net *net);
1376 #ifdef CONFIG_SYSCTL
1377 extern void xfrm_sysctl_fini(struct net *net);
1378 #else
1379 static inline void xfrm_sysctl_fini(struct net *net)
1380 {
1381 }
1382 #endif
1383 
1384 extern void xfrm_state_walk_init(struct xfrm_state_walk *walk, u8 proto);
1385 extern int xfrm_state_walk(struct net *net, struct xfrm_state_walk *walk,
1386 			   int (*func)(struct xfrm_state *, int, void*), void *);
1387 extern void xfrm_state_walk_done(struct xfrm_state_walk *walk);
1388 extern struct xfrm_state *xfrm_state_alloc(struct net *net);
1389 extern struct xfrm_state *xfrm_state_find(const xfrm_address_t *daddr,
1390 					  const xfrm_address_t *saddr,
1391 					  const struct flowi *fl,
1392 					  struct xfrm_tmpl *tmpl,
1393 					  struct xfrm_policy *pol, int *err,
1394 					  unsigned short family);
1395 extern struct xfrm_state *xfrm_stateonly_find(struct net *net, u32 mark,
1396 					       xfrm_address_t *daddr,
1397 					       xfrm_address_t *saddr,
1398 					       unsigned short family,
1399 					       u8 mode, u8 proto, u32 reqid);
1400 extern int xfrm_state_check_expire(struct xfrm_state *x);
1401 extern void xfrm_state_insert(struct xfrm_state *x);
1402 extern int xfrm_state_add(struct xfrm_state *x);
1403 extern int xfrm_state_update(struct xfrm_state *x);
1404 extern struct xfrm_state *xfrm_state_lookup(struct net *net, u32 mark,
1405 					    const xfrm_address_t *daddr, __be32 spi,
1406 					    u8 proto, unsigned short family);
1407 extern struct xfrm_state *xfrm_state_lookup_byaddr(struct net *net, u32 mark,
1408 						   const xfrm_address_t *daddr,
1409 						   const xfrm_address_t *saddr,
1410 						   u8 proto,
1411 						   unsigned short family);
1412 #ifdef CONFIG_XFRM_SUB_POLICY
1413 extern int xfrm_tmpl_sort(struct xfrm_tmpl **dst, struct xfrm_tmpl **src,
1414 			  int n, unsigned short family);
1415 extern int xfrm_state_sort(struct xfrm_state **dst, struct xfrm_state **src,
1416 			   int n, unsigned short family);
1417 #else
1418 static inline int xfrm_tmpl_sort(struct xfrm_tmpl **dst, struct xfrm_tmpl **src,
1419 				 int n, unsigned short family)
1420 {
1421 	return -ENOSYS;
1422 }
1423 
1424 static inline int xfrm_state_sort(struct xfrm_state **dst, struct xfrm_state **src,
1425 				  int n, unsigned short family)
1426 {
1427 	return -ENOSYS;
1428 }
1429 #endif
1430 
1431 struct xfrmk_sadinfo {
1432 	u32 sadhcnt; /* current hash bkts */
1433 	u32 sadhmcnt; /* max allowed hash bkts */
1434 	u32 sadcnt; /* current running count */
1435 };
1436 
1437 struct xfrmk_spdinfo {
1438 	u32 incnt;
1439 	u32 outcnt;
1440 	u32 fwdcnt;
1441 	u32 inscnt;
1442 	u32 outscnt;
1443 	u32 fwdscnt;
1444 	u32 spdhcnt;
1445 	u32 spdhmcnt;
1446 };
1447 
1448 extern struct xfrm_state *xfrm_find_acq_byseq(struct net *net, u32 mark,
1449 					      u32 seq);
1450 extern int xfrm_state_delete(struct xfrm_state *x);
1451 extern int xfrm_state_flush(struct net *net, u8 proto, struct xfrm_audit *audit_info);
1452 extern void xfrm_sad_getinfo(struct net *net, struct xfrmk_sadinfo *si);
1453 extern void xfrm_spd_getinfo(struct net *net, struct xfrmk_spdinfo *si);
1454 extern u32 xfrm_replay_seqhi(struct xfrm_state *x, __be32 net_seq);
1455 extern int xfrm_init_replay(struct xfrm_state *x);
1456 extern int xfrm_state_mtu(struct xfrm_state *x, int mtu);
1457 extern int __xfrm_init_state(struct xfrm_state *x, bool init_replay);
1458 extern int xfrm_init_state(struct xfrm_state *x);
1459 extern int xfrm_prepare_input(struct xfrm_state *x, struct sk_buff *skb);
1460 extern int xfrm_input(struct sk_buff *skb, int nexthdr, __be32 spi,
1461 		      int encap_type);
1462 extern int xfrm_input_resume(struct sk_buff *skb, int nexthdr);
1463 extern int xfrm_output_resume(struct sk_buff *skb, int err);
1464 extern int xfrm_output(struct sk_buff *skb);
1465 extern int xfrm_inner_extract_output(struct xfrm_state *x, struct sk_buff *skb);
1466 extern int xfrm4_extract_header(struct sk_buff *skb);
1467 extern int xfrm4_extract_input(struct xfrm_state *x, struct sk_buff *skb);
1468 extern int xfrm4_rcv_encap(struct sk_buff *skb, int nexthdr, __be32 spi,
1469 			   int encap_type);
1470 extern int xfrm4_transport_finish(struct sk_buff *skb, int async);
1471 extern int xfrm4_rcv(struct sk_buff *skb);
1472 
1473 static inline int xfrm4_rcv_spi(struct sk_buff *skb, int nexthdr, __be32 spi)
1474 {
1475 	return xfrm4_rcv_encap(skb, nexthdr, spi, 0);
1476 }
1477 
1478 extern int xfrm4_extract_output(struct xfrm_state *x, struct sk_buff *skb);
1479 extern int xfrm4_prepare_output(struct xfrm_state *x, struct sk_buff *skb);
1480 extern int xfrm4_output(struct sk_buff *skb);
1481 extern int xfrm4_output_finish(struct sk_buff *skb);
1482 extern int xfrm4_tunnel_register(struct xfrm_tunnel *handler, unsigned short family);
1483 extern int xfrm4_tunnel_deregister(struct xfrm_tunnel *handler, unsigned short family);
1484 extern int xfrm4_mode_tunnel_input_register(struct xfrm_tunnel *handler);
1485 extern int xfrm4_mode_tunnel_input_deregister(struct xfrm_tunnel *handler);
1486 extern int xfrm6_extract_header(struct sk_buff *skb);
1487 extern int xfrm6_extract_input(struct xfrm_state *x, struct sk_buff *skb);
1488 extern int xfrm6_rcv_spi(struct sk_buff *skb, int nexthdr, __be32 spi);
1489 extern int xfrm6_transport_finish(struct sk_buff *skb, int async);
1490 extern int xfrm6_rcv(struct sk_buff *skb);
1491 extern int xfrm6_input_addr(struct sk_buff *skb, xfrm_address_t *daddr,
1492 			    xfrm_address_t *saddr, u8 proto);
1493 extern int xfrm6_tunnel_register(struct xfrm6_tunnel *handler, unsigned short family);
1494 extern int xfrm6_tunnel_deregister(struct xfrm6_tunnel *handler, unsigned short family);
1495 extern __be32 xfrm6_tunnel_alloc_spi(struct net *net, xfrm_address_t *saddr);
1496 extern __be32 xfrm6_tunnel_spi_lookup(struct net *net, const xfrm_address_t *saddr);
1497 extern int xfrm6_extract_output(struct xfrm_state *x, struct sk_buff *skb);
1498 extern int xfrm6_prepare_output(struct xfrm_state *x, struct sk_buff *skb);
1499 extern int xfrm6_output(struct sk_buff *skb);
1500 extern int xfrm6_output_finish(struct sk_buff *skb);
1501 extern int xfrm6_find_1stfragopt(struct xfrm_state *x, struct sk_buff *skb,
1502 				 u8 **prevhdr);
1503 
1504 #ifdef CONFIG_XFRM
1505 extern int xfrm4_udp_encap_rcv(struct sock *sk, struct sk_buff *skb);
1506 extern int xfrm_user_policy(struct sock *sk, int optname, u8 __user *optval, int optlen);
1507 #else
1508 static inline int xfrm_user_policy(struct sock *sk, int optname, u8 __user *optval, int optlen)
1509 {
1510  	return -ENOPROTOOPT;
1511 }
1512 
1513 static inline int xfrm4_udp_encap_rcv(struct sock *sk, struct sk_buff *skb)
1514 {
1515  	/* should not happen */
1516  	kfree_skb(skb);
1517 	return 0;
1518 }
1519 #endif
1520 
1521 struct xfrm_policy *xfrm_policy_alloc(struct net *net, gfp_t gfp);
1522 
1523 extern void xfrm_policy_walk_init(struct xfrm_policy_walk *walk, u8 type);
1524 extern int xfrm_policy_walk(struct net *net, struct xfrm_policy_walk *walk,
1525 	int (*func)(struct xfrm_policy *, int, int, void*), void *);
1526 extern void xfrm_policy_walk_done(struct xfrm_policy_walk *walk);
1527 int xfrm_policy_insert(int dir, struct xfrm_policy *policy, int excl);
1528 struct xfrm_policy *xfrm_policy_bysel_ctx(struct net *net, u32 mark,
1529 					  u8 type, int dir,
1530 					  struct xfrm_selector *sel,
1531 					  struct xfrm_sec_ctx *ctx, int delete,
1532 					  int *err);
1533 struct xfrm_policy *xfrm_policy_byid(struct net *net, u32 mark, u8, int dir, u32 id, int delete, int *err);
1534 int xfrm_policy_flush(struct net *net, u8 type, struct xfrm_audit *audit_info);
1535 u32 xfrm_get_acqseq(void);
1536 extern int xfrm_alloc_spi(struct xfrm_state *x, u32 minspi, u32 maxspi);
1537 struct xfrm_state *xfrm_find_acq(struct net *net, struct xfrm_mark *mark,
1538 				 u8 mode, u32 reqid, u8 proto,
1539 				 const xfrm_address_t *daddr,
1540 				 const xfrm_address_t *saddr, int create,
1541 				 unsigned short family);
1542 extern int xfrm_sk_policy_insert(struct sock *sk, int dir, struct xfrm_policy *pol);
1543 
1544 #ifdef CONFIG_XFRM_MIGRATE
1545 extern int km_migrate(const struct xfrm_selector *sel, u8 dir, u8 type,
1546 		      const struct xfrm_migrate *m, int num_bundles,
1547 		      const struct xfrm_kmaddress *k);
1548 extern struct xfrm_state * xfrm_migrate_state_find(struct xfrm_migrate *m);
1549 extern struct xfrm_state * xfrm_state_migrate(struct xfrm_state *x,
1550 					      struct xfrm_migrate *m);
1551 extern int xfrm_migrate(const struct xfrm_selector *sel, u8 dir, u8 type,
1552 			struct xfrm_migrate *m, int num_bundles,
1553 			struct xfrm_kmaddress *k);
1554 #endif
1555 
1556 extern int km_new_mapping(struct xfrm_state *x, xfrm_address_t *ipaddr, __be16 sport);
1557 extern void km_policy_expired(struct xfrm_policy *pol, int dir, int hard, u32 pid);
1558 extern int km_report(struct net *net, u8 proto, struct xfrm_selector *sel, xfrm_address_t *addr);
1559 
1560 extern void xfrm_input_init(void);
1561 extern int xfrm_parse_spi(struct sk_buff *skb, u8 nexthdr, __be32 *spi, __be32 *seq);
1562 
1563 extern void xfrm_probe_algs(void);
1564 extern int xfrm_count_auth_supported(void);
1565 extern int xfrm_count_enc_supported(void);
1566 extern struct xfrm_algo_desc *xfrm_aalg_get_byidx(unsigned int idx);
1567 extern struct xfrm_algo_desc *xfrm_ealg_get_byidx(unsigned int idx);
1568 extern struct xfrm_algo_desc *xfrm_aalg_get_byid(int alg_id);
1569 extern struct xfrm_algo_desc *xfrm_ealg_get_byid(int alg_id);
1570 extern struct xfrm_algo_desc *xfrm_calg_get_byid(int alg_id);
1571 extern struct xfrm_algo_desc *xfrm_aalg_get_byname(const char *name, int probe);
1572 extern struct xfrm_algo_desc *xfrm_ealg_get_byname(const char *name, int probe);
1573 extern struct xfrm_algo_desc *xfrm_calg_get_byname(const char *name, int probe);
1574 extern struct xfrm_algo_desc *xfrm_aead_get_byname(const char *name, int icv_len,
1575 						   int probe);
1576 
1577 static inline int xfrm_addr_cmp(const xfrm_address_t *a,
1578 				const xfrm_address_t *b,
1579 				int family)
1580 {
1581 	switch (family) {
1582 	default:
1583 	case AF_INET:
1584 		return (__force u32)a->a4 - (__force u32)b->a4;
1585 	case AF_INET6:
1586 		return ipv6_addr_cmp((const struct in6_addr *)a,
1587 				     (const struct in6_addr *)b);
1588 	}
1589 }
1590 
1591 static inline int xfrm_policy_id2dir(u32 index)
1592 {
1593 	return index & 7;
1594 }
1595 
1596 #ifdef CONFIG_XFRM
1597 static inline int xfrm_aevent_is_on(struct net *net)
1598 {
1599 	struct sock *nlsk;
1600 	int ret = 0;
1601 
1602 	rcu_read_lock();
1603 	nlsk = rcu_dereference(net->xfrm.nlsk);
1604 	if (nlsk)
1605 		ret = netlink_has_listeners(nlsk, XFRMNLGRP_AEVENTS);
1606 	rcu_read_unlock();
1607 	return ret;
1608 }
1609 #endif
1610 
1611 static inline int xfrm_alg_len(const struct xfrm_algo *alg)
1612 {
1613 	return sizeof(*alg) + ((alg->alg_key_len + 7) / 8);
1614 }
1615 
1616 static inline int xfrm_alg_auth_len(const struct xfrm_algo_auth *alg)
1617 {
1618 	return sizeof(*alg) + ((alg->alg_key_len + 7) / 8);
1619 }
1620 
1621 static inline int xfrm_replay_state_esn_len(struct xfrm_replay_state_esn *replay_esn)
1622 {
1623 	return sizeof(*replay_esn) + replay_esn->bmp_len * sizeof(__u32);
1624 }
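
/*
 * Sizing example (illustrative): these helpers return the fixed struct
 * size plus the variable-length tail, e.g. an xfrm_algo carrying a
 * 160-bit key occupies sizeof(struct xfrm_algo) + 20 bytes
 * ((160 + 7) / 8), and a replay_esn with bmp_len == 4 occupies
 * sizeof(struct xfrm_replay_state_esn) + 16 bytes.
 */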
1625 
1626 #ifdef CONFIG_XFRM_MIGRATE
1627 static inline int xfrm_replay_clone(struct xfrm_state *x,
1628 				     struct xfrm_state *orig)
1629 {
1630 	x->replay_esn = kzalloc(xfrm_replay_state_esn_len(orig->replay_esn),
1631 				GFP_KERNEL);
1632 	if (!x->replay_esn)
1633 		return -ENOMEM;
1634 
1635 	x->replay_esn->bmp_len = orig->replay_esn->bmp_len;
1636 	x->replay_esn->replay_window = orig->replay_esn->replay_window;
1637 
1638 	x->preplay_esn = kmemdup(x->replay_esn,
1639 				 xfrm_replay_state_esn_len(x->replay_esn),
1640 				 GFP_KERNEL);
1641 	if (!x->preplay_esn) {
1642 		kfree(x->replay_esn);
1643 		return -ENOMEM;
1644 	}
1645 
1646 	return 0;
1647 }
1648 
1649 static inline struct xfrm_algo *xfrm_algo_clone(struct xfrm_algo *orig)
1650 {
1651 	return kmemdup(orig, xfrm_alg_len(orig), GFP_KERNEL);
1652 }
1653 
1654 static inline struct xfrm_algo_auth *xfrm_algo_auth_clone(struct xfrm_algo_auth *orig)
1655 {
1656 	return kmemdup(orig, xfrm_alg_auth_len(orig), GFP_KERNEL);
1657 }
1658 
1659 static inline void xfrm_states_put(struct xfrm_state **states, int n)
1660 {
1661 	int i;
1662 	for (i = 0; i < n; i++)
1663 		xfrm_state_put(*(states + i));
1664 }
1665 
1666 static inline void xfrm_states_delete(struct xfrm_state **states, int n)
1667 {
1668 	int i;
1669 	for (i = 0; i < n; i++)
1670 		xfrm_state_delete(*(states + i));
1671 }
1672 #endif
1673 
1674 #ifdef CONFIG_XFRM
1675 static inline struct xfrm_state *xfrm_input_state(struct sk_buff *skb)
1676 {
1677 	return skb->sp->xvec[skb->sp->len - 1];
1678 }
1679 #endif
1680 
1681 static inline int xfrm_mark_get(struct nlattr **attrs, struct xfrm_mark *m)
1682 {
1683 	if (attrs[XFRMA_MARK])
1684 		memcpy(m, nla_data(attrs[XFRMA_MARK]), sizeof(struct xfrm_mark));
1685 	else
1686 		m->v = m->m = 0;
1687 
1688 	return m->v & m->m;
1689 }
1690 
1691 static inline int xfrm_mark_put(struct sk_buff *skb, const struct xfrm_mark *m)
1692 {
1693 	int ret = 0;
1694 
1695 	if (m->m | m->v)
1696 		ret = nla_put(skb, XFRMA_MARK, sizeof(struct xfrm_mark), m);
1697 	return ret;
1698 }
1699 
1700 #endif	/* _NET_XFRM_H */
1701