xref: /openbmc/linux/net/xfrm/xfrm_state.c (revision 752beb5e)
1 /*
2  * xfrm_state.c
3  *
4  * Changes:
5  *	Mitsuru KANDA @USAGI
6  * 	Kazunori MIYAZAWA @USAGI
7  * 	Kunihiro Ishiguro <kunihiro@ipinfusion.com>
8  * 		IPv6 support
9  * 	YOSHIFUJI Hideaki @USAGI
10  * 		Split up af-specific functions
11  *	Derek Atkins <derek@ihtfp.com>
12  *		Add UDP Encapsulation
13  *
14  */
15 
16 #include <linux/workqueue.h>
17 #include <net/xfrm.h>
18 #include <linux/pfkeyv2.h>
19 #include <linux/ipsec.h>
20 #include <linux/module.h>
21 #include <linux/cache.h>
22 #include <linux/audit.h>
23 #include <linux/uaccess.h>
24 #include <linux/ktime.h>
25 #include <linux/slab.h>
26 #include <linux/interrupt.h>
27 #include <linux/kernel.h>
28 
29 #include "xfrm_hash.h"
30 
31 #define xfrm_state_deref_prot(table, net) \
32 	rcu_dereference_protected((table), lockdep_is_held(&(net)->xfrm.xfrm_state_lock))
33 
34 static void xfrm_state_gc_task(struct work_struct *work);
35 
36 /* Each xfrm_state may be linked to two tables:
37 
38    1. Hash table by (spi,daddr,ah/esp) to find SA by SPI. (input,ctl)
39    2. Hash table by (daddr,family,reqid) to find what SAs exist for given
40       destination/tunnel endpoint. (output)
41  */
42 
43 static unsigned int xfrm_state_hashmax __read_mostly = 1 * 1024 * 1024;
44 static __read_mostly seqcount_t xfrm_state_hash_generation = SEQCNT_ZERO(xfrm_state_hash_generation);
45 static struct kmem_cache *xfrm_state_cache __ro_after_init;
46 
47 static DECLARE_WORK(xfrm_state_gc_work, xfrm_state_gc_task);
48 static HLIST_HEAD(xfrm_state_gc_list);
49 
50 static inline bool xfrm_state_hold_rcu(struct xfrm_state __rcu *x)
51 {
52 	return refcount_inc_not_zero(&x->refcnt);
53 }
54 
55 static inline unsigned int xfrm_dst_hash(struct net *net,
56 					 const xfrm_address_t *daddr,
57 					 const xfrm_address_t *saddr,
58 					 u32 reqid,
59 					 unsigned short family)
60 {
61 	return __xfrm_dst_hash(daddr, saddr, reqid, family, net->xfrm.state_hmask);
62 }
63 
64 static inline unsigned int xfrm_src_hash(struct net *net,
65 					 const xfrm_address_t *daddr,
66 					 const xfrm_address_t *saddr,
67 					 unsigned short family)
68 {
69 	return __xfrm_src_hash(daddr, saddr, family, net->xfrm.state_hmask);
70 }
71 
72 static inline unsigned int
73 xfrm_spi_hash(struct net *net, const xfrm_address_t *daddr,
74 	      __be32 spi, u8 proto, unsigned short family)
75 {
76 	return __xfrm_spi_hash(daddr, spi, proto, family, net->xfrm.state_hmask);
77 }
78 
79 static void xfrm_hash_transfer(struct hlist_head *list,
80 			       struct hlist_head *ndsttable,
81 			       struct hlist_head *nsrctable,
82 			       struct hlist_head *nspitable,
83 			       unsigned int nhashmask)
84 {
85 	struct hlist_node *tmp;
86 	struct xfrm_state *x;
87 
88 	hlist_for_each_entry_safe(x, tmp, list, bydst) {
89 		unsigned int h;
90 
91 		h = __xfrm_dst_hash(&x->id.daddr, &x->props.saddr,
92 				    x->props.reqid, x->props.family,
93 				    nhashmask);
94 		hlist_add_head_rcu(&x->bydst, ndsttable + h);
95 
96 		h = __xfrm_src_hash(&x->id.daddr, &x->props.saddr,
97 				    x->props.family,
98 				    nhashmask);
99 		hlist_add_head_rcu(&x->bysrc, nsrctable + h);
100 
101 		if (x->id.spi) {
102 			h = __xfrm_spi_hash(&x->id.daddr, x->id.spi,
103 					    x->id.proto, x->props.family,
104 					    nhashmask);
105 			hlist_add_head_rcu(&x->byspi, nspitable + h);
106 		}
107 	}
108 }
109 
110 static unsigned long xfrm_hash_new_size(unsigned int state_hmask)
111 {
112 	return ((state_hmask + 1) << 1) * sizeof(struct hlist_head);
113 }
114 
115 static void xfrm_hash_resize(struct work_struct *work)
116 {
117 	struct net *net = container_of(work, struct net, xfrm.state_hash_work);
118 	struct hlist_head *ndst, *nsrc, *nspi, *odst, *osrc, *ospi;
119 	unsigned long nsize, osize;
120 	unsigned int nhashmask, ohashmask;
121 	int i;
122 
123 	nsize = xfrm_hash_new_size(net->xfrm.state_hmask);
124 	ndst = xfrm_hash_alloc(nsize);
125 	if (!ndst)
126 		return;
127 	nsrc = xfrm_hash_alloc(nsize);
128 	if (!nsrc) {
129 		xfrm_hash_free(ndst, nsize);
130 		return;
131 	}
132 	nspi = xfrm_hash_alloc(nsize);
133 	if (!nspi) {
134 		xfrm_hash_free(ndst, nsize);
135 		xfrm_hash_free(nsrc, nsize);
136 		return;
137 	}
138 
139 	spin_lock_bh(&net->xfrm.xfrm_state_lock);
140 	write_seqcount_begin(&xfrm_state_hash_generation);
141 
142 	nhashmask = (nsize / sizeof(struct hlist_head)) - 1U;
143 	odst = xfrm_state_deref_prot(net->xfrm.state_bydst, net);
144 	for (i = net->xfrm.state_hmask; i >= 0; i--)
145 		xfrm_hash_transfer(odst + i, ndst, nsrc, nspi, nhashmask);
146 
147 	osrc = xfrm_state_deref_prot(net->xfrm.state_bysrc, net);
148 	ospi = xfrm_state_deref_prot(net->xfrm.state_byspi, net);
149 	ohashmask = net->xfrm.state_hmask;
150 
151 	rcu_assign_pointer(net->xfrm.state_bydst, ndst);
152 	rcu_assign_pointer(net->xfrm.state_bysrc, nsrc);
153 	rcu_assign_pointer(net->xfrm.state_byspi, nspi);
154 	net->xfrm.state_hmask = nhashmask;
155 
156 	write_seqcount_end(&xfrm_state_hash_generation);
157 	spin_unlock_bh(&net->xfrm.xfrm_state_lock);
158 
159 	osize = (ohashmask + 1) * sizeof(struct hlist_head);
160 
161 	synchronize_rcu();
162 
163 	xfrm_hash_free(odst, osize);
164 	xfrm_hash_free(osrc, osize);
165 	xfrm_hash_free(ospi, osize);
166 }
167 
168 static DEFINE_SPINLOCK(xfrm_state_afinfo_lock);
169 static struct xfrm_state_afinfo __rcu *xfrm_state_afinfo[NPROTO];
170 
171 static DEFINE_SPINLOCK(xfrm_state_gc_lock);
172 
173 int __xfrm_state_delete(struct xfrm_state *x);
174 
175 int km_query(struct xfrm_state *x, struct xfrm_tmpl *t, struct xfrm_policy *pol);
176 static bool km_is_alive(const struct km_event *c);
177 void km_state_expired(struct xfrm_state *x, int hard, u32 portid);
178 
179 static DEFINE_SPINLOCK(xfrm_type_lock);
180 int xfrm_register_type(const struct xfrm_type *type, unsigned short family)
181 {
182 	struct xfrm_state_afinfo *afinfo = xfrm_state_get_afinfo(family);
183 	const struct xfrm_type **typemap;
184 	int err = 0;
185 
186 	if (unlikely(afinfo == NULL))
187 		return -EAFNOSUPPORT;
188 	typemap = afinfo->type_map;
189 	spin_lock_bh(&xfrm_type_lock);
190 
191 	if (likely(typemap[type->proto] == NULL))
192 		typemap[type->proto] = type;
193 	else
194 		err = -EEXIST;
195 	spin_unlock_bh(&xfrm_type_lock);
196 	rcu_read_unlock();
197 	return err;
198 }
199 EXPORT_SYMBOL(xfrm_register_type);
200 
201 int xfrm_unregister_type(const struct xfrm_type *type, unsigned short family)
202 {
203 	struct xfrm_state_afinfo *afinfo = xfrm_state_get_afinfo(family);
204 	const struct xfrm_type **typemap;
205 	int err = 0;
206 
207 	if (unlikely(afinfo == NULL))
208 		return -EAFNOSUPPORT;
209 	typemap = afinfo->type_map;
210 	spin_lock_bh(&xfrm_type_lock);
211 
212 	if (unlikely(typemap[type->proto] != type))
213 		err = -ENOENT;
214 	else
215 		typemap[type->proto] = NULL;
216 	spin_unlock_bh(&xfrm_type_lock);
217 	rcu_read_unlock();
218 	return err;
219 }
220 EXPORT_SYMBOL(xfrm_unregister_type);
221 
222 static const struct xfrm_type *xfrm_get_type(u8 proto, unsigned short family)
223 {
224 	struct xfrm_state_afinfo *afinfo;
225 	const struct xfrm_type **typemap;
226 	const struct xfrm_type *type;
227 	int modload_attempted = 0;
228 
229 retry:
230 	afinfo = xfrm_state_get_afinfo(family);
231 	if (unlikely(afinfo == NULL))
232 		return NULL;
233 	typemap = afinfo->type_map;
234 
235 	type = READ_ONCE(typemap[proto]);
236 	if (unlikely(type && !try_module_get(type->owner)))
237 		type = NULL;
238 
239 	rcu_read_unlock();
240 
241 	if (!type && !modload_attempted) {
242 		request_module("xfrm-type-%d-%d", family, proto);
243 		modload_attempted = 1;
244 		goto retry;
245 	}
246 
247 	return type;
248 }
249 
250 static void xfrm_put_type(const struct xfrm_type *type)
251 {
252 	module_put(type->owner);
253 }
254 
255 static DEFINE_SPINLOCK(xfrm_type_offload_lock);
256 int xfrm_register_type_offload(const struct xfrm_type_offload *type,
257 			       unsigned short family)
258 {
259 	struct xfrm_state_afinfo *afinfo = xfrm_state_get_afinfo(family);
260 	const struct xfrm_type_offload **typemap;
261 	int err = 0;
262 
263 	if (unlikely(afinfo == NULL))
264 		return -EAFNOSUPPORT;
265 	typemap = afinfo->type_offload_map;
266 	spin_lock_bh(&xfrm_type_offload_lock);
267 
268 	if (likely(typemap[type->proto] == NULL))
269 		typemap[type->proto] = type;
270 	else
271 		err = -EEXIST;
272 	spin_unlock_bh(&xfrm_type_offload_lock);
273 	rcu_read_unlock();
274 	return err;
275 }
276 EXPORT_SYMBOL(xfrm_register_type_offload);
277 
278 int xfrm_unregister_type_offload(const struct xfrm_type_offload *type,
279 				 unsigned short family)
280 {
281 	struct xfrm_state_afinfo *afinfo = xfrm_state_get_afinfo(family);
282 	const struct xfrm_type_offload **typemap;
283 	int err = 0;
284 
285 	if (unlikely(afinfo == NULL))
286 		return -EAFNOSUPPORT;
287 	typemap = afinfo->type_offload_map;
288 	spin_lock_bh(&xfrm_type_offload_lock);
289 
290 	if (unlikely(typemap[type->proto] != type))
291 		err = -ENOENT;
292 	else
293 		typemap[type->proto] = NULL;
294 	spin_unlock_bh(&xfrm_type_offload_lock);
295 	rcu_read_unlock();
296 	return err;
297 }
298 EXPORT_SYMBOL(xfrm_unregister_type_offload);
299 
300 static const struct xfrm_type_offload *
301 xfrm_get_type_offload(u8 proto, unsigned short family, bool try_load)
302 {
303 	struct xfrm_state_afinfo *afinfo;
304 	const struct xfrm_type_offload **typemap;
305 	const struct xfrm_type_offload *type;
306 
307 retry:
308 	afinfo = xfrm_state_get_afinfo(family);
309 	if (unlikely(afinfo == NULL))
310 		return NULL;
311 	typemap = afinfo->type_offload_map;
312 
313 	type = typemap[proto];
314 	if ((type && !try_module_get(type->owner)))
315 		type = NULL;
316 
317 	rcu_read_unlock();
318 
319 	if (!type && try_load) {
320 		request_module("xfrm-offload-%d-%d", family, proto);
321 		try_load = false;
322 		goto retry;
323 	}
324 
325 	return type;
326 }
327 
328 static void xfrm_put_type_offload(const struct xfrm_type_offload *type)
329 {
330 	module_put(type->owner);
331 }
332 
333 static const struct xfrm_mode xfrm4_mode_map[XFRM_MODE_MAX] = {
334 	[XFRM_MODE_BEET] = {
335 		.encap = XFRM_MODE_BEET,
336 		.flags = XFRM_MODE_FLAG_TUNNEL,
337 		.family = AF_INET,
338 	},
339 	[XFRM_MODE_TRANSPORT] = {
340 		.encap = XFRM_MODE_TRANSPORT,
341 		.family = AF_INET,
342 	},
343 	[XFRM_MODE_TUNNEL] = {
344 		.encap = XFRM_MODE_TUNNEL,
345 		.flags = XFRM_MODE_FLAG_TUNNEL,
346 		.family = AF_INET,
347 	},
348 };
349 
350 static const struct xfrm_mode xfrm6_mode_map[XFRM_MODE_MAX] = {
351 	[XFRM_MODE_BEET] = {
352 		.encap = XFRM_MODE_BEET,
353 		.flags = XFRM_MODE_FLAG_TUNNEL,
354 		.family = AF_INET6,
355 	},
356 	[XFRM_MODE_ROUTEOPTIMIZATION] = {
357 		.encap = XFRM_MODE_ROUTEOPTIMIZATION,
358 		.family = AF_INET6,
359 	},
360 	[XFRM_MODE_TRANSPORT] = {
361 		.encap = XFRM_MODE_TRANSPORT,
362 		.family = AF_INET6,
363 	},
364 	[XFRM_MODE_TUNNEL] = {
365 		.encap = XFRM_MODE_TUNNEL,
366 		.flags = XFRM_MODE_FLAG_TUNNEL,
367 		.family = AF_INET6,
368 	},
369 };
370 
371 static const struct xfrm_mode *xfrm_get_mode(unsigned int encap, int family)
372 {
373 	const struct xfrm_mode *mode;
374 
375 	if (unlikely(encap >= XFRM_MODE_MAX))
376 		return NULL;
377 
378 	switch (family) {
379 	case AF_INET:
380 		mode = &xfrm4_mode_map[encap];
381 		if (mode->family == family)
382 			return mode;
383 		break;
384 	case AF_INET6:
385 		mode = &xfrm6_mode_map[encap];
386 		if (mode->family == family)
387 			return mode;
388 		break;
389 	default:
390 		break;
391 	}
392 
393 	return NULL;
394 }
395 
396 void xfrm_state_free(struct xfrm_state *x)
397 {
398 	kmem_cache_free(xfrm_state_cache, x);
399 }
400 EXPORT_SYMBOL(xfrm_state_free);
401 
402 static void ___xfrm_state_destroy(struct xfrm_state *x)
403 {
404 	hrtimer_cancel(&x->mtimer);
405 	del_timer_sync(&x->rtimer);
406 	kfree(x->aead);
407 	kfree(x->aalg);
408 	kfree(x->ealg);
409 	kfree(x->calg);
410 	kfree(x->encap);
411 	kfree(x->coaddr);
412 	kfree(x->replay_esn);
413 	kfree(x->preplay_esn);
414 	if (x->type_offload)
415 		xfrm_put_type_offload(x->type_offload);
416 	if (x->type) {
417 		x->type->destructor(x);
418 		xfrm_put_type(x->type);
419 	}
420 	xfrm_dev_state_free(x);
421 	security_xfrm_state_free(x);
422 	xfrm_state_free(x);
423 }
424 
425 static void xfrm_state_gc_task(struct work_struct *work)
426 {
427 	struct xfrm_state *x;
428 	struct hlist_node *tmp;
429 	struct hlist_head gc_list;
430 
431 	spin_lock_bh(&xfrm_state_gc_lock);
432 	hlist_move_list(&xfrm_state_gc_list, &gc_list);
433 	spin_unlock_bh(&xfrm_state_gc_lock);
434 
435 	synchronize_rcu();
436 
437 	hlist_for_each_entry_safe(x, tmp, &gc_list, gclist)
438 		___xfrm_state_destroy(x);
439 }
440 
441 static enum hrtimer_restart xfrm_timer_handler(struct hrtimer *me)
442 {
443 	struct xfrm_state *x = container_of(me, struct xfrm_state, mtimer);
444 	enum hrtimer_restart ret = HRTIMER_NORESTART;
445 	time64_t now = ktime_get_real_seconds();
446 	time64_t next = TIME64_MAX;
447 	int warn = 0;
448 	int err = 0;
449 
450 	spin_lock(&x->lock);
451 	if (x->km.state == XFRM_STATE_DEAD)
452 		goto out;
453 	if (x->km.state == XFRM_STATE_EXPIRED)
454 		goto expired;
455 	if (x->lft.hard_add_expires_seconds) {
456 		long tmo = x->lft.hard_add_expires_seconds +
457 			x->curlft.add_time - now;
458 		if (tmo <= 0) {
459 			if (x->xflags & XFRM_SOFT_EXPIRE) {
460 				/* enter hard expire without soft expire first?!
461 				 * setting a new date could trigger this.
462 				 * workaround: fix x->curflt.add_time by below:
463 				 */
464 				x->curlft.add_time = now - x->saved_tmo - 1;
465 				tmo = x->lft.hard_add_expires_seconds - x->saved_tmo;
466 			} else
467 				goto expired;
468 		}
469 		if (tmo < next)
470 			next = tmo;
471 	}
472 	if (x->lft.hard_use_expires_seconds) {
473 		long tmo = x->lft.hard_use_expires_seconds +
474 			(x->curlft.use_time ? : now) - now;
475 		if (tmo <= 0)
476 			goto expired;
477 		if (tmo < next)
478 			next = tmo;
479 	}
480 	if (x->km.dying)
481 		goto resched;
482 	if (x->lft.soft_add_expires_seconds) {
483 		long tmo = x->lft.soft_add_expires_seconds +
484 			x->curlft.add_time - now;
485 		if (tmo <= 0) {
486 			warn = 1;
487 			x->xflags &= ~XFRM_SOFT_EXPIRE;
488 		} else if (tmo < next) {
489 			next = tmo;
490 			x->xflags |= XFRM_SOFT_EXPIRE;
491 			x->saved_tmo = tmo;
492 		}
493 	}
494 	if (x->lft.soft_use_expires_seconds) {
495 		long tmo = x->lft.soft_use_expires_seconds +
496 			(x->curlft.use_time ? : now) - now;
497 		if (tmo <= 0)
498 			warn = 1;
499 		else if (tmo < next)
500 			next = tmo;
501 	}
502 
503 	x->km.dying = warn;
504 	if (warn)
505 		km_state_expired(x, 0, 0);
506 resched:
507 	if (next != TIME64_MAX) {
508 		hrtimer_forward_now(&x->mtimer, ktime_set(next, 0));
509 		ret = HRTIMER_RESTART;
510 	}
511 
512 	goto out;
513 
514 expired:
515 	if (x->km.state == XFRM_STATE_ACQ && x->id.spi == 0)
516 		x->km.state = XFRM_STATE_EXPIRED;
517 
518 	err = __xfrm_state_delete(x);
519 	if (!err)
520 		km_state_expired(x, 1, 0);
521 
522 	xfrm_audit_state_delete(x, err ? 0 : 1, true);
523 
524 out:
525 	spin_unlock(&x->lock);
526 	return ret;
527 }
528 
529 static void xfrm_replay_timer_handler(struct timer_list *t);
530 
531 struct xfrm_state *xfrm_state_alloc(struct net *net)
532 {
533 	struct xfrm_state *x;
534 
535 	x = kmem_cache_alloc(xfrm_state_cache, GFP_ATOMIC | __GFP_ZERO);
536 
537 	if (x) {
538 		write_pnet(&x->xs_net, net);
539 		refcount_set(&x->refcnt, 1);
540 		atomic_set(&x->tunnel_users, 0);
541 		INIT_LIST_HEAD(&x->km.all);
542 		INIT_HLIST_NODE(&x->bydst);
543 		INIT_HLIST_NODE(&x->bysrc);
544 		INIT_HLIST_NODE(&x->byspi);
545 		hrtimer_init(&x->mtimer, CLOCK_BOOTTIME, HRTIMER_MODE_ABS_SOFT);
546 		x->mtimer.function = xfrm_timer_handler;
547 		timer_setup(&x->rtimer, xfrm_replay_timer_handler, 0);
548 		x->curlft.add_time = ktime_get_real_seconds();
549 		x->lft.soft_byte_limit = XFRM_INF;
550 		x->lft.soft_packet_limit = XFRM_INF;
551 		x->lft.hard_byte_limit = XFRM_INF;
552 		x->lft.hard_packet_limit = XFRM_INF;
553 		x->replay_maxage = 0;
554 		x->replay_maxdiff = 0;
555 		spin_lock_init(&x->lock);
556 	}
557 	return x;
558 }
559 EXPORT_SYMBOL(xfrm_state_alloc);
560 
561 void __xfrm_state_destroy(struct xfrm_state *x, bool sync)
562 {
563 	WARN_ON(x->km.state != XFRM_STATE_DEAD);
564 
565 	if (sync) {
566 		synchronize_rcu();
567 		___xfrm_state_destroy(x);
568 	} else {
569 		spin_lock_bh(&xfrm_state_gc_lock);
570 		hlist_add_head(&x->gclist, &xfrm_state_gc_list);
571 		spin_unlock_bh(&xfrm_state_gc_lock);
572 		schedule_work(&xfrm_state_gc_work);
573 	}
574 }
575 EXPORT_SYMBOL(__xfrm_state_destroy);
576 
577 int __xfrm_state_delete(struct xfrm_state *x)
578 {
579 	struct net *net = xs_net(x);
580 	int err = -ESRCH;
581 
582 	if (x->km.state != XFRM_STATE_DEAD) {
583 		x->km.state = XFRM_STATE_DEAD;
584 		spin_lock(&net->xfrm.xfrm_state_lock);
585 		list_del(&x->km.all);
586 		hlist_del_rcu(&x->bydst);
587 		hlist_del_rcu(&x->bysrc);
588 		if (x->id.spi)
589 			hlist_del_rcu(&x->byspi);
590 		net->xfrm.state_num--;
591 		spin_unlock(&net->xfrm.xfrm_state_lock);
592 
593 		xfrm_dev_state_delete(x);
594 
595 		/* All xfrm_state objects are created by xfrm_state_alloc.
596 		 * The xfrm_state_alloc call gives a reference, and that
597 		 * is what we are dropping here.
598 		 */
599 		xfrm_state_put(x);
600 		err = 0;
601 	}
602 
603 	return err;
604 }
605 EXPORT_SYMBOL(__xfrm_state_delete);
606 
607 int xfrm_state_delete(struct xfrm_state *x)
608 {
609 	int err;
610 
611 	spin_lock_bh(&x->lock);
612 	err = __xfrm_state_delete(x);
613 	spin_unlock_bh(&x->lock);
614 
615 	return err;
616 }
617 EXPORT_SYMBOL(xfrm_state_delete);
618 
619 #ifdef CONFIG_SECURITY_NETWORK_XFRM
620 static inline int
621 xfrm_state_flush_secctx_check(struct net *net, u8 proto, bool task_valid)
622 {
623 	int i, err = 0;
624 
625 	for (i = 0; i <= net->xfrm.state_hmask; i++) {
626 		struct xfrm_state *x;
627 
628 		hlist_for_each_entry(x, net->xfrm.state_bydst+i, bydst) {
629 			if (xfrm_id_proto_match(x->id.proto, proto) &&
630 			   (err = security_xfrm_state_delete(x)) != 0) {
631 				xfrm_audit_state_delete(x, 0, task_valid);
632 				return err;
633 			}
634 		}
635 	}
636 
637 	return err;
638 }
639 
640 static inline int
641 xfrm_dev_state_flush_secctx_check(struct net *net, struct net_device *dev, bool task_valid)
642 {
643 	int i, err = 0;
644 
645 	for (i = 0; i <= net->xfrm.state_hmask; i++) {
646 		struct xfrm_state *x;
647 		struct xfrm_state_offload *xso;
648 
649 		hlist_for_each_entry(x, net->xfrm.state_bydst+i, bydst) {
650 			xso = &x->xso;
651 
652 			if (xso->dev == dev &&
653 			   (err = security_xfrm_state_delete(x)) != 0) {
654 				xfrm_audit_state_delete(x, 0, task_valid);
655 				return err;
656 			}
657 		}
658 	}
659 
660 	return err;
661 }
662 #else
663 static inline int
664 xfrm_state_flush_secctx_check(struct net *net, u8 proto, bool task_valid)
665 {
666 	return 0;
667 }
668 
669 static inline int
670 xfrm_dev_state_flush_secctx_check(struct net *net, struct net_device *dev, bool task_valid)
671 {
672 	return 0;
673 }
674 #endif
675 
676 int xfrm_state_flush(struct net *net, u8 proto, bool task_valid, bool sync)
677 {
678 	int i, err = 0, cnt = 0;
679 
680 	spin_lock_bh(&net->xfrm.xfrm_state_lock);
681 	err = xfrm_state_flush_secctx_check(net, proto, task_valid);
682 	if (err)
683 		goto out;
684 
685 	err = -ESRCH;
686 	for (i = 0; i <= net->xfrm.state_hmask; i++) {
687 		struct xfrm_state *x;
688 restart:
689 		hlist_for_each_entry(x, net->xfrm.state_bydst+i, bydst) {
690 			if (!xfrm_state_kern(x) &&
691 			    xfrm_id_proto_match(x->id.proto, proto)) {
692 				xfrm_state_hold(x);
693 				spin_unlock_bh(&net->xfrm.xfrm_state_lock);
694 
695 				err = xfrm_state_delete(x);
696 				xfrm_audit_state_delete(x, err ? 0 : 1,
697 							task_valid);
698 				if (sync)
699 					xfrm_state_put_sync(x);
700 				else
701 					xfrm_state_put(x);
702 				if (!err)
703 					cnt++;
704 
705 				spin_lock_bh(&net->xfrm.xfrm_state_lock);
706 				goto restart;
707 			}
708 		}
709 	}
710 out:
711 	spin_unlock_bh(&net->xfrm.xfrm_state_lock);
712 	if (cnt)
713 		err = 0;
714 
715 	return err;
716 }
717 EXPORT_SYMBOL(xfrm_state_flush);
718 
719 int xfrm_dev_state_flush(struct net *net, struct net_device *dev, bool task_valid)
720 {
721 	int i, err = 0, cnt = 0;
722 
723 	spin_lock_bh(&net->xfrm.xfrm_state_lock);
724 	err = xfrm_dev_state_flush_secctx_check(net, dev, task_valid);
725 	if (err)
726 		goto out;
727 
728 	err = -ESRCH;
729 	for (i = 0; i <= net->xfrm.state_hmask; i++) {
730 		struct xfrm_state *x;
731 		struct xfrm_state_offload *xso;
732 restart:
733 		hlist_for_each_entry(x, net->xfrm.state_bydst+i, bydst) {
734 			xso = &x->xso;
735 
736 			if (!xfrm_state_kern(x) && xso->dev == dev) {
737 				xfrm_state_hold(x);
738 				spin_unlock_bh(&net->xfrm.xfrm_state_lock);
739 
740 				err = xfrm_state_delete(x);
741 				xfrm_audit_state_delete(x, err ? 0 : 1,
742 							task_valid);
743 				xfrm_state_put(x);
744 				if (!err)
745 					cnt++;
746 
747 				spin_lock_bh(&net->xfrm.xfrm_state_lock);
748 				goto restart;
749 			}
750 		}
751 	}
752 	if (cnt)
753 		err = 0;
754 
755 out:
756 	spin_unlock_bh(&net->xfrm.xfrm_state_lock);
757 	return err;
758 }
759 EXPORT_SYMBOL(xfrm_dev_state_flush);
760 
761 void xfrm_sad_getinfo(struct net *net, struct xfrmk_sadinfo *si)
762 {
763 	spin_lock_bh(&net->xfrm.xfrm_state_lock);
764 	si->sadcnt = net->xfrm.state_num;
765 	si->sadhcnt = net->xfrm.state_hmask + 1;
766 	si->sadhmcnt = xfrm_state_hashmax;
767 	spin_unlock_bh(&net->xfrm.xfrm_state_lock);
768 }
769 EXPORT_SYMBOL(xfrm_sad_getinfo);
770 
771 static void
772 xfrm_init_tempstate(struct xfrm_state *x, const struct flowi *fl,
773 		    const struct xfrm_tmpl *tmpl,
774 		    const xfrm_address_t *daddr, const xfrm_address_t *saddr,
775 		    unsigned short family)
776 {
777 	struct xfrm_state_afinfo *afinfo = xfrm_state_afinfo_get_rcu(family);
778 
779 	if (!afinfo)
780 		return;
781 
782 	afinfo->init_tempsel(&x->sel, fl);
783 
784 	if (family != tmpl->encap_family) {
785 		afinfo = xfrm_state_afinfo_get_rcu(tmpl->encap_family);
786 		if (!afinfo)
787 			return;
788 	}
789 	afinfo->init_temprop(x, tmpl, daddr, saddr);
790 }
791 
792 static struct xfrm_state *__xfrm_state_lookup(struct net *net, u32 mark,
793 					      const xfrm_address_t *daddr,
794 					      __be32 spi, u8 proto,
795 					      unsigned short family)
796 {
797 	unsigned int h = xfrm_spi_hash(net, daddr, spi, proto, family);
798 	struct xfrm_state *x;
799 
800 	hlist_for_each_entry_rcu(x, net->xfrm.state_byspi + h, byspi) {
801 		if (x->props.family != family ||
802 		    x->id.spi       != spi ||
803 		    x->id.proto     != proto ||
804 		    !xfrm_addr_equal(&x->id.daddr, daddr, family))
805 			continue;
806 
807 		if ((mark & x->mark.m) != x->mark.v)
808 			continue;
809 		if (!xfrm_state_hold_rcu(x))
810 			continue;
811 		return x;
812 	}
813 
814 	return NULL;
815 }
816 
817 static struct xfrm_state *__xfrm_state_lookup_byaddr(struct net *net, u32 mark,
818 						     const xfrm_address_t *daddr,
819 						     const xfrm_address_t *saddr,
820 						     u8 proto, unsigned short family)
821 {
822 	unsigned int h = xfrm_src_hash(net, daddr, saddr, family);
823 	struct xfrm_state *x;
824 
825 	hlist_for_each_entry_rcu(x, net->xfrm.state_bysrc + h, bysrc) {
826 		if (x->props.family != family ||
827 		    x->id.proto     != proto ||
828 		    !xfrm_addr_equal(&x->id.daddr, daddr, family) ||
829 		    !xfrm_addr_equal(&x->props.saddr, saddr, family))
830 			continue;
831 
832 		if ((mark & x->mark.m) != x->mark.v)
833 			continue;
834 		if (!xfrm_state_hold_rcu(x))
835 			continue;
836 		return x;
837 	}
838 
839 	return NULL;
840 }
841 
842 static inline struct xfrm_state *
843 __xfrm_state_locate(struct xfrm_state *x, int use_spi, int family)
844 {
845 	struct net *net = xs_net(x);
846 	u32 mark = x->mark.v & x->mark.m;
847 
848 	if (use_spi)
849 		return __xfrm_state_lookup(net, mark, &x->id.daddr,
850 					   x->id.spi, x->id.proto, family);
851 	else
852 		return __xfrm_state_lookup_byaddr(net, mark,
853 						  &x->id.daddr,
854 						  &x->props.saddr,
855 						  x->id.proto, family);
856 }
857 
858 static void xfrm_hash_grow_check(struct net *net, int have_hash_collision)
859 {
860 	if (have_hash_collision &&
861 	    (net->xfrm.state_hmask + 1) < xfrm_state_hashmax &&
862 	    net->xfrm.state_num > net->xfrm.state_hmask)
863 		schedule_work(&net->xfrm.state_hash_work);
864 }
865 
866 static void xfrm_state_look_at(struct xfrm_policy *pol, struct xfrm_state *x,
867 			       const struct flowi *fl, unsigned short family,
868 			       struct xfrm_state **best, int *acq_in_progress,
869 			       int *error)
870 {
871 	/* Resolution logic:
872 	 * 1. There is a valid state with matching selector. Done.
873 	 * 2. Valid state with inappropriate selector. Skip.
874 	 *
875 	 * Entering area of "sysdeps".
876 	 *
877 	 * 3. If state is not valid, selector is temporary, it selects
878 	 *    only session which triggered previous resolution. Key
879 	 *    manager will do something to install a state with proper
880 	 *    selector.
881 	 */
882 	if (x->km.state == XFRM_STATE_VALID) {
883 		if ((x->sel.family &&
884 		     !xfrm_selector_match(&x->sel, fl, x->sel.family)) ||
885 		    !security_xfrm_state_pol_flow_match(x, pol, fl))
886 			return;
887 
888 		if (!*best ||
889 		    (*best)->km.dying > x->km.dying ||
890 		    ((*best)->km.dying == x->km.dying &&
891 		     (*best)->curlft.add_time < x->curlft.add_time))
892 			*best = x;
893 	} else if (x->km.state == XFRM_STATE_ACQ) {
894 		*acq_in_progress = 1;
895 	} else if (x->km.state == XFRM_STATE_ERROR ||
896 		   x->km.state == XFRM_STATE_EXPIRED) {
897 		if (xfrm_selector_match(&x->sel, fl, x->sel.family) &&
898 		    security_xfrm_state_pol_flow_match(x, pol, fl))
899 			*error = -ESRCH;
900 	}
901 }
902 
903 struct xfrm_state *
904 xfrm_state_find(const xfrm_address_t *daddr, const xfrm_address_t *saddr,
905 		const struct flowi *fl, struct xfrm_tmpl *tmpl,
906 		struct xfrm_policy *pol, int *err,
907 		unsigned short family, u32 if_id)
908 {
909 	static xfrm_address_t saddr_wildcard = { };
910 	struct net *net = xp_net(pol);
911 	unsigned int h, h_wildcard;
912 	struct xfrm_state *x, *x0, *to_put;
913 	int acquire_in_progress = 0;
914 	int error = 0;
915 	struct xfrm_state *best = NULL;
916 	u32 mark = pol->mark.v & pol->mark.m;
917 	unsigned short encap_family = tmpl->encap_family;
918 	unsigned int sequence;
919 	struct km_event c;
920 
921 	to_put = NULL;
922 
923 	sequence = read_seqcount_begin(&xfrm_state_hash_generation);
924 
925 	rcu_read_lock();
926 	h = xfrm_dst_hash(net, daddr, saddr, tmpl->reqid, encap_family);
927 	hlist_for_each_entry_rcu(x, net->xfrm.state_bydst + h, bydst) {
928 		if (x->props.family == encap_family &&
929 		    x->props.reqid == tmpl->reqid &&
930 		    (mark & x->mark.m) == x->mark.v &&
931 		    x->if_id == if_id &&
932 		    !(x->props.flags & XFRM_STATE_WILDRECV) &&
933 		    xfrm_state_addr_check(x, daddr, saddr, encap_family) &&
934 		    tmpl->mode == x->props.mode &&
935 		    tmpl->id.proto == x->id.proto &&
936 		    (tmpl->id.spi == x->id.spi || !tmpl->id.spi))
937 			xfrm_state_look_at(pol, x, fl, encap_family,
938 					   &best, &acquire_in_progress, &error);
939 	}
940 	if (best || acquire_in_progress)
941 		goto found;
942 
943 	h_wildcard = xfrm_dst_hash(net, daddr, &saddr_wildcard, tmpl->reqid, encap_family);
944 	hlist_for_each_entry_rcu(x, net->xfrm.state_bydst + h_wildcard, bydst) {
945 		if (x->props.family == encap_family &&
946 		    x->props.reqid == tmpl->reqid &&
947 		    (mark & x->mark.m) == x->mark.v &&
948 		    x->if_id == if_id &&
949 		    !(x->props.flags & XFRM_STATE_WILDRECV) &&
950 		    xfrm_addr_equal(&x->id.daddr, daddr, encap_family) &&
951 		    tmpl->mode == x->props.mode &&
952 		    tmpl->id.proto == x->id.proto &&
953 		    (tmpl->id.spi == x->id.spi || !tmpl->id.spi))
954 			xfrm_state_look_at(pol, x, fl, encap_family,
955 					   &best, &acquire_in_progress, &error);
956 	}
957 
958 found:
959 	x = best;
960 	if (!x && !error && !acquire_in_progress) {
961 		if (tmpl->id.spi &&
962 		    (x0 = __xfrm_state_lookup(net, mark, daddr, tmpl->id.spi,
963 					      tmpl->id.proto, encap_family)) != NULL) {
964 			to_put = x0;
965 			error = -EEXIST;
966 			goto out;
967 		}
968 
969 		c.net = net;
970 		/* If the KMs have no listeners (yet...), avoid allocating an SA
971 		 * for each and every packet - garbage collection might not
972 		 * handle the flood.
973 		 */
974 		if (!km_is_alive(&c)) {
975 			error = -ESRCH;
976 			goto out;
977 		}
978 
979 		x = xfrm_state_alloc(net);
980 		if (x == NULL) {
981 			error = -ENOMEM;
982 			goto out;
983 		}
984 		/* Initialize temporary state matching only
985 		 * to current session. */
986 		xfrm_init_tempstate(x, fl, tmpl, daddr, saddr, family);
987 		memcpy(&x->mark, &pol->mark, sizeof(x->mark));
988 		x->if_id = if_id;
989 
990 		error = security_xfrm_state_alloc_acquire(x, pol->security, fl->flowi_secid);
991 		if (error) {
992 			x->km.state = XFRM_STATE_DEAD;
993 			to_put = x;
994 			x = NULL;
995 			goto out;
996 		}
997 
998 		if (km_query(x, tmpl, pol) == 0) {
999 			spin_lock_bh(&net->xfrm.xfrm_state_lock);
1000 			x->km.state = XFRM_STATE_ACQ;
1001 			list_add(&x->km.all, &net->xfrm.state_all);
1002 			hlist_add_head_rcu(&x->bydst, net->xfrm.state_bydst + h);
1003 			h = xfrm_src_hash(net, daddr, saddr, encap_family);
1004 			hlist_add_head_rcu(&x->bysrc, net->xfrm.state_bysrc + h);
1005 			if (x->id.spi) {
1006 				h = xfrm_spi_hash(net, &x->id.daddr, x->id.spi, x->id.proto, encap_family);
1007 				hlist_add_head_rcu(&x->byspi, net->xfrm.state_byspi + h);
1008 			}
1009 			x->lft.hard_add_expires_seconds = net->xfrm.sysctl_acq_expires;
1010 			hrtimer_start(&x->mtimer,
1011 				      ktime_set(net->xfrm.sysctl_acq_expires, 0),
1012 				      HRTIMER_MODE_REL_SOFT);
1013 			net->xfrm.state_num++;
1014 			xfrm_hash_grow_check(net, x->bydst.next != NULL);
1015 			spin_unlock_bh(&net->xfrm.xfrm_state_lock);
1016 		} else {
1017 			x->km.state = XFRM_STATE_DEAD;
1018 			to_put = x;
1019 			x = NULL;
1020 			error = -ESRCH;
1021 		}
1022 	}
1023 out:
1024 	if (x) {
1025 		if (!xfrm_state_hold_rcu(x)) {
1026 			*err = -EAGAIN;
1027 			x = NULL;
1028 		}
1029 	} else {
1030 		*err = acquire_in_progress ? -EAGAIN : error;
1031 	}
1032 	rcu_read_unlock();
1033 	if (to_put)
1034 		xfrm_state_put(to_put);
1035 
1036 	if (read_seqcount_retry(&xfrm_state_hash_generation, sequence)) {
1037 		*err = -EAGAIN;
1038 		if (x) {
1039 			xfrm_state_put(x);
1040 			x = NULL;
1041 		}
1042 	}
1043 
1044 	return x;
1045 }
1046 
1047 struct xfrm_state *
1048 xfrm_stateonly_find(struct net *net, u32 mark, u32 if_id,
1049 		    xfrm_address_t *daddr, xfrm_address_t *saddr,
1050 		    unsigned short family, u8 mode, u8 proto, u32 reqid)
1051 {
1052 	unsigned int h;
1053 	struct xfrm_state *rx = NULL, *x = NULL;
1054 
1055 	spin_lock_bh(&net->xfrm.xfrm_state_lock);
1056 	h = xfrm_dst_hash(net, daddr, saddr, reqid, family);
1057 	hlist_for_each_entry(x, net->xfrm.state_bydst+h, bydst) {
1058 		if (x->props.family == family &&
1059 		    x->props.reqid == reqid &&
1060 		    (mark & x->mark.m) == x->mark.v &&
1061 		    x->if_id == if_id &&
1062 		    !(x->props.flags & XFRM_STATE_WILDRECV) &&
1063 		    xfrm_state_addr_check(x, daddr, saddr, family) &&
1064 		    mode == x->props.mode &&
1065 		    proto == x->id.proto &&
1066 		    x->km.state == XFRM_STATE_VALID) {
1067 			rx = x;
1068 			break;
1069 		}
1070 	}
1071 
1072 	if (rx)
1073 		xfrm_state_hold(rx);
1074 	spin_unlock_bh(&net->xfrm.xfrm_state_lock);
1075 
1076 
1077 	return rx;
1078 }
1079 EXPORT_SYMBOL(xfrm_stateonly_find);
1080 
1081 struct xfrm_state *xfrm_state_lookup_byspi(struct net *net, __be32 spi,
1082 					      unsigned short family)
1083 {
1084 	struct xfrm_state *x;
1085 	struct xfrm_state_walk *w;
1086 
1087 	spin_lock_bh(&net->xfrm.xfrm_state_lock);
1088 	list_for_each_entry(w, &net->xfrm.state_all, all) {
1089 		x = container_of(w, struct xfrm_state, km);
1090 		if (x->props.family != family ||
1091 			x->id.spi != spi)
1092 			continue;
1093 
1094 		xfrm_state_hold(x);
1095 		spin_unlock_bh(&net->xfrm.xfrm_state_lock);
1096 		return x;
1097 	}
1098 	spin_unlock_bh(&net->xfrm.xfrm_state_lock);
1099 	return NULL;
1100 }
1101 EXPORT_SYMBOL(xfrm_state_lookup_byspi);
1102 
1103 static void __xfrm_state_insert(struct xfrm_state *x)
1104 {
1105 	struct net *net = xs_net(x);
1106 	unsigned int h;
1107 
1108 	list_add(&x->km.all, &net->xfrm.state_all);
1109 
1110 	h = xfrm_dst_hash(net, &x->id.daddr, &x->props.saddr,
1111 			  x->props.reqid, x->props.family);
1112 	hlist_add_head_rcu(&x->bydst, net->xfrm.state_bydst + h);
1113 
1114 	h = xfrm_src_hash(net, &x->id.daddr, &x->props.saddr, x->props.family);
1115 	hlist_add_head_rcu(&x->bysrc, net->xfrm.state_bysrc + h);
1116 
1117 	if (x->id.spi) {
1118 		h = xfrm_spi_hash(net, &x->id.daddr, x->id.spi, x->id.proto,
1119 				  x->props.family);
1120 
1121 		hlist_add_head_rcu(&x->byspi, net->xfrm.state_byspi + h);
1122 	}
1123 
1124 	hrtimer_start(&x->mtimer, ktime_set(1, 0), HRTIMER_MODE_REL_SOFT);
1125 	if (x->replay_maxage)
1126 		mod_timer(&x->rtimer, jiffies + x->replay_maxage);
1127 
1128 	net->xfrm.state_num++;
1129 
1130 	xfrm_hash_grow_check(net, x->bydst.next != NULL);
1131 }
1132 
1133 /* net->xfrm.xfrm_state_lock is held */
1134 static void __xfrm_state_bump_genids(struct xfrm_state *xnew)
1135 {
1136 	struct net *net = xs_net(xnew);
1137 	unsigned short family = xnew->props.family;
1138 	u32 reqid = xnew->props.reqid;
1139 	struct xfrm_state *x;
1140 	unsigned int h;
1141 	u32 mark = xnew->mark.v & xnew->mark.m;
1142 	u32 if_id = xnew->if_id;
1143 
1144 	h = xfrm_dst_hash(net, &xnew->id.daddr, &xnew->props.saddr, reqid, family);
1145 	hlist_for_each_entry(x, net->xfrm.state_bydst+h, bydst) {
1146 		if (x->props.family	== family &&
1147 		    x->props.reqid	== reqid &&
1148 		    x->if_id		== if_id &&
1149 		    (mark & x->mark.m) == x->mark.v &&
1150 		    xfrm_addr_equal(&x->id.daddr, &xnew->id.daddr, family) &&
1151 		    xfrm_addr_equal(&x->props.saddr, &xnew->props.saddr, family))
1152 			x->genid++;
1153 	}
1154 }
1155 
1156 void xfrm_state_insert(struct xfrm_state *x)
1157 {
1158 	struct net *net = xs_net(x);
1159 
1160 	spin_lock_bh(&net->xfrm.xfrm_state_lock);
1161 	__xfrm_state_bump_genids(x);
1162 	__xfrm_state_insert(x);
1163 	spin_unlock_bh(&net->xfrm.xfrm_state_lock);
1164 }
1165 EXPORT_SYMBOL(xfrm_state_insert);
1166 
1167 /* net->xfrm.xfrm_state_lock is held */
1168 static struct xfrm_state *__find_acq_core(struct net *net,
1169 					  const struct xfrm_mark *m,
1170 					  unsigned short family, u8 mode,
1171 					  u32 reqid, u32 if_id, u8 proto,
1172 					  const xfrm_address_t *daddr,
1173 					  const xfrm_address_t *saddr,
1174 					  int create)
1175 {
1176 	unsigned int h = xfrm_dst_hash(net, daddr, saddr, reqid, family);
1177 	struct xfrm_state *x;
1178 	u32 mark = m->v & m->m;
1179 
1180 	hlist_for_each_entry(x, net->xfrm.state_bydst+h, bydst) {
1181 		if (x->props.reqid  != reqid ||
1182 		    x->props.mode   != mode ||
1183 		    x->props.family != family ||
1184 		    x->km.state     != XFRM_STATE_ACQ ||
1185 		    x->id.spi       != 0 ||
1186 		    x->id.proto	    != proto ||
1187 		    (mark & x->mark.m) != x->mark.v ||
1188 		    !xfrm_addr_equal(&x->id.daddr, daddr, family) ||
1189 		    !xfrm_addr_equal(&x->props.saddr, saddr, family))
1190 			continue;
1191 
1192 		xfrm_state_hold(x);
1193 		return x;
1194 	}
1195 
1196 	if (!create)
1197 		return NULL;
1198 
1199 	x = xfrm_state_alloc(net);
1200 	if (likely(x)) {
1201 		switch (family) {
1202 		case AF_INET:
1203 			x->sel.daddr.a4 = daddr->a4;
1204 			x->sel.saddr.a4 = saddr->a4;
1205 			x->sel.prefixlen_d = 32;
1206 			x->sel.prefixlen_s = 32;
1207 			x->props.saddr.a4 = saddr->a4;
1208 			x->id.daddr.a4 = daddr->a4;
1209 			break;
1210 
1211 		case AF_INET6:
1212 			x->sel.daddr.in6 = daddr->in6;
1213 			x->sel.saddr.in6 = saddr->in6;
1214 			x->sel.prefixlen_d = 128;
1215 			x->sel.prefixlen_s = 128;
1216 			x->props.saddr.in6 = saddr->in6;
1217 			x->id.daddr.in6 = daddr->in6;
1218 			break;
1219 		}
1220 
1221 		x->km.state = XFRM_STATE_ACQ;
1222 		x->id.proto = proto;
1223 		x->props.family = family;
1224 		x->props.mode = mode;
1225 		x->props.reqid = reqid;
1226 		x->if_id = if_id;
1227 		x->mark.v = m->v;
1228 		x->mark.m = m->m;
1229 		x->lft.hard_add_expires_seconds = net->xfrm.sysctl_acq_expires;
1230 		xfrm_state_hold(x);
1231 		hrtimer_start(&x->mtimer,
1232 			      ktime_set(net->xfrm.sysctl_acq_expires, 0),
1233 			      HRTIMER_MODE_REL_SOFT);
1234 		list_add(&x->km.all, &net->xfrm.state_all);
1235 		hlist_add_head_rcu(&x->bydst, net->xfrm.state_bydst + h);
1236 		h = xfrm_src_hash(net, daddr, saddr, family);
1237 		hlist_add_head_rcu(&x->bysrc, net->xfrm.state_bysrc + h);
1238 
1239 		net->xfrm.state_num++;
1240 
1241 		xfrm_hash_grow_check(net, x->bydst.next != NULL);
1242 	}
1243 
1244 	return x;
1245 }
1246 
1247 static struct xfrm_state *__xfrm_find_acq_byseq(struct net *net, u32 mark, u32 seq);
1248 
1249 int xfrm_state_add(struct xfrm_state *x)
1250 {
1251 	struct net *net = xs_net(x);
1252 	struct xfrm_state *x1, *to_put;
1253 	int family;
1254 	int err;
1255 	u32 mark = x->mark.v & x->mark.m;
1256 	int use_spi = xfrm_id_proto_match(x->id.proto, IPSEC_PROTO_ANY);
1257 
1258 	family = x->props.family;
1259 
1260 	to_put = NULL;
1261 
1262 	spin_lock_bh(&net->xfrm.xfrm_state_lock);
1263 
1264 	x1 = __xfrm_state_locate(x, use_spi, family);
1265 	if (x1) {
1266 		to_put = x1;
1267 		x1 = NULL;
1268 		err = -EEXIST;
1269 		goto out;
1270 	}
1271 
1272 	if (use_spi && x->km.seq) {
1273 		x1 = __xfrm_find_acq_byseq(net, mark, x->km.seq);
1274 		if (x1 && ((x1->id.proto != x->id.proto) ||
1275 		    !xfrm_addr_equal(&x1->id.daddr, &x->id.daddr, family))) {
1276 			to_put = x1;
1277 			x1 = NULL;
1278 		}
1279 	}
1280 
1281 	if (use_spi && !x1)
1282 		x1 = __find_acq_core(net, &x->mark, family, x->props.mode,
1283 				     x->props.reqid, x->if_id, x->id.proto,
1284 				     &x->id.daddr, &x->props.saddr, 0);
1285 
1286 	__xfrm_state_bump_genids(x);
1287 	__xfrm_state_insert(x);
1288 	err = 0;
1289 
1290 out:
1291 	spin_unlock_bh(&net->xfrm.xfrm_state_lock);
1292 
1293 	if (x1) {
1294 		xfrm_state_delete(x1);
1295 		xfrm_state_put(x1);
1296 	}
1297 
1298 	if (to_put)
1299 		xfrm_state_put(to_put);
1300 
1301 	return err;
1302 }
1303 EXPORT_SYMBOL(xfrm_state_add);
1304 
1305 #ifdef CONFIG_XFRM_MIGRATE
1306 static struct xfrm_state *xfrm_state_clone(struct xfrm_state *orig,
1307 					   struct xfrm_encap_tmpl *encap)
1308 {
1309 	struct net *net = xs_net(orig);
1310 	struct xfrm_state *x = xfrm_state_alloc(net);
1311 	if (!x)
1312 		goto out;
1313 
1314 	memcpy(&x->id, &orig->id, sizeof(x->id));
1315 	memcpy(&x->sel, &orig->sel, sizeof(x->sel));
1316 	memcpy(&x->lft, &orig->lft, sizeof(x->lft));
1317 	x->props.mode = orig->props.mode;
1318 	x->props.replay_window = orig->props.replay_window;
1319 	x->props.reqid = orig->props.reqid;
1320 	x->props.family = orig->props.family;
1321 	x->props.saddr = orig->props.saddr;
1322 
1323 	if (orig->aalg) {
1324 		x->aalg = xfrm_algo_auth_clone(orig->aalg);
1325 		if (!x->aalg)
1326 			goto error;
1327 	}
1328 	x->props.aalgo = orig->props.aalgo;
1329 
1330 	if (orig->aead) {
1331 		x->aead = xfrm_algo_aead_clone(orig->aead);
1332 		x->geniv = orig->geniv;
1333 		if (!x->aead)
1334 			goto error;
1335 	}
1336 	if (orig->ealg) {
1337 		x->ealg = xfrm_algo_clone(orig->ealg);
1338 		if (!x->ealg)
1339 			goto error;
1340 	}
1341 	x->props.ealgo = orig->props.ealgo;
1342 
1343 	if (orig->calg) {
1344 		x->calg = xfrm_algo_clone(orig->calg);
1345 		if (!x->calg)
1346 			goto error;
1347 	}
1348 	x->props.calgo = orig->props.calgo;
1349 
1350 	if (encap || orig->encap) {
1351 		if (encap)
1352 			x->encap = kmemdup(encap, sizeof(*x->encap),
1353 					GFP_KERNEL);
1354 		else
1355 			x->encap = kmemdup(orig->encap, sizeof(*x->encap),
1356 					GFP_KERNEL);
1357 
1358 		if (!x->encap)
1359 			goto error;
1360 	}
1361 
1362 	if (orig->coaddr) {
1363 		x->coaddr = kmemdup(orig->coaddr, sizeof(*x->coaddr),
1364 				    GFP_KERNEL);
1365 		if (!x->coaddr)
1366 			goto error;
1367 	}
1368 
1369 	if (orig->replay_esn) {
1370 		if (xfrm_replay_clone(x, orig))
1371 			goto error;
1372 	}
1373 
1374 	memcpy(&x->mark, &orig->mark, sizeof(x->mark));
1375 
1376 	if (xfrm_init_state(x) < 0)
1377 		goto error;
1378 
1379 	x->props.flags = orig->props.flags;
1380 	x->props.extra_flags = orig->props.extra_flags;
1381 
1382 	x->if_id = orig->if_id;
1383 	x->tfcpad = orig->tfcpad;
1384 	x->replay_maxdiff = orig->replay_maxdiff;
1385 	x->replay_maxage = orig->replay_maxage;
1386 	x->curlft.add_time = orig->curlft.add_time;
1387 	x->km.state = orig->km.state;
1388 	x->km.seq = orig->km.seq;
1389 	x->replay = orig->replay;
1390 	x->preplay = orig->preplay;
1391 
1392 	return x;
1393 
1394  error:
1395 	xfrm_state_put(x);
1396 out:
1397 	return NULL;
1398 }
1399 
1400 struct xfrm_state *xfrm_migrate_state_find(struct xfrm_migrate *m, struct net *net)
1401 {
1402 	unsigned int h;
1403 	struct xfrm_state *x = NULL;
1404 
1405 	spin_lock_bh(&net->xfrm.xfrm_state_lock);
1406 
1407 	if (m->reqid) {
1408 		h = xfrm_dst_hash(net, &m->old_daddr, &m->old_saddr,
1409 				  m->reqid, m->old_family);
1410 		hlist_for_each_entry(x, net->xfrm.state_bydst+h, bydst) {
1411 			if (x->props.mode != m->mode ||
1412 			    x->id.proto != m->proto)
1413 				continue;
1414 			if (m->reqid && x->props.reqid != m->reqid)
1415 				continue;
1416 			if (!xfrm_addr_equal(&x->id.daddr, &m->old_daddr,
1417 					     m->old_family) ||
1418 			    !xfrm_addr_equal(&x->props.saddr, &m->old_saddr,
1419 					     m->old_family))
1420 				continue;
1421 			xfrm_state_hold(x);
1422 			break;
1423 		}
1424 	} else {
1425 		h = xfrm_src_hash(net, &m->old_daddr, &m->old_saddr,
1426 				  m->old_family);
1427 		hlist_for_each_entry(x, net->xfrm.state_bysrc+h, bysrc) {
1428 			if (x->props.mode != m->mode ||
1429 			    x->id.proto != m->proto)
1430 				continue;
1431 			if (!xfrm_addr_equal(&x->id.daddr, &m->old_daddr,
1432 					     m->old_family) ||
1433 			    !xfrm_addr_equal(&x->props.saddr, &m->old_saddr,
1434 					     m->old_family))
1435 				continue;
1436 			xfrm_state_hold(x);
1437 			break;
1438 		}
1439 	}
1440 
1441 	spin_unlock_bh(&net->xfrm.xfrm_state_lock);
1442 
1443 	return x;
1444 }
1445 EXPORT_SYMBOL(xfrm_migrate_state_find);
1446 
1447 struct xfrm_state *xfrm_state_migrate(struct xfrm_state *x,
1448 				      struct xfrm_migrate *m,
1449 				      struct xfrm_encap_tmpl *encap)
1450 {
1451 	struct xfrm_state *xc;
1452 
1453 	xc = xfrm_state_clone(x, encap);
1454 	if (!xc)
1455 		return NULL;
1456 
1457 	memcpy(&xc->id.daddr, &m->new_daddr, sizeof(xc->id.daddr));
1458 	memcpy(&xc->props.saddr, &m->new_saddr, sizeof(xc->props.saddr));
1459 
1460 	/* add state */
1461 	if (xfrm_addr_equal(&x->id.daddr, &m->new_daddr, m->new_family)) {
1462 		/* a care is needed when the destination address of the
1463 		   state is to be updated as it is a part of triplet */
1464 		xfrm_state_insert(xc);
1465 	} else {
1466 		if (xfrm_state_add(xc) < 0)
1467 			goto error;
1468 	}
1469 
1470 	return xc;
1471 error:
1472 	xfrm_state_put(xc);
1473 	return NULL;
1474 }
1475 EXPORT_SYMBOL(xfrm_state_migrate);
1476 #endif
1477 
1478 int xfrm_state_update(struct xfrm_state *x)
1479 {
1480 	struct xfrm_state *x1, *to_put;
1481 	int err;
1482 	int use_spi = xfrm_id_proto_match(x->id.proto, IPSEC_PROTO_ANY);
1483 	struct net *net = xs_net(x);
1484 
1485 	to_put = NULL;
1486 
1487 	spin_lock_bh(&net->xfrm.xfrm_state_lock);
1488 	x1 = __xfrm_state_locate(x, use_spi, x->props.family);
1489 
1490 	err = -ESRCH;
1491 	if (!x1)
1492 		goto out;
1493 
1494 	if (xfrm_state_kern(x1)) {
1495 		to_put = x1;
1496 		err = -EEXIST;
1497 		goto out;
1498 	}
1499 
1500 	if (x1->km.state == XFRM_STATE_ACQ) {
1501 		__xfrm_state_insert(x);
1502 		x = NULL;
1503 	}
1504 	err = 0;
1505 
1506 out:
1507 	spin_unlock_bh(&net->xfrm.xfrm_state_lock);
1508 
1509 	if (to_put)
1510 		xfrm_state_put(to_put);
1511 
1512 	if (err)
1513 		return err;
1514 
1515 	if (!x) {
1516 		xfrm_state_delete(x1);
1517 		xfrm_state_put(x1);
1518 		return 0;
1519 	}
1520 
1521 	err = -EINVAL;
1522 	spin_lock_bh(&x1->lock);
1523 	if (likely(x1->km.state == XFRM_STATE_VALID)) {
1524 		if (x->encap && x1->encap &&
1525 		    x->encap->encap_type == x1->encap->encap_type)
1526 			memcpy(x1->encap, x->encap, sizeof(*x1->encap));
1527 		else if (x->encap || x1->encap)
1528 			goto fail;
1529 
1530 		if (x->coaddr && x1->coaddr) {
1531 			memcpy(x1->coaddr, x->coaddr, sizeof(*x1->coaddr));
1532 		}
1533 		if (!use_spi && memcmp(&x1->sel, &x->sel, sizeof(x1->sel)))
1534 			memcpy(&x1->sel, &x->sel, sizeof(x1->sel));
1535 		memcpy(&x1->lft, &x->lft, sizeof(x1->lft));
1536 		x1->km.dying = 0;
1537 
1538 		hrtimer_start(&x1->mtimer, ktime_set(1, 0),
1539 			      HRTIMER_MODE_REL_SOFT);
1540 		if (x1->curlft.use_time)
1541 			xfrm_state_check_expire(x1);
1542 
1543 		if (x->props.smark.m || x->props.smark.v || x->if_id) {
1544 			spin_lock_bh(&net->xfrm.xfrm_state_lock);
1545 
1546 			if (x->props.smark.m || x->props.smark.v)
1547 				x1->props.smark = x->props.smark;
1548 
1549 			if (x->if_id)
1550 				x1->if_id = x->if_id;
1551 
1552 			__xfrm_state_bump_genids(x1);
1553 			spin_unlock_bh(&net->xfrm.xfrm_state_lock);
1554 		}
1555 
1556 		err = 0;
1557 		x->km.state = XFRM_STATE_DEAD;
1558 		__xfrm_state_put(x);
1559 	}
1560 
1561 fail:
1562 	spin_unlock_bh(&x1->lock);
1563 
1564 	xfrm_state_put(x1);
1565 
1566 	return err;
1567 }
1568 EXPORT_SYMBOL(xfrm_state_update);
1569 
1570 int xfrm_state_check_expire(struct xfrm_state *x)
1571 {
1572 	if (!x->curlft.use_time)
1573 		x->curlft.use_time = ktime_get_real_seconds();
1574 
1575 	if (x->curlft.bytes >= x->lft.hard_byte_limit ||
1576 	    x->curlft.packets >= x->lft.hard_packet_limit) {
1577 		x->km.state = XFRM_STATE_EXPIRED;
1578 		hrtimer_start(&x->mtimer, 0, HRTIMER_MODE_REL_SOFT);
1579 		return -EINVAL;
1580 	}
1581 
1582 	if (!x->km.dying &&
1583 	    (x->curlft.bytes >= x->lft.soft_byte_limit ||
1584 	     x->curlft.packets >= x->lft.soft_packet_limit)) {
1585 		x->km.dying = 1;
1586 		km_state_expired(x, 0, 0);
1587 	}
1588 	return 0;
1589 }
1590 EXPORT_SYMBOL(xfrm_state_check_expire);
1591 
1592 struct xfrm_state *
1593 xfrm_state_lookup(struct net *net, u32 mark, const xfrm_address_t *daddr, __be32 spi,
1594 		  u8 proto, unsigned short family)
1595 {
1596 	struct xfrm_state *x;
1597 
1598 	rcu_read_lock();
1599 	x = __xfrm_state_lookup(net, mark, daddr, spi, proto, family);
1600 	rcu_read_unlock();
1601 	return x;
1602 }
1603 EXPORT_SYMBOL(xfrm_state_lookup);
1604 
1605 struct xfrm_state *
1606 xfrm_state_lookup_byaddr(struct net *net, u32 mark,
1607 			 const xfrm_address_t *daddr, const xfrm_address_t *saddr,
1608 			 u8 proto, unsigned short family)
1609 {
1610 	struct xfrm_state *x;
1611 
1612 	spin_lock_bh(&net->xfrm.xfrm_state_lock);
1613 	x = __xfrm_state_lookup_byaddr(net, mark, daddr, saddr, proto, family);
1614 	spin_unlock_bh(&net->xfrm.xfrm_state_lock);
1615 	return x;
1616 }
1617 EXPORT_SYMBOL(xfrm_state_lookup_byaddr);
1618 
1619 struct xfrm_state *
1620 xfrm_find_acq(struct net *net, const struct xfrm_mark *mark, u8 mode, u32 reqid,
1621 	      u32 if_id, u8 proto, const xfrm_address_t *daddr,
1622 	      const xfrm_address_t *saddr, int create, unsigned short family)
1623 {
1624 	struct xfrm_state *x;
1625 
1626 	spin_lock_bh(&net->xfrm.xfrm_state_lock);
1627 	x = __find_acq_core(net, mark, family, mode, reqid, if_id, proto, daddr, saddr, create);
1628 	spin_unlock_bh(&net->xfrm.xfrm_state_lock);
1629 
1630 	return x;
1631 }
1632 EXPORT_SYMBOL(xfrm_find_acq);
1633 
1634 #ifdef CONFIG_XFRM_SUB_POLICY
1635 int
1636 xfrm_tmpl_sort(struct xfrm_tmpl **dst, struct xfrm_tmpl **src, int n,
1637 	       unsigned short family, struct net *net)
1638 {
1639 	int i;
1640 	int err = 0;
1641 	struct xfrm_state_afinfo *afinfo = xfrm_state_get_afinfo(family);
1642 	if (!afinfo)
1643 		return -EAFNOSUPPORT;
1644 
1645 	spin_lock_bh(&net->xfrm.xfrm_state_lock); /*FIXME*/
1646 	if (afinfo->tmpl_sort)
1647 		err = afinfo->tmpl_sort(dst, src, n);
1648 	else
1649 		for (i = 0; i < n; i++)
1650 			dst[i] = src[i];
1651 	spin_unlock_bh(&net->xfrm.xfrm_state_lock);
1652 	rcu_read_unlock();
1653 	return err;
1654 }
1655 EXPORT_SYMBOL(xfrm_tmpl_sort);
1656 
1657 int
1658 xfrm_state_sort(struct xfrm_state **dst, struct xfrm_state **src, int n,
1659 		unsigned short family)
1660 {
1661 	int i;
1662 	int err = 0;
1663 	struct xfrm_state_afinfo *afinfo = xfrm_state_get_afinfo(family);
1664 	struct net *net = xs_net(*src);
1665 
1666 	if (!afinfo)
1667 		return -EAFNOSUPPORT;
1668 
1669 	spin_lock_bh(&net->xfrm.xfrm_state_lock);
1670 	if (afinfo->state_sort)
1671 		err = afinfo->state_sort(dst, src, n);
1672 	else
1673 		for (i = 0; i < n; i++)
1674 			dst[i] = src[i];
1675 	spin_unlock_bh(&net->xfrm.xfrm_state_lock);
1676 	rcu_read_unlock();
1677 	return err;
1678 }
1679 EXPORT_SYMBOL(xfrm_state_sort);
1680 #endif
1681 
1682 /* Silly enough, but I'm lazy to build resolution list */
1683 
1684 static struct xfrm_state *__xfrm_find_acq_byseq(struct net *net, u32 mark, u32 seq)
1685 {
1686 	int i;
1687 
1688 	for (i = 0; i <= net->xfrm.state_hmask; i++) {
1689 		struct xfrm_state *x;
1690 
1691 		hlist_for_each_entry(x, net->xfrm.state_bydst+i, bydst) {
1692 			if (x->km.seq == seq &&
1693 			    (mark & x->mark.m) == x->mark.v &&
1694 			    x->km.state == XFRM_STATE_ACQ) {
1695 				xfrm_state_hold(x);
1696 				return x;
1697 			}
1698 		}
1699 	}
1700 	return NULL;
1701 }
1702 
1703 struct xfrm_state *xfrm_find_acq_byseq(struct net *net, u32 mark, u32 seq)
1704 {
1705 	struct xfrm_state *x;
1706 
1707 	spin_lock_bh(&net->xfrm.xfrm_state_lock);
1708 	x = __xfrm_find_acq_byseq(net, mark, seq);
1709 	spin_unlock_bh(&net->xfrm.xfrm_state_lock);
1710 	return x;
1711 }
1712 EXPORT_SYMBOL(xfrm_find_acq_byseq);
1713 
1714 u32 xfrm_get_acqseq(void)
1715 {
1716 	u32 res;
1717 	static atomic_t acqseq;
1718 
1719 	do {
1720 		res = atomic_inc_return(&acqseq);
1721 	} while (!res);
1722 
1723 	return res;
1724 }
1725 EXPORT_SYMBOL(xfrm_get_acqseq);
1726 
1727 int verify_spi_info(u8 proto, u32 min, u32 max)
1728 {
1729 	switch (proto) {
1730 	case IPPROTO_AH:
1731 	case IPPROTO_ESP:
1732 		break;
1733 
1734 	case IPPROTO_COMP:
1735 		/* IPCOMP spi is 16-bits. */
1736 		if (max >= 0x10000)
1737 			return -EINVAL;
1738 		break;
1739 
1740 	default:
1741 		return -EINVAL;
1742 	}
1743 
1744 	if (min > max)
1745 		return -EINVAL;
1746 
1747 	return 0;
1748 }
1749 EXPORT_SYMBOL(verify_spi_info);
1750 
1751 int xfrm_alloc_spi(struct xfrm_state *x, u32 low, u32 high)
1752 {
1753 	struct net *net = xs_net(x);
1754 	unsigned int h;
1755 	struct xfrm_state *x0;
1756 	int err = -ENOENT;
1757 	__be32 minspi = htonl(low);
1758 	__be32 maxspi = htonl(high);
1759 	u32 mark = x->mark.v & x->mark.m;
1760 
1761 	spin_lock_bh(&x->lock);
1762 	if (x->km.state == XFRM_STATE_DEAD)
1763 		goto unlock;
1764 
1765 	err = 0;
1766 	if (x->id.spi)
1767 		goto unlock;
1768 
1769 	err = -ENOENT;
1770 
1771 	if (minspi == maxspi) {
1772 		x0 = xfrm_state_lookup(net, mark, &x->id.daddr, minspi, x->id.proto, x->props.family);
1773 		if (x0) {
1774 			xfrm_state_put(x0);
1775 			goto unlock;
1776 		}
1777 		x->id.spi = minspi;
1778 	} else {
1779 		u32 spi = 0;
1780 		for (h = 0; h < high-low+1; h++) {
1781 			spi = low + prandom_u32()%(high-low+1);
1782 			x0 = xfrm_state_lookup(net, mark, &x->id.daddr, htonl(spi), x->id.proto, x->props.family);
1783 			if (x0 == NULL) {
1784 				x->id.spi = htonl(spi);
1785 				break;
1786 			}
1787 			xfrm_state_put(x0);
1788 		}
1789 	}
1790 	if (x->id.spi) {
1791 		spin_lock_bh(&net->xfrm.xfrm_state_lock);
1792 		h = xfrm_spi_hash(net, &x->id.daddr, x->id.spi, x->id.proto, x->props.family);
1793 		hlist_add_head_rcu(&x->byspi, net->xfrm.state_byspi + h);
1794 		spin_unlock_bh(&net->xfrm.xfrm_state_lock);
1795 
1796 		err = 0;
1797 	}
1798 
1799 unlock:
1800 	spin_unlock_bh(&x->lock);
1801 
1802 	return err;
1803 }
1804 EXPORT_SYMBOL(xfrm_alloc_spi);
1805 
1806 static bool __xfrm_state_filter_match(struct xfrm_state *x,
1807 				      struct xfrm_address_filter *filter)
1808 {
1809 	if (filter) {
1810 		if ((filter->family == AF_INET ||
1811 		     filter->family == AF_INET6) &&
1812 		    x->props.family != filter->family)
1813 			return false;
1814 
1815 		return addr_match(&x->props.saddr, &filter->saddr,
1816 				  filter->splen) &&
1817 		       addr_match(&x->id.daddr, &filter->daddr,
1818 				  filter->dplen);
1819 	}
1820 	return true;
1821 }
1822 
1823 int xfrm_state_walk(struct net *net, struct xfrm_state_walk *walk,
1824 		    int (*func)(struct xfrm_state *, int, void*),
1825 		    void *data)
1826 {
1827 	struct xfrm_state *state;
1828 	struct xfrm_state_walk *x;
1829 	int err = 0;
1830 
1831 	if (walk->seq != 0 && list_empty(&walk->all))
1832 		return 0;
1833 
1834 	spin_lock_bh(&net->xfrm.xfrm_state_lock);
1835 	if (list_empty(&walk->all))
1836 		x = list_first_entry(&net->xfrm.state_all, struct xfrm_state_walk, all);
1837 	else
1838 		x = list_first_entry(&walk->all, struct xfrm_state_walk, all);
1839 	list_for_each_entry_from(x, &net->xfrm.state_all, all) {
1840 		if (x->state == XFRM_STATE_DEAD)
1841 			continue;
1842 		state = container_of(x, struct xfrm_state, km);
1843 		if (!xfrm_id_proto_match(state->id.proto, walk->proto))
1844 			continue;
1845 		if (!__xfrm_state_filter_match(state, walk->filter))
1846 			continue;
1847 		err = func(state, walk->seq, data);
1848 		if (err) {
1849 			list_move_tail(&walk->all, &x->all);
1850 			goto out;
1851 		}
1852 		walk->seq++;
1853 	}
1854 	if (walk->seq == 0) {
1855 		err = -ENOENT;
1856 		goto out;
1857 	}
1858 	list_del_init(&walk->all);
1859 out:
1860 	spin_unlock_bh(&net->xfrm.xfrm_state_lock);
1861 	return err;
1862 }
1863 EXPORT_SYMBOL(xfrm_state_walk);
1864 
1865 void xfrm_state_walk_init(struct xfrm_state_walk *walk, u8 proto,
1866 			  struct xfrm_address_filter *filter)
1867 {
1868 	INIT_LIST_HEAD(&walk->all);
1869 	walk->proto = proto;
1870 	walk->state = XFRM_STATE_DEAD;
1871 	walk->seq = 0;
1872 	walk->filter = filter;
1873 }
1874 EXPORT_SYMBOL(xfrm_state_walk_init);
1875 
1876 void xfrm_state_walk_done(struct xfrm_state_walk *walk, struct net *net)
1877 {
1878 	kfree(walk->filter);
1879 
1880 	if (list_empty(&walk->all))
1881 		return;
1882 
1883 	spin_lock_bh(&net->xfrm.xfrm_state_lock);
1884 	list_del(&walk->all);
1885 	spin_unlock_bh(&net->xfrm.xfrm_state_lock);
1886 }
1887 EXPORT_SYMBOL(xfrm_state_walk_done);
1888 
1889 static void xfrm_replay_timer_handler(struct timer_list *t)
1890 {
1891 	struct xfrm_state *x = from_timer(x, t, rtimer);
1892 
1893 	spin_lock(&x->lock);
1894 
1895 	if (x->km.state == XFRM_STATE_VALID) {
1896 		if (xfrm_aevent_is_on(xs_net(x)))
1897 			x->repl->notify(x, XFRM_REPLAY_TIMEOUT);
1898 		else
1899 			x->xflags |= XFRM_TIME_DEFER;
1900 	}
1901 
1902 	spin_unlock(&x->lock);
1903 }
1904 
1905 static LIST_HEAD(xfrm_km_list);
1906 
1907 void km_policy_notify(struct xfrm_policy *xp, int dir, const struct km_event *c)
1908 {
1909 	struct xfrm_mgr *km;
1910 
1911 	rcu_read_lock();
1912 	list_for_each_entry_rcu(km, &xfrm_km_list, list)
1913 		if (km->notify_policy)
1914 			km->notify_policy(xp, dir, c);
1915 	rcu_read_unlock();
1916 }
1917 
1918 void km_state_notify(struct xfrm_state *x, const struct km_event *c)
1919 {
1920 	struct xfrm_mgr *km;
1921 	rcu_read_lock();
1922 	list_for_each_entry_rcu(km, &xfrm_km_list, list)
1923 		if (km->notify)
1924 			km->notify(x, c);
1925 	rcu_read_unlock();
1926 }
1927 
1928 EXPORT_SYMBOL(km_policy_notify);
1929 EXPORT_SYMBOL(km_state_notify);
1930 
1931 void km_state_expired(struct xfrm_state *x, int hard, u32 portid)
1932 {
1933 	struct km_event c;
1934 
1935 	c.data.hard = hard;
1936 	c.portid = portid;
1937 	c.event = XFRM_MSG_EXPIRE;
1938 	km_state_notify(x, &c);
1939 }
1940 
1941 EXPORT_SYMBOL(km_state_expired);
1942 /*
1943  * We send to all registered managers regardless of failure
1944  * We are happy with one success
1945 */
1946 int km_query(struct xfrm_state *x, struct xfrm_tmpl *t, struct xfrm_policy *pol)
1947 {
1948 	int err = -EINVAL, acqret;
1949 	struct xfrm_mgr *km;
1950 
1951 	rcu_read_lock();
1952 	list_for_each_entry_rcu(km, &xfrm_km_list, list) {
1953 		acqret = km->acquire(x, t, pol);
1954 		if (!acqret)
1955 			err = acqret;
1956 	}
1957 	rcu_read_unlock();
1958 	return err;
1959 }
1960 EXPORT_SYMBOL(km_query);
1961 
1962 int km_new_mapping(struct xfrm_state *x, xfrm_address_t *ipaddr, __be16 sport)
1963 {
1964 	int err = -EINVAL;
1965 	struct xfrm_mgr *km;
1966 
1967 	rcu_read_lock();
1968 	list_for_each_entry_rcu(km, &xfrm_km_list, list) {
1969 		if (km->new_mapping)
1970 			err = km->new_mapping(x, ipaddr, sport);
1971 		if (!err)
1972 			break;
1973 	}
1974 	rcu_read_unlock();
1975 	return err;
1976 }
1977 EXPORT_SYMBOL(km_new_mapping);
1978 
1979 void km_policy_expired(struct xfrm_policy *pol, int dir, int hard, u32 portid)
1980 {
1981 	struct km_event c;
1982 
1983 	c.data.hard = hard;
1984 	c.portid = portid;
1985 	c.event = XFRM_MSG_POLEXPIRE;
1986 	km_policy_notify(pol, dir, &c);
1987 }
1988 EXPORT_SYMBOL(km_policy_expired);
1989 
1990 #ifdef CONFIG_XFRM_MIGRATE
1991 int km_migrate(const struct xfrm_selector *sel, u8 dir, u8 type,
1992 	       const struct xfrm_migrate *m, int num_migrate,
1993 	       const struct xfrm_kmaddress *k,
1994 	       const struct xfrm_encap_tmpl *encap)
1995 {
1996 	int err = -EINVAL;
1997 	int ret;
1998 	struct xfrm_mgr *km;
1999 
2000 	rcu_read_lock();
2001 	list_for_each_entry_rcu(km, &xfrm_km_list, list) {
2002 		if (km->migrate) {
2003 			ret = km->migrate(sel, dir, type, m, num_migrate, k,
2004 					  encap);
2005 			if (!ret)
2006 				err = ret;
2007 		}
2008 	}
2009 	rcu_read_unlock();
2010 	return err;
2011 }
2012 EXPORT_SYMBOL(km_migrate);
2013 #endif
2014 
2015 int km_report(struct net *net, u8 proto, struct xfrm_selector *sel, xfrm_address_t *addr)
2016 {
2017 	int err = -EINVAL;
2018 	int ret;
2019 	struct xfrm_mgr *km;
2020 
2021 	rcu_read_lock();
2022 	list_for_each_entry_rcu(km, &xfrm_km_list, list) {
2023 		if (km->report) {
2024 			ret = km->report(net, proto, sel, addr);
2025 			if (!ret)
2026 				err = ret;
2027 		}
2028 	}
2029 	rcu_read_unlock();
2030 	return err;
2031 }
2032 EXPORT_SYMBOL(km_report);
2033 
2034 static bool km_is_alive(const struct km_event *c)
2035 {
2036 	struct xfrm_mgr *km;
2037 	bool is_alive = false;
2038 
2039 	rcu_read_lock();
2040 	list_for_each_entry_rcu(km, &xfrm_km_list, list) {
2041 		if (km->is_alive && km->is_alive(c)) {
2042 			is_alive = true;
2043 			break;
2044 		}
2045 	}
2046 	rcu_read_unlock();
2047 
2048 	return is_alive;
2049 }
2050 
2051 int xfrm_user_policy(struct sock *sk, int optname, u8 __user *optval, int optlen)
2052 {
2053 	int err;
2054 	u8 *data;
2055 	struct xfrm_mgr *km;
2056 	struct xfrm_policy *pol = NULL;
2057 
2058 	if (in_compat_syscall())
2059 		return -EOPNOTSUPP;
2060 
2061 	if (!optval && !optlen) {
2062 		xfrm_sk_policy_insert(sk, XFRM_POLICY_IN, NULL);
2063 		xfrm_sk_policy_insert(sk, XFRM_POLICY_OUT, NULL);
2064 		__sk_dst_reset(sk);
2065 		return 0;
2066 	}
2067 
2068 	if (optlen <= 0 || optlen > PAGE_SIZE)
2069 		return -EMSGSIZE;
2070 
2071 	data = memdup_user(optval, optlen);
2072 	if (IS_ERR(data))
2073 		return PTR_ERR(data);
2074 
2075 	err = -EINVAL;
2076 	rcu_read_lock();
2077 	list_for_each_entry_rcu(km, &xfrm_km_list, list) {
2078 		pol = km->compile_policy(sk, optname, data,
2079 					 optlen, &err);
2080 		if (err >= 0)
2081 			break;
2082 	}
2083 	rcu_read_unlock();
2084 
2085 	if (err >= 0) {
2086 		xfrm_sk_policy_insert(sk, err, pol);
2087 		xfrm_pol_put(pol);
2088 		__sk_dst_reset(sk);
2089 		err = 0;
2090 	}
2091 
2092 	kfree(data);
2093 	return err;
2094 }
2095 EXPORT_SYMBOL(xfrm_user_policy);
2096 
2097 static DEFINE_SPINLOCK(xfrm_km_lock);
2098 
2099 int xfrm_register_km(struct xfrm_mgr *km)
2100 {
2101 	spin_lock_bh(&xfrm_km_lock);
2102 	list_add_tail_rcu(&km->list, &xfrm_km_list);
2103 	spin_unlock_bh(&xfrm_km_lock);
2104 	return 0;
2105 }
2106 EXPORT_SYMBOL(xfrm_register_km);
2107 
2108 int xfrm_unregister_km(struct xfrm_mgr *km)
2109 {
2110 	spin_lock_bh(&xfrm_km_lock);
2111 	list_del_rcu(&km->list);
2112 	spin_unlock_bh(&xfrm_km_lock);
2113 	synchronize_rcu();
2114 	return 0;
2115 }
2116 EXPORT_SYMBOL(xfrm_unregister_km);
2117 
2118 int xfrm_state_register_afinfo(struct xfrm_state_afinfo *afinfo)
2119 {
2120 	int err = 0;
2121 
2122 	if (WARN_ON(afinfo->family >= NPROTO))
2123 		return -EAFNOSUPPORT;
2124 
2125 	spin_lock_bh(&xfrm_state_afinfo_lock);
2126 	if (unlikely(xfrm_state_afinfo[afinfo->family] != NULL))
2127 		err = -EEXIST;
2128 	else
2129 		rcu_assign_pointer(xfrm_state_afinfo[afinfo->family], afinfo);
2130 	spin_unlock_bh(&xfrm_state_afinfo_lock);
2131 	return err;
2132 }
2133 EXPORT_SYMBOL(xfrm_state_register_afinfo);
2134 
2135 int xfrm_state_unregister_afinfo(struct xfrm_state_afinfo *afinfo)
2136 {
2137 	int err = 0, family = afinfo->family;
2138 
2139 	if (WARN_ON(family >= NPROTO))
2140 		return -EAFNOSUPPORT;
2141 
2142 	spin_lock_bh(&xfrm_state_afinfo_lock);
2143 	if (likely(xfrm_state_afinfo[afinfo->family] != NULL)) {
2144 		if (rcu_access_pointer(xfrm_state_afinfo[family]) != afinfo)
2145 			err = -EINVAL;
2146 		else
2147 			RCU_INIT_POINTER(xfrm_state_afinfo[afinfo->family], NULL);
2148 	}
2149 	spin_unlock_bh(&xfrm_state_afinfo_lock);
2150 	synchronize_rcu();
2151 	return err;
2152 }
2153 EXPORT_SYMBOL(xfrm_state_unregister_afinfo);
2154 
2155 struct xfrm_state_afinfo *xfrm_state_afinfo_get_rcu(unsigned int family)
2156 {
2157 	if (unlikely(family >= NPROTO))
2158 		return NULL;
2159 
2160 	return rcu_dereference(xfrm_state_afinfo[family]);
2161 }
2162 EXPORT_SYMBOL_GPL(xfrm_state_afinfo_get_rcu);
2163 
2164 struct xfrm_state_afinfo *xfrm_state_get_afinfo(unsigned int family)
2165 {
2166 	struct xfrm_state_afinfo *afinfo;
2167 	if (unlikely(family >= NPROTO))
2168 		return NULL;
2169 	rcu_read_lock();
2170 	afinfo = rcu_dereference(xfrm_state_afinfo[family]);
2171 	if (unlikely(!afinfo))
2172 		rcu_read_unlock();
2173 	return afinfo;
2174 }
2175 
2176 void xfrm_flush_gc(void)
2177 {
2178 	flush_work(&xfrm_state_gc_work);
2179 }
2180 EXPORT_SYMBOL(xfrm_flush_gc);
2181 
2182 /* Temporarily located here until net/xfrm/xfrm_tunnel.c is created */
2183 void xfrm_state_delete_tunnel(struct xfrm_state *x)
2184 {
2185 	if (x->tunnel) {
2186 		struct xfrm_state *t = x->tunnel;
2187 
2188 		if (atomic_read(&t->tunnel_users) == 2)
2189 			xfrm_state_delete(t);
2190 		atomic_dec(&t->tunnel_users);
2191 		xfrm_state_put_sync(t);
2192 		x->tunnel = NULL;
2193 	}
2194 }
2195 EXPORT_SYMBOL(xfrm_state_delete_tunnel);
2196 
2197 int xfrm_state_mtu(struct xfrm_state *x, int mtu)
2198 {
2199 	const struct xfrm_type *type = READ_ONCE(x->type);
2200 
2201 	if (x->km.state == XFRM_STATE_VALID &&
2202 	    type && type->get_mtu)
2203 		return type->get_mtu(x, mtu);
2204 
2205 	return mtu - x->props.header_len;
2206 }
2207 
2208 int __xfrm_init_state(struct xfrm_state *x, bool init_replay, bool offload)
2209 {
2210 	const struct xfrm_state_afinfo *afinfo;
2211 	const struct xfrm_mode *inner_mode;
2212 	const struct xfrm_mode *outer_mode;
2213 	int family = x->props.family;
2214 	int err;
2215 
2216 	err = -EAFNOSUPPORT;
2217 	afinfo = xfrm_state_get_afinfo(family);
2218 	if (!afinfo)
2219 		goto error;
2220 
2221 	err = 0;
2222 	if (afinfo->init_flags)
2223 		err = afinfo->init_flags(x);
2224 
2225 	rcu_read_unlock();
2226 
2227 	if (err)
2228 		goto error;
2229 
2230 	err = -EPROTONOSUPPORT;
2231 
2232 	if (x->sel.family != AF_UNSPEC) {
2233 		inner_mode = xfrm_get_mode(x->props.mode, x->sel.family);
2234 		if (inner_mode == NULL)
2235 			goto error;
2236 
2237 		if (!(inner_mode->flags & XFRM_MODE_FLAG_TUNNEL) &&
2238 		    family != x->sel.family)
2239 			goto error;
2240 
2241 		x->inner_mode = *inner_mode;
2242 	} else {
2243 		const struct xfrm_mode *inner_mode_iaf;
2244 		int iafamily = AF_INET;
2245 
2246 		inner_mode = xfrm_get_mode(x->props.mode, x->props.family);
2247 		if (inner_mode == NULL)
2248 			goto error;
2249 
2250 		if (!(inner_mode->flags & XFRM_MODE_FLAG_TUNNEL))
2251 			goto error;
2252 
2253 		x->inner_mode = *inner_mode;
2254 
2255 		if (x->props.family == AF_INET)
2256 			iafamily = AF_INET6;
2257 
2258 		inner_mode_iaf = xfrm_get_mode(x->props.mode, iafamily);
2259 		if (inner_mode_iaf) {
2260 			if (inner_mode_iaf->flags & XFRM_MODE_FLAG_TUNNEL)
2261 				x->inner_mode_iaf = *inner_mode_iaf;
2262 		}
2263 	}
2264 
2265 	x->type = xfrm_get_type(x->id.proto, family);
2266 	if (x->type == NULL)
2267 		goto error;
2268 
2269 	x->type_offload = xfrm_get_type_offload(x->id.proto, family, offload);
2270 
2271 	err = x->type->init_state(x);
2272 	if (err)
2273 		goto error;
2274 
2275 	outer_mode = xfrm_get_mode(x->props.mode, family);
2276 	if (!outer_mode) {
2277 		err = -EPROTONOSUPPORT;
2278 		goto error;
2279 	}
2280 
2281 	x->outer_mode = *outer_mode;
2282 	if (init_replay) {
2283 		err = xfrm_init_replay(x);
2284 		if (err)
2285 			goto error;
2286 	}
2287 
2288 error:
2289 	return err;
2290 }
2291 
2292 EXPORT_SYMBOL(__xfrm_init_state);
2293 
2294 int xfrm_init_state(struct xfrm_state *x)
2295 {
2296 	int err;
2297 
2298 	err = __xfrm_init_state(x, true, false);
2299 	if (!err)
2300 		x->km.state = XFRM_STATE_VALID;
2301 
2302 	return err;
2303 }
2304 
2305 EXPORT_SYMBOL(xfrm_init_state);
2306 
2307 int __net_init xfrm_state_init(struct net *net)
2308 {
2309 	unsigned int sz;
2310 
2311 	if (net_eq(net, &init_net))
2312 		xfrm_state_cache = KMEM_CACHE(xfrm_state,
2313 					      SLAB_HWCACHE_ALIGN | SLAB_PANIC);
2314 
2315 	INIT_LIST_HEAD(&net->xfrm.state_all);
2316 
2317 	sz = sizeof(struct hlist_head) * 8;
2318 
2319 	net->xfrm.state_bydst = xfrm_hash_alloc(sz);
2320 	if (!net->xfrm.state_bydst)
2321 		goto out_bydst;
2322 	net->xfrm.state_bysrc = xfrm_hash_alloc(sz);
2323 	if (!net->xfrm.state_bysrc)
2324 		goto out_bysrc;
2325 	net->xfrm.state_byspi = xfrm_hash_alloc(sz);
2326 	if (!net->xfrm.state_byspi)
2327 		goto out_byspi;
2328 	net->xfrm.state_hmask = ((sz / sizeof(struct hlist_head)) - 1);
2329 
2330 	net->xfrm.state_num = 0;
2331 	INIT_WORK(&net->xfrm.state_hash_work, xfrm_hash_resize);
2332 	spin_lock_init(&net->xfrm.xfrm_state_lock);
2333 	return 0;
2334 
2335 out_byspi:
2336 	xfrm_hash_free(net->xfrm.state_bysrc, sz);
2337 out_bysrc:
2338 	xfrm_hash_free(net->xfrm.state_bydst, sz);
2339 out_bydst:
2340 	return -ENOMEM;
2341 }
2342 
2343 void xfrm_state_fini(struct net *net)
2344 {
2345 	unsigned int sz;
2346 
2347 	flush_work(&net->xfrm.state_hash_work);
2348 	flush_work(&xfrm_state_gc_work);
2349 	xfrm_state_flush(net, 0, false, true);
2350 
2351 	WARN_ON(!list_empty(&net->xfrm.state_all));
2352 
2353 	sz = (net->xfrm.state_hmask + 1) * sizeof(struct hlist_head);
2354 	WARN_ON(!hlist_empty(net->xfrm.state_byspi));
2355 	xfrm_hash_free(net->xfrm.state_byspi, sz);
2356 	WARN_ON(!hlist_empty(net->xfrm.state_bysrc));
2357 	xfrm_hash_free(net->xfrm.state_bysrc, sz);
2358 	WARN_ON(!hlist_empty(net->xfrm.state_bydst));
2359 	xfrm_hash_free(net->xfrm.state_bydst, sz);
2360 }
2361 
2362 #ifdef CONFIG_AUDITSYSCALL
2363 static void xfrm_audit_helper_sainfo(struct xfrm_state *x,
2364 				     struct audit_buffer *audit_buf)
2365 {
2366 	struct xfrm_sec_ctx *ctx = x->security;
2367 	u32 spi = ntohl(x->id.spi);
2368 
2369 	if (ctx)
2370 		audit_log_format(audit_buf, " sec_alg=%u sec_doi=%u sec_obj=%s",
2371 				 ctx->ctx_alg, ctx->ctx_doi, ctx->ctx_str);
2372 
2373 	switch (x->props.family) {
2374 	case AF_INET:
2375 		audit_log_format(audit_buf, " src=%pI4 dst=%pI4",
2376 				 &x->props.saddr.a4, &x->id.daddr.a4);
2377 		break;
2378 	case AF_INET6:
2379 		audit_log_format(audit_buf, " src=%pI6 dst=%pI6",
2380 				 x->props.saddr.a6, x->id.daddr.a6);
2381 		break;
2382 	}
2383 
2384 	audit_log_format(audit_buf, " spi=%u(0x%x)", spi, spi);
2385 }
2386 
2387 static void xfrm_audit_helper_pktinfo(struct sk_buff *skb, u16 family,
2388 				      struct audit_buffer *audit_buf)
2389 {
2390 	const struct iphdr *iph4;
2391 	const struct ipv6hdr *iph6;
2392 
2393 	switch (family) {
2394 	case AF_INET:
2395 		iph4 = ip_hdr(skb);
2396 		audit_log_format(audit_buf, " src=%pI4 dst=%pI4",
2397 				 &iph4->saddr, &iph4->daddr);
2398 		break;
2399 	case AF_INET6:
2400 		iph6 = ipv6_hdr(skb);
2401 		audit_log_format(audit_buf,
2402 				 " src=%pI6 dst=%pI6 flowlbl=0x%x%02x%02x",
2403 				 &iph6->saddr, &iph6->daddr,
2404 				 iph6->flow_lbl[0] & 0x0f,
2405 				 iph6->flow_lbl[1],
2406 				 iph6->flow_lbl[2]);
2407 		break;
2408 	}
2409 }
2410 
2411 void xfrm_audit_state_add(struct xfrm_state *x, int result, bool task_valid)
2412 {
2413 	struct audit_buffer *audit_buf;
2414 
2415 	audit_buf = xfrm_audit_start("SAD-add");
2416 	if (audit_buf == NULL)
2417 		return;
2418 	xfrm_audit_helper_usrinfo(task_valid, audit_buf);
2419 	xfrm_audit_helper_sainfo(x, audit_buf);
2420 	audit_log_format(audit_buf, " res=%u", result);
2421 	audit_log_end(audit_buf);
2422 }
2423 EXPORT_SYMBOL_GPL(xfrm_audit_state_add);
2424 
2425 void xfrm_audit_state_delete(struct xfrm_state *x, int result, bool task_valid)
2426 {
2427 	struct audit_buffer *audit_buf;
2428 
2429 	audit_buf = xfrm_audit_start("SAD-delete");
2430 	if (audit_buf == NULL)
2431 		return;
2432 	xfrm_audit_helper_usrinfo(task_valid, audit_buf);
2433 	xfrm_audit_helper_sainfo(x, audit_buf);
2434 	audit_log_format(audit_buf, " res=%u", result);
2435 	audit_log_end(audit_buf);
2436 }
2437 EXPORT_SYMBOL_GPL(xfrm_audit_state_delete);
2438 
2439 void xfrm_audit_state_replay_overflow(struct xfrm_state *x,
2440 				      struct sk_buff *skb)
2441 {
2442 	struct audit_buffer *audit_buf;
2443 	u32 spi;
2444 
2445 	audit_buf = xfrm_audit_start("SA-replay-overflow");
2446 	if (audit_buf == NULL)
2447 		return;
2448 	xfrm_audit_helper_pktinfo(skb, x->props.family, audit_buf);
2449 	/* don't record the sequence number because it's inherent in this kind
2450 	 * of audit message */
2451 	spi = ntohl(x->id.spi);
2452 	audit_log_format(audit_buf, " spi=%u(0x%x)", spi, spi);
2453 	audit_log_end(audit_buf);
2454 }
2455 EXPORT_SYMBOL_GPL(xfrm_audit_state_replay_overflow);
2456 
2457 void xfrm_audit_state_replay(struct xfrm_state *x,
2458 			     struct sk_buff *skb, __be32 net_seq)
2459 {
2460 	struct audit_buffer *audit_buf;
2461 	u32 spi;
2462 
2463 	audit_buf = xfrm_audit_start("SA-replayed-pkt");
2464 	if (audit_buf == NULL)
2465 		return;
2466 	xfrm_audit_helper_pktinfo(skb, x->props.family, audit_buf);
2467 	spi = ntohl(x->id.spi);
2468 	audit_log_format(audit_buf, " spi=%u(0x%x) seqno=%u",
2469 			 spi, spi, ntohl(net_seq));
2470 	audit_log_end(audit_buf);
2471 }
2472 EXPORT_SYMBOL_GPL(xfrm_audit_state_replay);
2473 
2474 void xfrm_audit_state_notfound_simple(struct sk_buff *skb, u16 family)
2475 {
2476 	struct audit_buffer *audit_buf;
2477 
2478 	audit_buf = xfrm_audit_start("SA-notfound");
2479 	if (audit_buf == NULL)
2480 		return;
2481 	xfrm_audit_helper_pktinfo(skb, family, audit_buf);
2482 	audit_log_end(audit_buf);
2483 }
2484 EXPORT_SYMBOL_GPL(xfrm_audit_state_notfound_simple);
2485 
2486 void xfrm_audit_state_notfound(struct sk_buff *skb, u16 family,
2487 			       __be32 net_spi, __be32 net_seq)
2488 {
2489 	struct audit_buffer *audit_buf;
2490 	u32 spi;
2491 
2492 	audit_buf = xfrm_audit_start("SA-notfound");
2493 	if (audit_buf == NULL)
2494 		return;
2495 	xfrm_audit_helper_pktinfo(skb, family, audit_buf);
2496 	spi = ntohl(net_spi);
2497 	audit_log_format(audit_buf, " spi=%u(0x%x) seqno=%u",
2498 			 spi, spi, ntohl(net_seq));
2499 	audit_log_end(audit_buf);
2500 }
2501 EXPORT_SYMBOL_GPL(xfrm_audit_state_notfound);
2502 
2503 void xfrm_audit_state_icvfail(struct xfrm_state *x,
2504 			      struct sk_buff *skb, u8 proto)
2505 {
2506 	struct audit_buffer *audit_buf;
2507 	__be32 net_spi;
2508 	__be32 net_seq;
2509 
2510 	audit_buf = xfrm_audit_start("SA-icv-failure");
2511 	if (audit_buf == NULL)
2512 		return;
2513 	xfrm_audit_helper_pktinfo(skb, x->props.family, audit_buf);
2514 	if (xfrm_parse_spi(skb, proto, &net_spi, &net_seq) == 0) {
2515 		u32 spi = ntohl(net_spi);
2516 		audit_log_format(audit_buf, " spi=%u(0x%x) seqno=%u",
2517 				 spi, spi, ntohl(net_seq));
2518 	}
2519 	audit_log_end(audit_buf);
2520 }
2521 EXPORT_SYMBOL_GPL(xfrm_audit_state_icvfail);
2522 #endif /* CONFIG_AUDITSYSCALL */
2523