xref: /openbmc/linux/net/xfrm/xfrm_policy.c (revision 089a49b6)
1 /*
2  * xfrm_policy.c
3  *
4  * Changes:
5  *	Mitsuru KANDA @USAGI
6  * 	Kazunori MIYAZAWA @USAGI
7  * 	Kunihiro Ishiguro <kunihiro@ipinfusion.com>
8  * 		IPv6 support
9  * 	Kazunori MIYAZAWA @USAGI
10  * 	YOSHIFUJI Hideaki
11  * 		Split up af-specific portion
12  *	Derek Atkins <derek@ihtfp.com>		Add the post_input processor
13  *
14  */
15 
16 #include <linux/err.h>
17 #include <linux/slab.h>
18 #include <linux/kmod.h>
19 #include <linux/list.h>
20 #include <linux/spinlock.h>
21 #include <linux/workqueue.h>
22 #include <linux/notifier.h>
23 #include <linux/netdevice.h>
24 #include <linux/netfilter.h>
25 #include <linux/module.h>
26 #include <linux/cache.h>
27 #include <linux/audit.h>
28 #include <net/dst.h>
29 #include <net/flow.h>
30 #include <net/xfrm.h>
31 #include <net/ip.h>
32 #ifdef CONFIG_XFRM_STATISTICS
33 #include <net/snmp.h>
34 #endif
35 
36 #include "xfrm_hash.h"
37 
38 #define XFRM_QUEUE_TMO_MIN ((unsigned)(HZ/10))
39 #define XFRM_QUEUE_TMO_MAX ((unsigned)(60*HZ))
40 #define XFRM_MAX_QUEUE_LEN	100
41 
42 DEFINE_MUTEX(xfrm_cfg_mutex);
43 EXPORT_SYMBOL(xfrm_cfg_mutex);
44 
45 static DEFINE_SPINLOCK(xfrm_policy_sk_bundle_lock);
46 static struct dst_entry *xfrm_policy_sk_bundles;
47 static DEFINE_RWLOCK(xfrm_policy_lock);
48 
49 static DEFINE_SPINLOCK(xfrm_policy_afinfo_lock);
50 static struct xfrm_policy_afinfo __rcu *xfrm_policy_afinfo[NPROTO]
51 						__read_mostly;
52 
53 static struct kmem_cache *xfrm_dst_cache __read_mostly;
54 
55 static void xfrm_init_pmtu(struct dst_entry *dst);
56 static int stale_bundle(struct dst_entry *dst);
57 static int xfrm_bundle_ok(struct xfrm_dst *xdst);
58 static void xfrm_policy_queue_process(unsigned long arg);
59 
60 static struct xfrm_policy *__xfrm_policy_unlink(struct xfrm_policy *pol,
61 						int dir);
62 
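/* Selector matching: addresses are compared under the selector prefix
 * lengths, ports under the port masks, and protocol/ifindex only when the
 * selector specifies them (zero acts as a wildcard).
 */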
63 static inline bool
64 __xfrm4_selector_match(const struct xfrm_selector *sel, const struct flowi *fl)
65 {
66 	const struct flowi4 *fl4 = &fl->u.ip4;
67 
68 	return  addr4_match(fl4->daddr, sel->daddr.a4, sel->prefixlen_d) &&
69 		addr4_match(fl4->saddr, sel->saddr.a4, sel->prefixlen_s) &&
70 		!((xfrm_flowi_dport(fl, &fl4->uli) ^ sel->dport) & sel->dport_mask) &&
71 		!((xfrm_flowi_sport(fl, &fl4->uli) ^ sel->sport) & sel->sport_mask) &&
72 		(fl4->flowi4_proto == sel->proto || !sel->proto) &&
73 		(fl4->flowi4_oif == sel->ifindex || !sel->ifindex);
74 }
75 
76 static inline bool
77 __xfrm6_selector_match(const struct xfrm_selector *sel, const struct flowi *fl)
78 {
79 	const struct flowi6 *fl6 = &fl->u.ip6;
80 
81 	return  addr_match(&fl6->daddr, &sel->daddr, sel->prefixlen_d) &&
82 		addr_match(&fl6->saddr, &sel->saddr, sel->prefixlen_s) &&
83 		!((xfrm_flowi_dport(fl, &fl6->uli) ^ sel->dport) & sel->dport_mask) &&
84 		!((xfrm_flowi_sport(fl, &fl6->uli) ^ sel->sport) & sel->sport_mask) &&
85 		(fl6->flowi6_proto == sel->proto || !sel->proto) &&
86 		(fl6->flowi6_oif == sel->ifindex || !sel->ifindex);
87 }
88 
89 bool xfrm_selector_match(const struct xfrm_selector *sel, const struct flowi *fl,
90 			 unsigned short family)
91 {
92 	switch (family) {
93 	case AF_INET:
94 		return __xfrm4_selector_match(sel, fl);
95 	case AF_INET6:
96 		return __xfrm6_selector_match(sel, fl);
97 	}
98 	return false;
99 }
100 
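/* Look up the per-family policy afinfo.  On success the caller is left
 * holding rcu_read_lock(), which xfrm_policy_put_afinfo() drops again; on
 * failure (unknown family or no afinfo registered) the lock is released
 * before returning NULL.
 */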
101 static struct xfrm_policy_afinfo *xfrm_policy_get_afinfo(unsigned short family)
102 {
103 	struct xfrm_policy_afinfo *afinfo;
104 
105 	if (unlikely(family >= NPROTO))
106 		return NULL;
107 	rcu_read_lock();
108 	afinfo = rcu_dereference(xfrm_policy_afinfo[family]);
109 	if (unlikely(!afinfo))
110 		rcu_read_unlock();
111 	return afinfo;
112 }
113 
114 static void xfrm_policy_put_afinfo(struct xfrm_policy_afinfo *afinfo)
115 {
116 	rcu_read_unlock();
117 }
118 
119 static inline struct dst_entry *__xfrm_dst_lookup(struct net *net, int tos,
120 						  const xfrm_address_t *saddr,
121 						  const xfrm_address_t *daddr,
122 						  int family)
123 {
124 	struct xfrm_policy_afinfo *afinfo;
125 	struct dst_entry *dst;
126 
127 	afinfo = xfrm_policy_get_afinfo(family);
128 	if (unlikely(afinfo == NULL))
129 		return ERR_PTR(-EAFNOSUPPORT);
130 
131 	dst = afinfo->dst_lookup(net, tos, saddr, daddr);
132 
133 	xfrm_policy_put_afinfo(afinfo);
134 
135 	return dst;
136 }
137 
138 static inline struct dst_entry *xfrm_dst_lookup(struct xfrm_state *x, int tos,
139 						xfrm_address_t *prev_saddr,
140 						xfrm_address_t *prev_daddr,
141 						int family)
142 {
143 	struct net *net = xs_net(x);
144 	xfrm_address_t *saddr = &x->props.saddr;
145 	xfrm_address_t *daddr = &x->id.daddr;
146 	struct dst_entry *dst;
147 
148 	if (x->type->flags & XFRM_TYPE_LOCAL_COADDR) {
149 		saddr = x->coaddr;
150 		daddr = prev_daddr;
151 	}
152 	if (x->type->flags & XFRM_TYPE_REMOTE_COADDR) {
153 		saddr = prev_saddr;
154 		daddr = x->coaddr;
155 	}
156 
157 	dst = __xfrm_dst_lookup(net, tos, saddr, daddr, family);
158 
159 	if (!IS_ERR(dst)) {
160 		if (prev_saddr != saddr)
161 			memcpy(prev_saddr, saddr,  sizeof(*prev_saddr));
162 		if (prev_daddr != daddr)
163 			memcpy(prev_daddr, daddr,  sizeof(*prev_daddr));
164 	}
165 
166 	return dst;
167 }
168 
169 static inline unsigned long make_jiffies(long secs)
170 {
171 	if (secs >= (MAX_SCHEDULE_TIMEOUT-1)/HZ)
172 		return MAX_SCHEDULE_TIMEOUT-1;
173 	else
174 		return secs*HZ;
175 }
176 
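/* Per-policy lifetime timer: re-arms itself for the nearest soft/hard
 * expiry, reports soft expiry to the key manager via km_policy_expired()
 * and deletes the policy once a hard limit is reached.
 */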
177 static void xfrm_policy_timer(unsigned long data)
178 {
179 	struct xfrm_policy *xp = (struct xfrm_policy*)data;
180 	unsigned long now = get_seconds();
181 	long next = LONG_MAX;
182 	int warn = 0;
183 	int dir;
184 
185 	read_lock(&xp->lock);
186 
187 	if (unlikely(xp->walk.dead))
188 		goto out;
189 
190 	dir = xfrm_policy_id2dir(xp->index);
191 
192 	if (xp->lft.hard_add_expires_seconds) {
193 		long tmo = xp->lft.hard_add_expires_seconds +
194 			xp->curlft.add_time - now;
195 		if (tmo <= 0)
196 			goto expired;
197 		if (tmo < next)
198 			next = tmo;
199 	}
200 	if (xp->lft.hard_use_expires_seconds) {
201 		long tmo = xp->lft.hard_use_expires_seconds +
202 			(xp->curlft.use_time ? : xp->curlft.add_time) - now;
203 		if (tmo <= 0)
204 			goto expired;
205 		if (tmo < next)
206 			next = tmo;
207 	}
208 	if (xp->lft.soft_add_expires_seconds) {
209 		long tmo = xp->lft.soft_add_expires_seconds +
210 			xp->curlft.add_time - now;
211 		if (tmo <= 0) {
212 			warn = 1;
213 			tmo = XFRM_KM_TIMEOUT;
214 		}
215 		if (tmo < next)
216 			next = tmo;
217 	}
218 	if (xp->lft.soft_use_expires_seconds) {
219 		long tmo = xp->lft.soft_use_expires_seconds +
220 			(xp->curlft.use_time ? : xp->curlft.add_time) - now;
221 		if (tmo <= 0) {
222 			warn = 1;
223 			tmo = XFRM_KM_TIMEOUT;
224 		}
225 		if (tmo < next)
226 			next = tmo;
227 	}
228 
229 	if (warn)
230 		km_policy_expired(xp, dir, 0, 0);
231 	if (next != LONG_MAX &&
232 	    !mod_timer(&xp->timer, jiffies + make_jiffies(next)))
233 		xfrm_pol_hold(xp);
234 
235 out:
236 	read_unlock(&xp->lock);
237 	xfrm_pol_put(xp);
238 	return;
239 
240 expired:
241 	read_unlock(&xp->lock);
242 	if (!xfrm_policy_delete(xp, dir))
243 		km_policy_expired(xp, dir, 1, 0);
244 	xfrm_pol_put(xp);
245 }
246 
247 static struct flow_cache_object *xfrm_policy_flo_get(struct flow_cache_object *flo)
248 {
249 	struct xfrm_policy *pol = container_of(flo, struct xfrm_policy, flo);
250 
251 	if (unlikely(pol->walk.dead))
252 		flo = NULL;
253 	else
254 		xfrm_pol_hold(pol);
255 
256 	return flo;
257 }
258 
259 static int xfrm_policy_flo_check(struct flow_cache_object *flo)
260 {
261 	struct xfrm_policy *pol = container_of(flo, struct xfrm_policy, flo);
262 
263 	return !pol->walk.dead;
264 }
265 
266 static void xfrm_policy_flo_delete(struct flow_cache_object *flo)
267 {
268 	xfrm_pol_put(container_of(flo, struct xfrm_policy, flo));
269 }
270 
271 static const struct flow_cache_ops xfrm_policy_fc_ops = {
272 	.get = xfrm_policy_flo_get,
273 	.check = xfrm_policy_flo_check,
274 	.delete = xfrm_policy_flo_delete,
275 };
276 
277 /* Allocate xfrm_policy. Not used here; it is supposed to be used by pfkeyv2
278  * SPD calls.
279  */
280 
281 struct xfrm_policy *xfrm_policy_alloc(struct net *net, gfp_t gfp)
282 {
283 	struct xfrm_policy *policy;
284 
285 	policy = kzalloc(sizeof(struct xfrm_policy), gfp);
286 
287 	if (policy) {
288 		write_pnet(&policy->xp_net, net);
289 		INIT_LIST_HEAD(&policy->walk.all);
290 		INIT_HLIST_NODE(&policy->bydst);
291 		INIT_HLIST_NODE(&policy->byidx);
292 		rwlock_init(&policy->lock);
293 		atomic_set(&policy->refcnt, 1);
294 		skb_queue_head_init(&policy->polq.hold_queue);
295 		setup_timer(&policy->timer, xfrm_policy_timer,
296 				(unsigned long)policy);
297 		setup_timer(&policy->polq.hold_timer, xfrm_policy_queue_process,
298 			    (unsigned long)policy);
299 		policy->flo.ops = &xfrm_policy_fc_ops;
300 	}
301 	return policy;
302 }
303 EXPORT_SYMBOL(xfrm_policy_alloc);
304 
305 /* Destroy xfrm_policy: descendant resources must already have been released by this point. */
306 
307 void xfrm_policy_destroy(struct xfrm_policy *policy)
308 {
309 	BUG_ON(!policy->walk.dead);
310 
311 	if (del_timer(&policy->timer) || del_timer(&policy->polq.hold_timer))
312 		BUG();
313 
314 	security_xfrm_policy_free(policy->security);
315 	kfree(policy);
316 }
317 EXPORT_SYMBOL(xfrm_policy_destroy);
318 
319 static void xfrm_queue_purge(struct sk_buff_head *list)
320 {
321 	struct sk_buff *skb;
322 
323 	while ((skb = skb_dequeue(list)) != NULL)
324 		kfree_skb(skb);
325 }
326 
327 /* Rule must be locked. Release descendant resources, announce the
328  * entry dead. The rule must already be unlinked from all lists at this point.
329  */
330 
331 static void xfrm_policy_kill(struct xfrm_policy *policy)
332 {
333 	policy->walk.dead = 1;
334 
335 	atomic_inc(&policy->genid);
336 
337 	del_timer(&policy->polq.hold_timer);
338 	xfrm_queue_purge(&policy->polq.hold_queue);
339 
340 	if (del_timer(&policy->timer))
341 		xfrm_pol_put(policy);
342 
343 	xfrm_pol_put(policy);
344 }
345 
346 static unsigned int xfrm_policy_hashmax __read_mostly = 1 * 1024 * 1024;
347 
348 static inline unsigned int idx_hash(struct net *net, u32 index)
349 {
350 	return __idx_hash(index, net->xfrm.policy_idx_hmask);
351 }
352 
353 static struct hlist_head *policy_hash_bysel(struct net *net,
354 					    const struct xfrm_selector *sel,
355 					    unsigned short family, int dir)
356 {
357 	unsigned int hmask = net->xfrm.policy_bydst[dir].hmask;
358 	unsigned int hash = __sel_hash(sel, family, hmask);
359 
360 	return (hash == hmask + 1 ?
361 		&net->xfrm.policy_inexact[dir] :
362 		net->xfrm.policy_bydst[dir].table + hash);
363 }
364 
365 static struct hlist_head *policy_hash_direct(struct net *net,
366 					     const xfrm_address_t *daddr,
367 					     const xfrm_address_t *saddr,
368 					     unsigned short family, int dir)
369 {
370 	unsigned int hmask = net->xfrm.policy_bydst[dir].hmask;
371 	unsigned int hash = __addr_hash(daddr, saddr, family, hmask);
372 
373 	return net->xfrm.policy_bydst[dir].table + hash;
374 }
375 
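/* Rehash one bydst chain into the new table.  Entries hashing to the same
 * new bucket keep their relative order: the first one is moved to the head
 * of the new bucket and the rest are chained after it (entry0); the outer
 * loop restarts until the old chain is empty.
 */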
376 static void xfrm_dst_hash_transfer(struct hlist_head *list,
377 				   struct hlist_head *ndsttable,
378 				   unsigned int nhashmask)
379 {
380 	struct hlist_node *tmp, *entry0 = NULL;
381 	struct xfrm_policy *pol;
382 	unsigned int h0 = 0;
383 
384 redo:
385 	hlist_for_each_entry_safe(pol, tmp, list, bydst) {
386 		unsigned int h;
387 
388 		h = __addr_hash(&pol->selector.daddr, &pol->selector.saddr,
389 				pol->family, nhashmask);
390 		if (!entry0) {
391 			hlist_del(&pol->bydst);
392 			hlist_add_head(&pol->bydst, ndsttable+h);
393 			h0 = h;
394 		} else {
395 			if (h != h0)
396 				continue;
397 			hlist_del(&pol->bydst);
398 			hlist_add_after(entry0, &pol->bydst);
399 		}
400 		entry0 = &pol->bydst;
401 	}
402 	if (!hlist_empty(list)) {
403 		entry0 = NULL;
404 		goto redo;
405 	}
406 }
407 
408 static void xfrm_idx_hash_transfer(struct hlist_head *list,
409 				   struct hlist_head *nidxtable,
410 				   unsigned int nhashmask)
411 {
412 	struct hlist_node *tmp;
413 	struct xfrm_policy *pol;
414 
415 	hlist_for_each_entry_safe(pol, tmp, list, byidx) {
416 		unsigned int h;
417 
418 		h = __idx_hash(pol->index, nhashmask);
419 		hlist_add_head(&pol->byidx, nidxtable+h);
420 	}
421 }
422 
423 static unsigned long xfrm_new_hash_mask(unsigned int old_hmask)
424 {
425 	return ((old_hmask + 1) << 1) - 1;
426 }
427 
428 static void xfrm_bydst_resize(struct net *net, int dir)
429 {
430 	unsigned int hmask = net->xfrm.policy_bydst[dir].hmask;
431 	unsigned int nhashmask = xfrm_new_hash_mask(hmask);
432 	unsigned int nsize = (nhashmask + 1) * sizeof(struct hlist_head);
433 	struct hlist_head *odst = net->xfrm.policy_bydst[dir].table;
434 	struct hlist_head *ndst = xfrm_hash_alloc(nsize);
435 	int i;
436 
437 	if (!ndst)
438 		return;
439 
440 	write_lock_bh(&xfrm_policy_lock);
441 
442 	for (i = hmask; i >= 0; i--)
443 		xfrm_dst_hash_transfer(odst + i, ndst, nhashmask);
444 
445 	net->xfrm.policy_bydst[dir].table = ndst;
446 	net->xfrm.policy_bydst[dir].hmask = nhashmask;
447 
448 	write_unlock_bh(&xfrm_policy_lock);
449 
450 	xfrm_hash_free(odst, (hmask + 1) * sizeof(struct hlist_head));
451 }
452 
453 static void xfrm_byidx_resize(struct net *net, int total)
454 {
455 	unsigned int hmask = net->xfrm.policy_idx_hmask;
456 	unsigned int nhashmask = xfrm_new_hash_mask(hmask);
457 	unsigned int nsize = (nhashmask + 1) * sizeof(struct hlist_head);
458 	struct hlist_head *oidx = net->xfrm.policy_byidx;
459 	struct hlist_head *nidx = xfrm_hash_alloc(nsize);
460 	int i;
461 
462 	if (!nidx)
463 		return;
464 
465 	write_lock_bh(&xfrm_policy_lock);
466 
467 	for (i = hmask; i >= 0; i--)
468 		xfrm_idx_hash_transfer(oidx + i, nidx, nhashmask);
469 
470 	net->xfrm.policy_byidx = nidx;
471 	net->xfrm.policy_idx_hmask = nhashmask;
472 
473 	write_unlock_bh(&xfrm_policy_lock);
474 
475 	xfrm_hash_free(oidx, (hmask + 1) * sizeof(struct hlist_head));
476 }
477 
478 static inline int xfrm_bydst_should_resize(struct net *net, int dir, int *total)
479 {
480 	unsigned int cnt = net->xfrm.policy_count[dir];
481 	unsigned int hmask = net->xfrm.policy_bydst[dir].hmask;
482 
483 	if (total)
484 		*total += cnt;
485 
486 	if ((hmask + 1) < xfrm_policy_hashmax &&
487 	    cnt > hmask)
488 		return 1;
489 
490 	return 0;
491 }
492 
493 static inline int xfrm_byidx_should_resize(struct net *net, int total)
494 {
495 	unsigned int hmask = net->xfrm.policy_idx_hmask;
496 
497 	if ((hmask + 1) < xfrm_policy_hashmax &&
498 	    total > hmask)
499 		return 1;
500 
501 	return 0;
502 }
503 
504 void xfrm_spd_getinfo(struct net *net, struct xfrmk_spdinfo *si)
505 {
506 	read_lock_bh(&xfrm_policy_lock);
507 	si->incnt = net->xfrm.policy_count[XFRM_POLICY_IN];
508 	si->outcnt = net->xfrm.policy_count[XFRM_POLICY_OUT];
509 	si->fwdcnt = net->xfrm.policy_count[XFRM_POLICY_FWD];
510 	si->inscnt = net->xfrm.policy_count[XFRM_POLICY_IN+XFRM_POLICY_MAX];
511 	si->outscnt = net->xfrm.policy_count[XFRM_POLICY_OUT+XFRM_POLICY_MAX];
512 	si->fwdscnt = net->xfrm.policy_count[XFRM_POLICY_FWD+XFRM_POLICY_MAX];
513 	si->spdhcnt = net->xfrm.policy_idx_hmask;
514 	si->spdhmcnt = xfrm_policy_hashmax;
515 	read_unlock_bh(&xfrm_policy_lock);
516 }
517 EXPORT_SYMBOL(xfrm_spd_getinfo);
518 
519 static DEFINE_MUTEX(hash_resize_mutex);
520 static void xfrm_hash_resize(struct work_struct *work)
521 {
522 	struct net *net = container_of(work, struct net, xfrm.policy_hash_work);
523 	int dir, total;
524 
525 	mutex_lock(&hash_resize_mutex);
526 
527 	total = 0;
528 	for (dir = 0; dir < XFRM_POLICY_MAX * 2; dir++) {
529 		if (xfrm_bydst_should_resize(net, dir, &total))
530 			xfrm_bydst_resize(net, dir);
531 	}
532 	if (xfrm_byidx_should_resize(net, total))
533 		xfrm_byidx_resize(net, total);
534 
535 	mutex_unlock(&hash_resize_mutex);
536 }
537 
538 /* Generate a new index... KAME seems to generate them ordered, at the cost
539  * of making the ordering of rules completely unpredictable. That will not do here. */
540 static u32 xfrm_gen_index(struct net *net, int dir)
541 {
542 	static u32 idx_generator;
543 
544 	for (;;) {
545 		struct hlist_head *list;
546 		struct xfrm_policy *p;
547 		u32 idx;
548 		int found;
549 
550 		idx = (idx_generator | dir);
551 		idx_generator += 8;
552 		if (idx == 0)
553 			idx = 8;
554 		list = net->xfrm.policy_byidx + idx_hash(net, idx);
555 		found = 0;
556 		hlist_for_each_entry(p, list, byidx) {
557 			if (p->index == idx) {
558 				found = 1;
559 				break;
560 			}
561 		}
562 		if (!found)
563 			return idx;
564 	}
565 }
566 
567 static inline int selector_cmp(struct xfrm_selector *s1, struct xfrm_selector *s2)
568 {
569 	u32 *p1 = (u32 *) s1;
570 	u32 *p2 = (u32 *) s2;
571 	int len = sizeof(struct xfrm_selector) / sizeof(u32);
572 	int i;
573 
574 	for (i = 0; i < len; i++) {
575 		if (p1[i] != p2[i])
576 			return 1;
577 	}
578 
579 	return 0;
580 }
581 
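/* When a policy is replaced, move any packets parked on the old policy's
 * hold queue over to the new one and kick its hold timer so they get
 * re-evaluated against the new policy.
 */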
582 static void xfrm_policy_requeue(struct xfrm_policy *old,
583 				struct xfrm_policy *new)
584 {
585 	struct xfrm_policy_queue *pq = &old->polq;
586 	struct sk_buff_head list;
587 
588 	__skb_queue_head_init(&list);
589 
590 	spin_lock_bh(&pq->hold_queue.lock);
591 	skb_queue_splice_init(&pq->hold_queue, &list);
592 	del_timer(&pq->hold_timer);
593 	spin_unlock_bh(&pq->hold_queue.lock);
594 
595 	if (skb_queue_empty(&list))
596 		return;
597 
598 	pq = &new->polq;
599 
600 	spin_lock_bh(&pq->hold_queue.lock);
601 	skb_queue_splice(&list, &pq->hold_queue);
602 	pq->timeout = XFRM_QUEUE_TMO_MIN;
603 	mod_timer(&pq->hold_timer, jiffies);
604 	spin_unlock_bh(&pq->hold_queue.lock);
605 }
606 
607 static bool xfrm_policy_mark_match(struct xfrm_policy *policy,
608 				   struct xfrm_policy *pol)
609 {
610 	u32 mark = policy->mark.v & policy->mark.m;
611 
612 	if (policy->mark.v == pol->mark.v && policy->mark.m == pol->mark.m)
613 		return true;
614 
615 	if ((mark & pol->mark.m) == pol->mark.v &&
616 	    policy->priority == pol->priority)
617 		return true;
618 
619 	return false;
620 }
621 
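/* Insert a policy into the SPD.  The bydst chain stays sorted by priority;
 * an existing entry with the same selector, mark and security context is
 * replaced (or -EEXIST is returned when 'excl' is set), its held packets
 * are transferred and the old policy is killed outside the lock.
 */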
622 int xfrm_policy_insert(int dir, struct xfrm_policy *policy, int excl)
623 {
624 	struct net *net = xp_net(policy);
625 	struct xfrm_policy *pol;
626 	struct xfrm_policy *delpol;
627 	struct hlist_head *chain;
628 	struct hlist_node *newpos;
629 
630 	write_lock_bh(&xfrm_policy_lock);
631 	chain = policy_hash_bysel(net, &policy->selector, policy->family, dir);
632 	delpol = NULL;
633 	newpos = NULL;
634 	hlist_for_each_entry(pol, chain, bydst) {
635 		if (pol->type == policy->type &&
636 		    !selector_cmp(&pol->selector, &policy->selector) &&
637 		    xfrm_policy_mark_match(policy, pol) &&
638 		    xfrm_sec_ctx_match(pol->security, policy->security) &&
639 		    !WARN_ON(delpol)) {
640 			if (excl) {
641 				write_unlock_bh(&xfrm_policy_lock);
642 				return -EEXIST;
643 			}
644 			delpol = pol;
645 			if (policy->priority > pol->priority)
646 				continue;
647 		} else if (policy->priority >= pol->priority) {
648 			newpos = &pol->bydst;
649 			continue;
650 		}
651 		if (delpol)
652 			break;
653 	}
654 	if (newpos)
655 		hlist_add_after(newpos, &policy->bydst);
656 	else
657 		hlist_add_head(&policy->bydst, chain);
658 	xfrm_pol_hold(policy);
659 	net->xfrm.policy_count[dir]++;
660 	atomic_inc(&flow_cache_genid);
661 
662 	/* After the previous checks, family can be either AF_INET or AF_INET6 */
663 	if (policy->family == AF_INET)
664 		rt_genid_bump_ipv4(net);
665 	else
666 		rt_genid_bump_ipv6(net);
667 
668 	if (delpol) {
669 		xfrm_policy_requeue(delpol, policy);
670 		__xfrm_policy_unlink(delpol, dir);
671 	}
672 	policy->index = delpol ? delpol->index : xfrm_gen_index(net, dir);
673 	hlist_add_head(&policy->byidx, net->xfrm.policy_byidx+idx_hash(net, policy->index));
674 	policy->curlft.add_time = get_seconds();
675 	policy->curlft.use_time = 0;
676 	if (!mod_timer(&policy->timer, jiffies + HZ))
677 		xfrm_pol_hold(policy);
678 	list_add(&policy->walk.all, &net->xfrm.policy_all);
679 	write_unlock_bh(&xfrm_policy_lock);
680 
681 	if (delpol)
682 		xfrm_policy_kill(delpol);
683 	else if (xfrm_bydst_should_resize(net, dir, NULL))
684 		schedule_work(&net->xfrm.policy_hash_work);
685 
686 	return 0;
687 }
688 EXPORT_SYMBOL(xfrm_policy_insert);
689 
690 struct xfrm_policy *xfrm_policy_bysel_ctx(struct net *net, u32 mark, u8 type,
691 					  int dir, struct xfrm_selector *sel,
692 					  struct xfrm_sec_ctx *ctx, int delete,
693 					  int *err)
694 {
695 	struct xfrm_policy *pol, *ret;
696 	struct hlist_head *chain;
697 
698 	*err = 0;
699 	write_lock_bh(&xfrm_policy_lock);
700 	chain = policy_hash_bysel(net, sel, sel->family, dir);
701 	ret = NULL;
702 	hlist_for_each_entry(pol, chain, bydst) {
703 		if (pol->type == type &&
704 		    (mark & pol->mark.m) == pol->mark.v &&
705 		    !selector_cmp(sel, &pol->selector) &&
706 		    xfrm_sec_ctx_match(ctx, pol->security)) {
707 			xfrm_pol_hold(pol);
708 			if (delete) {
709 				*err = security_xfrm_policy_delete(
710 								pol->security);
711 				if (*err) {
712 					write_unlock_bh(&xfrm_policy_lock);
713 					return pol;
714 				}
715 				__xfrm_policy_unlink(pol, dir);
716 			}
717 			ret = pol;
718 			break;
719 		}
720 	}
721 	write_unlock_bh(&xfrm_policy_lock);
722 
723 	if (ret && delete)
724 		xfrm_policy_kill(ret);
725 	return ret;
726 }
727 EXPORT_SYMBOL(xfrm_policy_bysel_ctx);
728 
729 struct xfrm_policy *xfrm_policy_byid(struct net *net, u32 mark, u8 type,
730 				     int dir, u32 id, int delete, int *err)
731 {
732 	struct xfrm_policy *pol, *ret;
733 	struct hlist_head *chain;
734 
735 	*err = -ENOENT;
736 	if (xfrm_policy_id2dir(id) != dir)
737 		return NULL;
738 
739 	*err = 0;
740 	write_lock_bh(&xfrm_policy_lock);
741 	chain = net->xfrm.policy_byidx + idx_hash(net, id);
742 	ret = NULL;
743 	hlist_for_each_entry(pol, chain, byidx) {
744 		if (pol->type == type && pol->index == id &&
745 		    (mark & pol->mark.m) == pol->mark.v) {
746 			xfrm_pol_hold(pol);
747 			if (delete) {
748 				*err = security_xfrm_policy_delete(
749 								pol->security);
750 				if (*err) {
751 					write_unlock_bh(&xfrm_policy_lock);
752 					return pol;
753 				}
754 				__xfrm_policy_unlink(pol, dir);
755 			}
756 			ret = pol;
757 			break;
758 		}
759 	}
760 	write_unlock_bh(&xfrm_policy_lock);
761 
762 	if (ret && delete)
763 		xfrm_policy_kill(ret);
764 	return ret;
765 }
766 EXPORT_SYMBOL(xfrm_policy_byid);
767 
768 #ifdef CONFIG_SECURITY_NETWORK_XFRM
769 static inline int
770 xfrm_policy_flush_secctx_check(struct net *net, u8 type, struct xfrm_audit *audit_info)
771 {
772 	int dir, err = 0;
773 
774 	for (dir = 0; dir < XFRM_POLICY_MAX; dir++) {
775 		struct xfrm_policy *pol;
776 		int i;
777 
778 		hlist_for_each_entry(pol,
779 				     &net->xfrm.policy_inexact[dir], bydst) {
780 			if (pol->type != type)
781 				continue;
782 			err = security_xfrm_policy_delete(pol->security);
783 			if (err) {
784 				xfrm_audit_policy_delete(pol, 0,
785 							 audit_info->loginuid,
786 							 audit_info->sessionid,
787 							 audit_info->secid);
788 				return err;
789 			}
790 		}
791 		for (i = net->xfrm.policy_bydst[dir].hmask; i >= 0; i--) {
792 			hlist_for_each_entry(pol,
793 					     net->xfrm.policy_bydst[dir].table + i,
794 					     bydst) {
795 				if (pol->type != type)
796 					continue;
797 				err = security_xfrm_policy_delete(
798 								pol->security);
799 				if (err) {
800 					xfrm_audit_policy_delete(pol, 0,
801 							audit_info->loginuid,
802 							audit_info->sessionid,
803 							audit_info->secid);
804 					return err;
805 				}
806 			}
807 		}
808 	}
809 	return err;
810 }
811 #else
812 static inline int
813 xfrm_policy_flush_secctx_check(struct net *net, u8 type, struct xfrm_audit *audit_info)
814 {
815 	return 0;
816 }
817 #endif
818 
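/* Remove every policy of the given type from the SPD.  The policy lock is
 * dropped around each audit/kill step, so the scan restarts from the chain
 * head (again1/again2) after every removal.  Returns -ESRCH if nothing was
 * flushed.
 */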
819 int xfrm_policy_flush(struct net *net, u8 type, struct xfrm_audit *audit_info)
820 {
821 	int dir, err = 0, cnt = 0;
822 
823 	write_lock_bh(&xfrm_policy_lock);
824 
825 	err = xfrm_policy_flush_secctx_check(net, type, audit_info);
826 	if (err)
827 		goto out;
828 
829 	for (dir = 0; dir < XFRM_POLICY_MAX; dir++) {
830 		struct xfrm_policy *pol;
831 		int i;
832 
833 	again1:
834 		hlist_for_each_entry(pol,
835 				     &net->xfrm.policy_inexact[dir], bydst) {
836 			if (pol->type != type)
837 				continue;
838 			__xfrm_policy_unlink(pol, dir);
839 			write_unlock_bh(&xfrm_policy_lock);
840 			cnt++;
841 
842 			xfrm_audit_policy_delete(pol, 1, audit_info->loginuid,
843 						 audit_info->sessionid,
844 						 audit_info->secid);
845 
846 			xfrm_policy_kill(pol);
847 
848 			write_lock_bh(&xfrm_policy_lock);
849 			goto again1;
850 		}
851 
852 		for (i = net->xfrm.policy_bydst[dir].hmask; i >= 0; i--) {
853 	again2:
854 			hlist_for_each_entry(pol,
855 					     net->xfrm.policy_bydst[dir].table + i,
856 					     bydst) {
857 				if (pol->type != type)
858 					continue;
859 				__xfrm_policy_unlink(pol, dir);
860 				write_unlock_bh(&xfrm_policy_lock);
861 				cnt++;
862 
863 				xfrm_audit_policy_delete(pol, 1,
864 							 audit_info->loginuid,
865 							 audit_info->sessionid,
866 							 audit_info->secid);
867 				xfrm_policy_kill(pol);
868 
869 				write_lock_bh(&xfrm_policy_lock);
870 				goto again2;
871 			}
872 		}
873 
874 	}
875 	if (!cnt)
876 		err = -ESRCH;
877 out:
878 	write_unlock_bh(&xfrm_policy_lock);
879 	return err;
880 }
881 EXPORT_SYMBOL(xfrm_policy_flush);
882 
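/* Resumable policy dump: when the callback returns an error the walker
 * entry is re-linked just before the current position in policy_all, so a
 * later call continues from there.  A fresh walk that visits no matching
 * policy returns -ENOENT.
 */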
883 int xfrm_policy_walk(struct net *net, struct xfrm_policy_walk *walk,
884 		     int (*func)(struct xfrm_policy *, int, int, void*),
885 		     void *data)
886 {
887 	struct xfrm_policy *pol;
888 	struct xfrm_policy_walk_entry *x;
889 	int error = 0;
890 
891 	if (walk->type >= XFRM_POLICY_TYPE_MAX &&
892 	    walk->type != XFRM_POLICY_TYPE_ANY)
893 		return -EINVAL;
894 
895 	if (list_empty(&walk->walk.all) && walk->seq != 0)
896 		return 0;
897 
898 	write_lock_bh(&xfrm_policy_lock);
899 	if (list_empty(&walk->walk.all))
900 		x = list_first_entry(&net->xfrm.policy_all, struct xfrm_policy_walk_entry, all);
901 	else
902 		x = list_entry(&walk->walk.all, struct xfrm_policy_walk_entry, all);
903 	list_for_each_entry_from(x, &net->xfrm.policy_all, all) {
904 		if (x->dead)
905 			continue;
906 		pol = container_of(x, struct xfrm_policy, walk);
907 		if (walk->type != XFRM_POLICY_TYPE_ANY &&
908 		    walk->type != pol->type)
909 			continue;
910 		error = func(pol, xfrm_policy_id2dir(pol->index),
911 			     walk->seq, data);
912 		if (error) {
913 			list_move_tail(&walk->walk.all, &x->all);
914 			goto out;
915 		}
916 		walk->seq++;
917 	}
918 	if (walk->seq == 0) {
919 		error = -ENOENT;
920 		goto out;
921 	}
922 	list_del_init(&walk->walk.all);
923 out:
924 	write_unlock_bh(&xfrm_policy_lock);
925 	return error;
926 }
927 EXPORT_SYMBOL(xfrm_policy_walk);
928 
929 void xfrm_policy_walk_init(struct xfrm_policy_walk *walk, u8 type)
930 {
931 	INIT_LIST_HEAD(&walk->walk.all);
932 	walk->walk.dead = 1;
933 	walk->type = type;
934 	walk->seq = 0;
935 }
936 EXPORT_SYMBOL(xfrm_policy_walk_init);
937 
938 void xfrm_policy_walk_done(struct xfrm_policy_walk *walk)
939 {
940 	if (list_empty(&walk->walk.all))
941 		return;
942 
943 	write_lock_bh(&xfrm_policy_lock);
944 	list_del(&walk->walk.all);
945 	write_unlock_bh(&xfrm_policy_lock);
946 }
947 EXPORT_SYMBOL(xfrm_policy_walk_done);
948 
949 /*
950  * Check whether a policy applies to this flow.
951  *
952  * Returns 0 if the policy matches, else an -errno.
953  */
954 static int xfrm_policy_match(const struct xfrm_policy *pol,
955 			     const struct flowi *fl,
956 			     u8 type, u16 family, int dir)
957 {
958 	const struct xfrm_selector *sel = &pol->selector;
959 	int ret = -ESRCH;
960 	bool match;
961 
962 	if (pol->family != family ||
963 	    (fl->flowi_mark & pol->mark.m) != pol->mark.v ||
964 	    pol->type != type)
965 		return ret;
966 
967 	match = xfrm_selector_match(sel, fl, family);
968 	if (match)
969 		ret = security_xfrm_policy_lookup(pol->security, fl->flowi_secid,
970 						  dir);
971 
972 	return ret;
973 }
974 
975 static struct xfrm_policy *xfrm_policy_lookup_bytype(struct net *net, u8 type,
976 						     const struct flowi *fl,
977 						     u16 family, u8 dir)
978 {
979 	int err;
980 	struct xfrm_policy *pol, *ret;
981 	const xfrm_address_t *daddr, *saddr;
982 	struct hlist_head *chain;
983 	u32 priority = ~0U;
984 
985 	daddr = xfrm_flowi_daddr(fl, family);
986 	saddr = xfrm_flowi_saddr(fl, family);
987 	if (unlikely(!daddr || !saddr))
988 		return NULL;
989 
990 	read_lock_bh(&xfrm_policy_lock);
991 	chain = policy_hash_direct(net, daddr, saddr, family, dir);
992 	ret = NULL;
993 	hlist_for_each_entry(pol, chain, bydst) {
994 		err = xfrm_policy_match(pol, fl, type, family, dir);
995 		if (err) {
996 			if (err == -ESRCH)
997 				continue;
998 			else {
999 				ret = ERR_PTR(err);
1000 				goto fail;
1001 			}
1002 		} else {
1003 			ret = pol;
1004 			priority = ret->priority;
1005 			break;
1006 		}
1007 	}
1008 	chain = &net->xfrm.policy_inexact[dir];
1009 	hlist_for_each_entry(pol, chain, bydst) {
1010 		err = xfrm_policy_match(pol, fl, type, family, dir);
1011 		if (err) {
1012 			if (err == -ESRCH)
1013 				continue;
1014 			else {
1015 				ret = ERR_PTR(err);
1016 				goto fail;
1017 			}
1018 		} else if (pol->priority < priority) {
1019 			ret = pol;
1020 			break;
1021 		}
1022 	}
1023 	if (ret)
1024 		xfrm_pol_hold(ret);
1025 fail:
1026 	read_unlock_bh(&xfrm_policy_lock);
1027 
1028 	return ret;
1029 }
1030 
1031 static struct xfrm_policy *
1032 __xfrm_policy_lookup(struct net *net, const struct flowi *fl, u16 family, u8 dir)
1033 {
1034 #ifdef CONFIG_XFRM_SUB_POLICY
1035 	struct xfrm_policy *pol;
1036 
1037 	pol = xfrm_policy_lookup_bytype(net, XFRM_POLICY_TYPE_SUB, fl, family, dir);
1038 	if (pol != NULL)
1039 		return pol;
1040 #endif
1041 	return xfrm_policy_lookup_bytype(net, XFRM_POLICY_TYPE_MAIN, fl, family, dir);
1042 }
1043 
1044 static int flow_to_policy_dir(int dir)
1045 {
1046 	if (XFRM_POLICY_IN == FLOW_DIR_IN &&
1047 	    XFRM_POLICY_OUT == FLOW_DIR_OUT &&
1048 	    XFRM_POLICY_FWD == FLOW_DIR_FWD)
1049 		return dir;
1050 
1051 	switch (dir) {
1052 	default:
1053 	case FLOW_DIR_IN:
1054 		return XFRM_POLICY_IN;
1055 	case FLOW_DIR_OUT:
1056 		return XFRM_POLICY_OUT;
1057 	case FLOW_DIR_FWD:
1058 		return XFRM_POLICY_FWD;
1059 	}
1060 }
1061 
1062 static struct flow_cache_object *
1063 xfrm_policy_lookup(struct net *net, const struct flowi *fl, u16 family,
1064 		   u8 dir, struct flow_cache_object *old_obj, void *ctx)
1065 {
1066 	struct xfrm_policy *pol;
1067 
1068 	if (old_obj)
1069 		xfrm_pol_put(container_of(old_obj, struct xfrm_policy, flo));
1070 
1071 	pol = __xfrm_policy_lookup(net, fl, family, flow_to_policy_dir(dir));
1072 	if (IS_ERR_OR_NULL(pol))
1073 		return ERR_CAST(pol);
1074 
1075 	/* Resolver returns two references:
1076 	 * one for cache and one for caller of flow_cache_lookup() */
1077 	xfrm_pol_hold(pol);
1078 
1079 	return &pol->flo;
1080 }
1081 
1082 static inline int policy_to_flow_dir(int dir)
1083 {
1084 	if (XFRM_POLICY_IN == FLOW_DIR_IN &&
1085 	    XFRM_POLICY_OUT == FLOW_DIR_OUT &&
1086 	    XFRM_POLICY_FWD == FLOW_DIR_FWD)
1087 		return dir;
1088 	switch (dir) {
1089 	default:
1090 	case XFRM_POLICY_IN:
1091 		return FLOW_DIR_IN;
1092 	case XFRM_POLICY_OUT:
1093 		return FLOW_DIR_OUT;
1094 	case XFRM_POLICY_FWD:
1095 		return FLOW_DIR_FWD;
1096 	}
1097 }
1098 
1099 static struct xfrm_policy *xfrm_sk_policy_lookup(struct sock *sk, int dir,
1100 						 const struct flowi *fl)
1101 {
1102 	struct xfrm_policy *pol;
1103 
1104 	read_lock_bh(&xfrm_policy_lock);
1105 	if ((pol = sk->sk_policy[dir]) != NULL) {
1106 		bool match = xfrm_selector_match(&pol->selector, fl,
1107 						 sk->sk_family);
1108 		int err = 0;
1109 
1110 		if (match) {
1111 			if ((sk->sk_mark & pol->mark.m) != pol->mark.v) {
1112 				pol = NULL;
1113 				goto out;
1114 			}
1115 			err = security_xfrm_policy_lookup(pol->security,
1116 						      fl->flowi_secid,
1117 						      policy_to_flow_dir(dir));
1118 			if (!err)
1119 				xfrm_pol_hold(pol);
1120 			else if (err == -ESRCH)
1121 				pol = NULL;
1122 			else
1123 				pol = ERR_PTR(err);
1124 		} else
1125 			pol = NULL;
1126 	}
1127 out:
1128 	read_unlock_bh(&xfrm_policy_lock);
1129 	return pol;
1130 }
1131 
1132 static void __xfrm_policy_link(struct xfrm_policy *pol, int dir)
1133 {
1134 	struct net *net = xp_net(pol);
1135 	struct hlist_head *chain = policy_hash_bysel(net, &pol->selector,
1136 						     pol->family, dir);
1137 
1138 	list_add(&pol->walk.all, &net->xfrm.policy_all);
1139 	hlist_add_head(&pol->bydst, chain);
1140 	hlist_add_head(&pol->byidx, net->xfrm.policy_byidx+idx_hash(net, pol->index));
1141 	net->xfrm.policy_count[dir]++;
1142 	xfrm_pol_hold(pol);
1143 
1144 	if (xfrm_bydst_should_resize(net, dir, NULL))
1145 		schedule_work(&net->xfrm.policy_hash_work);
1146 }
1147 
1148 static struct xfrm_policy *__xfrm_policy_unlink(struct xfrm_policy *pol,
1149 						int dir)
1150 {
1151 	struct net *net = xp_net(pol);
1152 
1153 	if (hlist_unhashed(&pol->bydst))
1154 		return NULL;
1155 
1156 	hlist_del(&pol->bydst);
1157 	hlist_del(&pol->byidx);
1158 	list_del(&pol->walk.all);
1159 	net->xfrm.policy_count[dir]--;
1160 
1161 	return pol;
1162 }
1163 
1164 int xfrm_policy_delete(struct xfrm_policy *pol, int dir)
1165 {
1166 	write_lock_bh(&xfrm_policy_lock);
1167 	pol = __xfrm_policy_unlink(pol, dir);
1168 	write_unlock_bh(&xfrm_policy_lock);
1169 	if (pol) {
1170 		xfrm_policy_kill(pol);
1171 		return 0;
1172 	}
1173 	return -ENOENT;
1174 }
1175 EXPORT_SYMBOL(xfrm_policy_delete);
1176 
1177 int xfrm_sk_policy_insert(struct sock *sk, int dir, struct xfrm_policy *pol)
1178 {
1179 	struct net *net = xp_net(pol);
1180 	struct xfrm_policy *old_pol;
1181 
1182 #ifdef CONFIG_XFRM_SUB_POLICY
1183 	if (pol && pol->type != XFRM_POLICY_TYPE_MAIN)
1184 		return -EINVAL;
1185 #endif
1186 
1187 	write_lock_bh(&xfrm_policy_lock);
1188 	old_pol = sk->sk_policy[dir];
1189 	sk->sk_policy[dir] = pol;
1190 	if (pol) {
1191 		pol->curlft.add_time = get_seconds();
1192 		pol->index = xfrm_gen_index(net, XFRM_POLICY_MAX+dir);
1193 		__xfrm_policy_link(pol, XFRM_POLICY_MAX+dir);
1194 	}
1195 	if (old_pol) {
1196 		if (pol)
1197 			xfrm_policy_requeue(old_pol, pol);
1198 
1199 		/* Unlinking succeeds always. This is the only function
1200 		 * allowed to delete or replace socket policy.
1201 		 */
1202 		__xfrm_policy_unlink(old_pol, XFRM_POLICY_MAX+dir);
1203 	}
1204 	write_unlock_bh(&xfrm_policy_lock);
1205 
1206 	if (old_pol) {
1207 		xfrm_policy_kill(old_pol);
1208 	}
1209 	return 0;
1210 }
1211 
1212 static struct xfrm_policy *clone_policy(const struct xfrm_policy *old, int dir)
1213 {
1214 	struct xfrm_policy *newp = xfrm_policy_alloc(xp_net(old), GFP_ATOMIC);
1215 
1216 	if (newp) {
1217 		newp->selector = old->selector;
1218 		if (security_xfrm_policy_clone(old->security,
1219 					       &newp->security)) {
1220 			kfree(newp);
1221 			return NULL;  /* ENOMEM */
1222 		}
1223 		newp->lft = old->lft;
1224 		newp->curlft = old->curlft;
1225 		newp->mark = old->mark;
1226 		newp->action = old->action;
1227 		newp->flags = old->flags;
1228 		newp->xfrm_nr = old->xfrm_nr;
1229 		newp->index = old->index;
1230 		newp->type = old->type;
1231 		memcpy(newp->xfrm_vec, old->xfrm_vec,
1232 		       newp->xfrm_nr*sizeof(struct xfrm_tmpl));
1233 		write_lock_bh(&xfrm_policy_lock);
1234 		__xfrm_policy_link(newp, XFRM_POLICY_MAX+dir);
1235 		write_unlock_bh(&xfrm_policy_lock);
1236 		xfrm_pol_put(newp);
1237 	}
1238 	return newp;
1239 }
1240 
1241 int __xfrm_sk_clone_policy(struct sock *sk)
1242 {
1243 	struct xfrm_policy *p0 = sk->sk_policy[0],
1244 			   *p1 = sk->sk_policy[1];
1245 
1246 	sk->sk_policy[0] = sk->sk_policy[1] = NULL;
1247 	if (p0 && (sk->sk_policy[0] = clone_policy(p0, 0)) == NULL)
1248 		return -ENOMEM;
1249 	if (p1 && (sk->sk_policy[1] = clone_policy(p1, 1)) == NULL)
1250 		return -ENOMEM;
1251 	return 0;
1252 }
1253 
1254 static int
1255 xfrm_get_saddr(struct net *net, xfrm_address_t *local, xfrm_address_t *remote,
1256 	       unsigned short family)
1257 {
1258 	int err;
1259 	struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
1260 
1261 	if (unlikely(afinfo == NULL))
1262 		return -EINVAL;
1263 	err = afinfo->get_saddr(net, local, remote);
1264 	xfrm_policy_put_afinfo(afinfo);
1265 	return err;
1266 }
1267 
1268 /* Resolve list of templates for the flow, given policy. */
1269 
1270 static int
1271 xfrm_tmpl_resolve_one(struct xfrm_policy *policy, const struct flowi *fl,
1272 		      struct xfrm_state **xfrm, unsigned short family)
1273 {
1274 	struct net *net = xp_net(policy);
1275 	int nx;
1276 	int i, error;
1277 	xfrm_address_t *daddr = xfrm_flowi_daddr(fl, family);
1278 	xfrm_address_t *saddr = xfrm_flowi_saddr(fl, family);
1279 	xfrm_address_t tmp;
1280 
1281 	for (nx=0, i = 0; i < policy->xfrm_nr; i++) {
1282 		struct xfrm_state *x;
1283 		xfrm_address_t *remote = daddr;
1284 		xfrm_address_t *local  = saddr;
1285 		struct xfrm_tmpl *tmpl = &policy->xfrm_vec[i];
1286 
1287 		if (tmpl->mode == XFRM_MODE_TUNNEL ||
1288 		    tmpl->mode == XFRM_MODE_BEET) {
1289 			remote = &tmpl->id.daddr;
1290 			local = &tmpl->saddr;
1291 			if (xfrm_addr_any(local, tmpl->encap_family)) {
1292 				error = xfrm_get_saddr(net, &tmp, remote, tmpl->encap_family);
1293 				if (error)
1294 					goto fail;
1295 				local = &tmp;
1296 			}
1297 		}
1298 
1299 		x = xfrm_state_find(remote, local, fl, tmpl, policy, &error, family);
1300 
1301 		if (x && x->km.state == XFRM_STATE_VALID) {
1302 			xfrm[nx++] = x;
1303 			daddr = remote;
1304 			saddr = local;
1305 			continue;
1306 		}
1307 		if (x) {
1308 			error = (x->km.state == XFRM_STATE_ERROR ?
1309 				 -EINVAL : -EAGAIN);
1310 			xfrm_state_put(x);
1311 		}
1312 		else if (error == -ESRCH)
1313 			error = -EAGAIN;
1314 
1315 		if (!tmpl->optional)
1316 			goto fail;
1317 	}
1318 	return nx;
1319 
1320 fail:
1321 	for (nx--; nx>=0; nx--)
1322 		xfrm_state_put(xfrm[nx]);
1323 	return error;
1324 }
1325 
1326 static int
1327 xfrm_tmpl_resolve(struct xfrm_policy **pols, int npols, const struct flowi *fl,
1328 		  struct xfrm_state **xfrm, unsigned short family)
1329 {
1330 	struct xfrm_state *tp[XFRM_MAX_DEPTH];
1331 	struct xfrm_state **tpp = (npols > 1) ? tp : xfrm;
1332 	int cnx = 0;
1333 	int error;
1334 	int ret;
1335 	int i;
1336 
1337 	for (i = 0; i < npols; i++) {
1338 		if (cnx + pols[i]->xfrm_nr >= XFRM_MAX_DEPTH) {
1339 			error = -ENOBUFS;
1340 			goto fail;
1341 		}
1342 
1343 		ret = xfrm_tmpl_resolve_one(pols[i], fl, &tpp[cnx], family);
1344 		if (ret < 0) {
1345 			error = ret;
1346 			goto fail;
1347 		} else
1348 			cnx += ret;
1349 	}
1350 
1351 	/* found states are sorted for outbound processing */
1352 	if (npols > 1)
1353 		xfrm_state_sort(xfrm, tpp, cnx, family);
1354 
1355 	return cnx;
1356 
1357  fail:
1358 	for (cnx--; cnx>=0; cnx--)
1359 		xfrm_state_put(tpp[cnx]);
1360 	return error;
1361 
1362 }
1363 
1364 /* Check that the bundle accepts the flow and its components are
1365  * still valid.
1366  */
1367 
1368 static inline int xfrm_get_tos(const struct flowi *fl, int family)
1369 {
1370 	struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
1371 	int tos;
1372 
1373 	if (!afinfo)
1374 		return -EINVAL;
1375 
1376 	tos = afinfo->get_tos(fl);
1377 
1378 	xfrm_policy_put_afinfo(afinfo);
1379 
1380 	return tos;
1381 }
1382 
1383 static struct flow_cache_object *xfrm_bundle_flo_get(struct flow_cache_object *flo)
1384 {
1385 	struct xfrm_dst *xdst = container_of(flo, struct xfrm_dst, flo);
1386 	struct dst_entry *dst = &xdst->u.dst;
1387 
1388 	if (xdst->route == NULL) {
1389 		/* Dummy bundle - if it has xfrms, we were not able
1390 		 * to build a bundle because template resolution failed.
1391 		 * It means we need to try resolving again. */
1392 		if (xdst->num_xfrms > 0)
1393 			return NULL;
1394 	} else if (dst->flags & DST_XFRM_QUEUE) {
1395 		return NULL;
1396 	} else {
1397 		/* Real bundle */
1398 		if (stale_bundle(dst))
1399 			return NULL;
1400 	}
1401 
1402 	dst_hold(dst);
1403 	return flo;
1404 }
1405 
1406 static int xfrm_bundle_flo_check(struct flow_cache_object *flo)
1407 {
1408 	struct xfrm_dst *xdst = container_of(flo, struct xfrm_dst, flo);
1409 	struct dst_entry *dst = &xdst->u.dst;
1410 
1411 	if (!xdst->route)
1412 		return 0;
1413 	if (stale_bundle(dst))
1414 		return 0;
1415 
1416 	return 1;
1417 }
1418 
1419 static void xfrm_bundle_flo_delete(struct flow_cache_object *flo)
1420 {
1421 	struct xfrm_dst *xdst = container_of(flo, struct xfrm_dst, flo);
1422 	struct dst_entry *dst = &xdst->u.dst;
1423 
1424 	dst_free(dst);
1425 }
1426 
1427 static const struct flow_cache_ops xfrm_bundle_fc_ops = {
1428 	.get = xfrm_bundle_flo_get,
1429 	.check = xfrm_bundle_flo_check,
1430 	.delete = xfrm_bundle_flo_delete,
1431 };
1432 
1433 static inline struct xfrm_dst *xfrm_alloc_dst(struct net *net, int family)
1434 {
1435 	struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
1436 	struct dst_ops *dst_ops;
1437 	struct xfrm_dst *xdst;
1438 
1439 	if (!afinfo)
1440 		return ERR_PTR(-EINVAL);
1441 
1442 	switch (family) {
1443 	case AF_INET:
1444 		dst_ops = &net->xfrm.xfrm4_dst_ops;
1445 		break;
1446 #if IS_ENABLED(CONFIG_IPV6)
1447 	case AF_INET6:
1448 		dst_ops = &net->xfrm.xfrm6_dst_ops;
1449 		break;
1450 #endif
1451 	default:
1452 		BUG();
1453 	}
1454 	xdst = dst_alloc(dst_ops, NULL, 0, DST_OBSOLETE_NONE, 0);
1455 
1456 	if (likely(xdst)) {
1457 		struct dst_entry *dst = &xdst->u.dst;
1458 
1459 		memset(dst + 1, 0, sizeof(*xdst) - sizeof(*dst));
1460 		xdst->flo.ops = &xfrm_bundle_fc_ops;
1461 		if (afinfo->init_dst)
1462 			afinfo->init_dst(net, xdst);
1463 	} else
1464 		xdst = ERR_PTR(-ENOBUFS);
1465 
1466 	xfrm_policy_put_afinfo(afinfo);
1467 
1468 	return xdst;
1469 }
1470 
1471 static inline int xfrm_init_path(struct xfrm_dst *path, struct dst_entry *dst,
1472 				 int nfheader_len)
1473 {
1474 	struct xfrm_policy_afinfo *afinfo =
1475 		xfrm_policy_get_afinfo(dst->ops->family);
1476 	int err;
1477 
1478 	if (!afinfo)
1479 		return -EINVAL;
1480 
1481 	err = afinfo->init_path(path, dst, nfheader_len);
1482 
1483 	xfrm_policy_put_afinfo(afinfo);
1484 
1485 	return err;
1486 }
1487 
1488 static inline int xfrm_fill_dst(struct xfrm_dst *xdst, struct net_device *dev,
1489 				const struct flowi *fl)
1490 {
1491 	struct xfrm_policy_afinfo *afinfo =
1492 		xfrm_policy_get_afinfo(xdst->u.dst.ops->family);
1493 	int err;
1494 
1495 	if (!afinfo)
1496 		return -EINVAL;
1497 
1498 	err = afinfo->fill_dst(xdst, dev, fl);
1499 
1500 	xfrm_policy_put_afinfo(afinfo);
1501 
1502 	return err;
1503 }
1504 
1505 
1506 /* Allocate a chain of dst_entry's, attach the known xfrm's, calculate
1507  * all the metrics... In short, build a bundle.
1508  */
1509 
1510 static struct dst_entry *xfrm_bundle_create(struct xfrm_policy *policy,
1511 					    struct xfrm_state **xfrm, int nx,
1512 					    const struct flowi *fl,
1513 					    struct dst_entry *dst)
1514 {
1515 	struct net *net = xp_net(policy);
1516 	unsigned long now = jiffies;
1517 	struct net_device *dev;
1518 	struct xfrm_mode *inner_mode;
1519 	struct dst_entry *dst_prev = NULL;
1520 	struct dst_entry *dst0 = NULL;
1521 	int i = 0;
1522 	int err;
1523 	int header_len = 0;
1524 	int nfheader_len = 0;
1525 	int trailer_len = 0;
1526 	int tos;
1527 	int family = policy->selector.family;
1528 	xfrm_address_t saddr, daddr;
1529 
1530 	xfrm_flowi_addr_get(fl, &saddr, &daddr, family);
1531 
1532 	tos = xfrm_get_tos(fl, family);
1533 	err = tos;
1534 	if (tos < 0)
1535 		goto put_states;
1536 
1537 	dst_hold(dst);
1538 
1539 	for (; i < nx; i++) {
1540 		struct xfrm_dst *xdst = xfrm_alloc_dst(net, family);
1541 		struct dst_entry *dst1 = &xdst->u.dst;
1542 
1543 		err = PTR_ERR(xdst);
1544 		if (IS_ERR(xdst)) {
1545 			dst_release(dst);
1546 			goto put_states;
1547 		}
1548 
1549 		if (xfrm[i]->sel.family == AF_UNSPEC) {
1550 			inner_mode = xfrm_ip2inner_mode(xfrm[i],
1551 							xfrm_af2proto(family));
1552 			if (!inner_mode) {
1553 				err = -EAFNOSUPPORT;
1554 				dst_release(dst);
1555 				goto put_states;
1556 			}
1557 		} else
1558 			inner_mode = xfrm[i]->inner_mode;
1559 
1560 		if (!dst_prev)
1561 			dst0 = dst1;
1562 		else {
1563 			dst_prev->child = dst_clone(dst1);
1564 			dst1->flags |= DST_NOHASH;
1565 		}
1566 
1567 		xdst->route = dst;
1568 		dst_copy_metrics(dst1, dst);
1569 
1570 		if (xfrm[i]->props.mode != XFRM_MODE_TRANSPORT) {
1571 			family = xfrm[i]->props.family;
1572 			dst = xfrm_dst_lookup(xfrm[i], tos, &saddr, &daddr,
1573 					      family);
1574 			err = PTR_ERR(dst);
1575 			if (IS_ERR(dst))
1576 				goto put_states;
1577 		} else
1578 			dst_hold(dst);
1579 
1580 		dst1->xfrm = xfrm[i];
1581 		xdst->xfrm_genid = xfrm[i]->genid;
1582 
1583 		dst1->obsolete = DST_OBSOLETE_FORCE_CHK;
1584 		dst1->flags |= DST_HOST;
1585 		dst1->lastuse = now;
1586 
1587 		dst1->input = dst_discard;
1588 		dst1->output = inner_mode->afinfo->output;
1589 
1590 		dst1->next = dst_prev;
1591 		dst_prev = dst1;
1592 
1593 		header_len += xfrm[i]->props.header_len;
1594 		if (xfrm[i]->type->flags & XFRM_TYPE_NON_FRAGMENT)
1595 			nfheader_len += xfrm[i]->props.header_len;
1596 		trailer_len += xfrm[i]->props.trailer_len;
1597 	}
1598 
1599 	dst_prev->child = dst;
1600 	dst0->path = dst;
1601 
1602 	err = -ENODEV;
1603 	dev = dst->dev;
1604 	if (!dev)
1605 		goto free_dst;
1606 
1607 	xfrm_init_path((struct xfrm_dst *)dst0, dst, nfheader_len);
1608 	xfrm_init_pmtu(dst_prev);
1609 
1610 	for (dst_prev = dst0; dst_prev != dst; dst_prev = dst_prev->child) {
1611 		struct xfrm_dst *xdst = (struct xfrm_dst *)dst_prev;
1612 
1613 		err = xfrm_fill_dst(xdst, dev, fl);
1614 		if (err)
1615 			goto free_dst;
1616 
1617 		dst_prev->header_len = header_len;
1618 		dst_prev->trailer_len = trailer_len;
1619 		header_len -= xdst->u.dst.xfrm->props.header_len;
1620 		trailer_len -= xdst->u.dst.xfrm->props.trailer_len;
1621 	}
1622 
1623 out:
1624 	return dst0;
1625 
1626 put_states:
1627 	for (; i < nx; i++)
1628 		xfrm_state_put(xfrm[i]);
1629 free_dst:
1630 	if (dst0)
1631 		dst_free(dst0);
1632 	dst0 = ERR_PTR(err);
1633 	goto out;
1634 }
1635 
1636 static inline int
1637 xfrm_dst_alloc_copy(void **target, const void *src, int size)
1638 {
1639 	if (!*target) {
1640 		*target = kmalloc(size, GFP_ATOMIC);
1641 		if (!*target)
1642 			return -ENOMEM;
1643 	}
1644 	memcpy(*target, src, size);
1645 	return 0;
1646 }
1647 
1648 static inline int
1649 xfrm_dst_update_parent(struct dst_entry *dst, const struct xfrm_selector *sel)
1650 {
1651 #ifdef CONFIG_XFRM_SUB_POLICY
1652 	struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
1653 	return xfrm_dst_alloc_copy((void **)&(xdst->partner),
1654 				   sel, sizeof(*sel));
1655 #else
1656 	return 0;
1657 #endif
1658 }
1659 
1660 static inline int
1661 xfrm_dst_update_origin(struct dst_entry *dst, const struct flowi *fl)
1662 {
1663 #ifdef CONFIG_XFRM_SUB_POLICY
1664 	struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
1665 	return xfrm_dst_alloc_copy((void **)&(xdst->origin), fl, sizeof(*fl));
1666 #else
1667 	return 0;
1668 #endif
1669 }
1670 
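/* Given the policy already looked up for a flow, pull in the matching main
 * policy as well when the first hit was a sub-policy (CONFIG_XFRM_SUB_POLICY)
 * and count the templates the policies require; *num_xfrms is set to -1 if
 * any of them is not an ALLOW policy.
 */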
1671 static int xfrm_expand_policies(const struct flowi *fl, u16 family,
1672 				struct xfrm_policy **pols,
1673 				int *num_pols, int *num_xfrms)
1674 {
1675 	int i;
1676 
1677 	if (*num_pols == 0 || !pols[0]) {
1678 		*num_pols = 0;
1679 		*num_xfrms = 0;
1680 		return 0;
1681 	}
1682 	if (IS_ERR(pols[0]))
1683 		return PTR_ERR(pols[0]);
1684 
1685 	*num_xfrms = pols[0]->xfrm_nr;
1686 
1687 #ifdef CONFIG_XFRM_SUB_POLICY
1688 	if (pols[0] && pols[0]->action == XFRM_POLICY_ALLOW &&
1689 	    pols[0]->type != XFRM_POLICY_TYPE_MAIN) {
1690 		pols[1] = xfrm_policy_lookup_bytype(xp_net(pols[0]),
1691 						    XFRM_POLICY_TYPE_MAIN,
1692 						    fl, family,
1693 						    XFRM_POLICY_OUT);
1694 		if (pols[1]) {
1695 			if (IS_ERR(pols[1])) {
1696 				xfrm_pols_put(pols, *num_pols);
1697 				return PTR_ERR(pols[1]);
1698 			}
1699 			(*num_pols) ++;
1700 			(*num_xfrms) += pols[1]->xfrm_nr;
1701 		}
1702 	}
1703 #endif
1704 	for (i = 0; i < *num_pols; i++) {
1705 		if (pols[i]->action != XFRM_POLICY_ALLOW) {
1706 			*num_xfrms = -1;
1707 			break;
1708 		}
1709 	}
1710 
1711 	return 0;
1712 
1713 }
1714 
1715 static struct xfrm_dst *
1716 xfrm_resolve_and_create_bundle(struct xfrm_policy **pols, int num_pols,
1717 			       const struct flowi *fl, u16 family,
1718 			       struct dst_entry *dst_orig)
1719 {
1720 	struct net *net = xp_net(pols[0]);
1721 	struct xfrm_state *xfrm[XFRM_MAX_DEPTH];
1722 	struct dst_entry *dst;
1723 	struct xfrm_dst *xdst;
1724 	int err;
1725 
1726 	/* Try to instantiate a bundle */
1727 	err = xfrm_tmpl_resolve(pols, num_pols, fl, xfrm, family);
1728 	if (err <= 0) {
1729 		if (err != 0 && err != -EAGAIN)
1730 			XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTPOLERROR);
1731 		return ERR_PTR(err);
1732 	}
1733 
1734 	dst = xfrm_bundle_create(pols[0], xfrm, err, fl, dst_orig);
1735 	if (IS_ERR(dst)) {
1736 		XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTBUNDLEGENERROR);
1737 		return ERR_CAST(dst);
1738 	}
1739 
1740 	xdst = (struct xfrm_dst *)dst;
1741 	xdst->num_xfrms = err;
1742 	if (num_pols > 1)
1743 		err = xfrm_dst_update_parent(dst, &pols[1]->selector);
1744 	else
1745 		err = xfrm_dst_update_origin(dst, fl);
1746 	if (unlikely(err)) {
1747 		dst_free(dst);
1748 		XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTBUNDLECHECKERROR);
1749 		return ERR_PTR(err);
1750 	}
1751 
1752 	xdst->num_pols = num_pols;
1753 	memcpy(xdst->pols, pols, sizeof(struct xfrm_policy*) * num_pols);
1754 	xdst->policy_genid = atomic_read(&pols[0]->genid);
1755 
1756 	return xdst;
1757 }
1758 
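/* Hold queue timer: try to resolve a real bundle for the queued packets.
 * While only the dummy DST_XFRM_QUEUE bundle is available the timeout is
 * doubled (up to XFRM_QUEUE_TMO_MAX); once a usable route exists the queue
 * is drained through dst_output(), and on errors it is purged.
 */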
1759 static void xfrm_policy_queue_process(unsigned long arg)
1760 {
1761 	int err = 0;
1762 	struct sk_buff *skb;
1763 	struct sock *sk;
1764 	struct dst_entry *dst;
1765 	struct xfrm_policy *pol = (struct xfrm_policy *)arg;
1766 	struct xfrm_policy_queue *pq = &pol->polq;
1767 	struct flowi fl;
1768 	struct sk_buff_head list;
1769 
1770 	spin_lock(&pq->hold_queue.lock);
1771 	skb = skb_peek(&pq->hold_queue);
1772 	dst = skb_dst(skb);
1773 	sk = skb->sk;
1774 	xfrm_decode_session(skb, &fl, dst->ops->family);
1775 	spin_unlock(&pq->hold_queue.lock);
1776 
1777 	dst_hold(dst->path);
1778 	dst = xfrm_lookup(xp_net(pol), dst->path, &fl,
1779 			  sk, 0);
1780 	if (IS_ERR(dst))
1781 		goto purge_queue;
1782 
1783 	if (dst->flags & DST_XFRM_QUEUE) {
1784 		dst_release(dst);
1785 
1786 		if (pq->timeout >= XFRM_QUEUE_TMO_MAX)
1787 			goto purge_queue;
1788 
1789 		pq->timeout = pq->timeout << 1;
1790 		mod_timer(&pq->hold_timer, jiffies + pq->timeout);
1791 		return;
1792 	}
1793 
1794 	dst_release(dst);
1795 
1796 	__skb_queue_head_init(&list);
1797 
1798 	spin_lock(&pq->hold_queue.lock);
1799 	pq->timeout = 0;
1800 	skb_queue_splice_init(&pq->hold_queue, &list);
1801 	spin_unlock(&pq->hold_queue.lock);
1802 
1803 	while (!skb_queue_empty(&list)) {
1804 		skb = __skb_dequeue(&list);
1805 
1806 		xfrm_decode_session(skb, &fl, skb_dst(skb)->ops->family);
1807 		dst_hold(skb_dst(skb)->path);
1808 		dst = xfrm_lookup(xp_net(pol), skb_dst(skb)->path,
1809 				  &fl, skb->sk, 0);
1810 		if (IS_ERR(dst)) {
1811 			kfree_skb(skb);
1812 			continue;
1813 		}
1814 
1815 		nf_reset(skb);
1816 		skb_dst_drop(skb);
1817 		skb_dst_set(skb, dst);
1818 
1819 		err = dst_output(skb);
1820 	}
1821 
1822 	return;
1823 
1824 purge_queue:
1825 	pq->timeout = 0;
1826 	xfrm_queue_purge(&pq->hold_queue);
1827 }
1828 
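/* Output handler of the dummy bundle: park the skb on the policy's hold
 * queue (drop it once XFRM_MAX_QUEUE_LEN is exceeded) and arm the hold
 * timer, keeping an already pending earlier expiry if there is one.
 */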
1829 static int xdst_queue_output(struct sk_buff *skb)
1830 {
1831 	unsigned long sched_next;
1832 	struct dst_entry *dst = skb_dst(skb);
1833 	struct xfrm_dst *xdst = (struct xfrm_dst *) dst;
1834 	struct xfrm_policy_queue *pq = &xdst->pols[0]->polq;
1835 
1836 	if (pq->hold_queue.qlen > XFRM_MAX_QUEUE_LEN) {
1837 		kfree_skb(skb);
1838 		return -EAGAIN;
1839 	}
1840 
1841 	skb_dst_force(skb);
1842 
1843 	spin_lock_bh(&pq->hold_queue.lock);
1844 
1845 	if (!pq->timeout)
1846 		pq->timeout = XFRM_QUEUE_TMO_MIN;
1847 
1848 	sched_next = jiffies + pq->timeout;
1849 
1850 	if (del_timer(&pq->hold_timer)) {
1851 		if (time_before(pq->hold_timer.expires, sched_next))
1852 			sched_next = pq->hold_timer.expires;
1853 	}
1854 
1855 	__skb_queue_tail(&pq->hold_queue, skb);
1856 	mod_timer(&pq->hold_timer, sched_next);
1857 
1858 	spin_unlock_bh(&pq->hold_queue.lock);
1859 
1860 	return 0;
1861 }
1862 
1863 static struct xfrm_dst *xfrm_create_dummy_bundle(struct net *net,
1864 						 struct dst_entry *dst,
1865 						 const struct flowi *fl,
1866 						 int num_xfrms,
1867 						 u16 family)
1868 {
1869 	int err;
1870 	struct net_device *dev;
1871 	struct dst_entry *dst1;
1872 	struct xfrm_dst *xdst;
1873 
1874 	xdst = xfrm_alloc_dst(net, family);
1875 	if (IS_ERR(xdst))
1876 		return xdst;
1877 
1878 	if (net->xfrm.sysctl_larval_drop || num_xfrms <= 0 ||
1879 	    (fl->flowi_flags & FLOWI_FLAG_CAN_SLEEP))
1880 		return xdst;
1881 
1882 	dst1 = &xdst->u.dst;
1883 	dst_hold(dst);
1884 	xdst->route = dst;
1885 
1886 	dst_copy_metrics(dst1, dst);
1887 
1888 	dst1->obsolete = DST_OBSOLETE_FORCE_CHK;
1889 	dst1->flags |= DST_HOST | DST_XFRM_QUEUE;
1890 	dst1->lastuse = jiffies;
1891 
1892 	dst1->input = dst_discard;
1893 	dst1->output = xdst_queue_output;
1894 
1895 	dst_hold(dst);
1896 	dst1->child = dst;
1897 	dst1->path = dst;
1898 
1899 	xfrm_init_path((struct xfrm_dst *)dst1, dst, 0);
1900 
1901 	err = -ENODEV;
1902 	dev = dst->dev;
1903 	if (!dev)
1904 		goto free_dst;
1905 
1906 	err = xfrm_fill_dst(xdst, dev, fl);
1907 	if (err)
1908 		goto free_dst;
1909 
1910 out:
1911 	return xdst;
1912 
1913 free_dst:
1914 	dst_release(dst1);
1915 	xdst = ERR_PTR(err);
1916 	goto out;
1917 }
1918 
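/* Flow cache resolver for bundles: reuse the old bundle while its policies
 * are alive, otherwise look the policies up again and either build a real
 * bundle, keep the old one on -EAGAIN, or install a dummy bundle that
 * queues packets until the required xfrm_states show up.
 */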
1919 static struct flow_cache_object *
1920 xfrm_bundle_lookup(struct net *net, const struct flowi *fl, u16 family, u8 dir,
1921 		   struct flow_cache_object *oldflo, void *ctx)
1922 {
1923 	struct dst_entry *dst_orig = (struct dst_entry *)ctx;
1924 	struct xfrm_policy *pols[XFRM_POLICY_TYPE_MAX];
1925 	struct xfrm_dst *xdst, *new_xdst;
1926 	int num_pols = 0, num_xfrms = 0, i, err, pol_dead;
1927 
1928 	/* Check if the policies from old bundle are usable */
1929 	xdst = NULL;
1930 	if (oldflo) {
1931 		xdst = container_of(oldflo, struct xfrm_dst, flo);
1932 		num_pols = xdst->num_pols;
1933 		num_xfrms = xdst->num_xfrms;
1934 		pol_dead = 0;
1935 		for (i = 0; i < num_pols; i++) {
1936 			pols[i] = xdst->pols[i];
1937 			pol_dead |= pols[i]->walk.dead;
1938 		}
1939 		if (pol_dead) {
1940 			dst_free(&xdst->u.dst);
1941 			xdst = NULL;
1942 			num_pols = 0;
1943 			num_xfrms = 0;
1944 			oldflo = NULL;
1945 		}
1946 	}
1947 
1948 	/* Resolve policies to use if we couldn't get them from
1949 	 * previous cache entry */
1950 	if (xdst == NULL) {
1951 		num_pols = 1;
1952 		pols[0] = __xfrm_policy_lookup(net, fl, family,
1953 					       flow_to_policy_dir(dir));
1954 		err = xfrm_expand_policies(fl, family, pols,
1955 					   &num_pols, &num_xfrms);
1956 		if (err < 0)
1957 			goto inc_error;
1958 		if (num_pols == 0)
1959 			return NULL;
1960 		if (num_xfrms <= 0)
1961 			goto make_dummy_bundle;
1962 	}
1963 
1964 	new_xdst = xfrm_resolve_and_create_bundle(pols, num_pols, fl, family, dst_orig);
1965 	if (IS_ERR(new_xdst)) {
1966 		err = PTR_ERR(new_xdst);
1967 		if (err != -EAGAIN)
1968 			goto error;
1969 		if (oldflo == NULL)
1970 			goto make_dummy_bundle;
1971 		dst_hold(&xdst->u.dst);
1972 		return oldflo;
1973 	} else if (new_xdst == NULL) {
1974 		num_xfrms = 0;
1975 		if (oldflo == NULL)
1976 			goto make_dummy_bundle;
1977 		xdst->num_xfrms = 0;
1978 		dst_hold(&xdst->u.dst);
1979 		return oldflo;
1980 	}
1981 
1982 	/* Kill the previous bundle */
1983 	if (xdst) {
1984 		/* The policies were stolen for newly generated bundle */
1985 		xdst->num_pols = 0;
1986 		dst_free(&xdst->u.dst);
1987 	}
1988 
1989 	/* The flow cache does not hold a reference, it uses dst_free(),
1990 	 * but we do need to return one reference for the original caller */
1991 	dst_hold(&new_xdst->u.dst);
1992 	return &new_xdst->flo;
1993 
1994 make_dummy_bundle:
1995 	/* We found policies, but there is no bundle to instantiate:
1996 	 * either the policy blocks, has no transformations, or
1997 	 * we could not build a template (no xfrm_states). */
1998 	xdst = xfrm_create_dummy_bundle(net, dst_orig, fl, num_xfrms, family);
1999 	if (IS_ERR(xdst)) {
2000 		xfrm_pols_put(pols, num_pols);
2001 		return ERR_CAST(xdst);
2002 	}
2003 	xdst->num_pols = num_pols;
2004 	xdst->num_xfrms = num_xfrms;
2005 	memcpy(xdst->pols, pols, sizeof(struct xfrm_policy*) * num_pols);
2006 
2007 	dst_hold(&xdst->u.dst);
2008 	return &xdst->flo;
2009 
2010 inc_error:
2011 	XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTPOLERROR);
2012 error:
2013 	if (xdst != NULL)
2014 		dst_free(&xdst->u.dst);
2015 	else
2016 		xfrm_pols_put(pols, num_pols);
2017 	return ERR_PTR(err);
2018 }
2019 
2020 static struct dst_entry *make_blackhole(struct net *net, u16 family,
2021 					struct dst_entry *dst_orig)
2022 {
2023 	struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
2024 	struct dst_entry *ret;
2025 
2026 	if (!afinfo) {
2027 		dst_release(dst_orig);
2028 		return ERR_PTR(-EINVAL);
2029 	} else {
2030 		ret = afinfo->blackhole_route(net, dst_orig);
2031 	}
2032 	xfrm_policy_put_afinfo(afinfo);
2033 
2034 	return ret;
2035 }
2036 
2037 /* Main function: finds/creates a bundle for the given flow.
2038  *
2039  * At the moment we consume a raw IP route, mostly to speed up lookups
2040  * on interfaces with IPsec disabled.
2041  */
2042 struct dst_entry *xfrm_lookup(struct net *net, struct dst_entry *dst_orig,
2043 			      const struct flowi *fl,
2044 			      struct sock *sk, int flags)
2045 {
2046 	struct xfrm_policy *pols[XFRM_POLICY_TYPE_MAX];
2047 	struct flow_cache_object *flo;
2048 	struct xfrm_dst *xdst;
2049 	struct dst_entry *dst, *route;
2050 	u16 family = dst_orig->ops->family;
2051 	u8 dir = policy_to_flow_dir(XFRM_POLICY_OUT);
2052 	int i, err, num_pols, num_xfrms = 0, drop_pols = 0;
2053 
2054 restart:
2055 	dst = NULL;
2056 	xdst = NULL;
2057 	route = NULL;
2058 
2059 	if (sk && sk->sk_policy[XFRM_POLICY_OUT]) {
2060 		num_pols = 1;
2061 		pols[0] = xfrm_sk_policy_lookup(sk, XFRM_POLICY_OUT, fl);
2062 		err = xfrm_expand_policies(fl, family, pols,
2063 					   &num_pols, &num_xfrms);
2064 		if (err < 0)
2065 			goto dropdst;
2066 
2067 		if (num_pols) {
2068 			if (num_xfrms <= 0) {
2069 				drop_pols = num_pols;
2070 				goto no_transform;
2071 			}
2072 
2073 			xdst = xfrm_resolve_and_create_bundle(
2074 					pols, num_pols, fl,
2075 					family, dst_orig);
2076 			if (IS_ERR(xdst)) {
2077 				xfrm_pols_put(pols, num_pols);
2078 				err = PTR_ERR(xdst);
2079 				goto dropdst;
2080 			} else if (xdst == NULL) {
2081 				num_xfrms = 0;
2082 				drop_pols = num_pols;
2083 				goto no_transform;
2084 			}
2085 
2086 			dst_hold(&xdst->u.dst);
2087 
2088 			spin_lock_bh(&xfrm_policy_sk_bundle_lock);
2089 			xdst->u.dst.next = xfrm_policy_sk_bundles;
2090 			xfrm_policy_sk_bundles = &xdst->u.dst;
2091 			spin_unlock_bh(&xfrm_policy_sk_bundle_lock);
2092 
2093 			route = xdst->route;
2094 		}
2095 	}
2096 
2097 	if (xdst == NULL) {
2098 		/* To accelerate a bit...  */
2099 		if ((dst_orig->flags & DST_NOXFRM) ||
2100 		    !net->xfrm.policy_count[XFRM_POLICY_OUT])
2101 			goto nopol;
2102 
2103 		flo = flow_cache_lookup(net, fl, family, dir,
2104 					xfrm_bundle_lookup, dst_orig);
2105 		if (flo == NULL)
2106 			goto nopol;
2107 		if (IS_ERR(flo)) {
2108 			err = PTR_ERR(flo);
2109 			goto dropdst;
2110 		}
2111 		xdst = container_of(flo, struct xfrm_dst, flo);
2112 
2113 		num_pols = xdst->num_pols;
2114 		num_xfrms = xdst->num_xfrms;
2115 		memcpy(pols, xdst->pols, sizeof(struct xfrm_policy *) * num_pols);
2116 		route = xdst->route;
2117 	}
2118 
2119 	dst = &xdst->u.dst;
2120 	if (route == NULL && num_xfrms > 0) {
2121 		/* The only case when xfrm_bundle_lookup() returns a
2122 		 * bundle with a null route is when the template could
2123 		 * not be resolved. It means policies are there, but the
2124 		 * bundle could not be created, since we don't yet
2125 		 * have the xfrm_states. We need to wait for the KM to
2126 		 * negotiate new SAs or bail out with an error. */
2127 		if (net->xfrm.sysctl_larval_drop) {
2128 			dst_release(dst);
2129 			xfrm_pols_put(pols, drop_pols);
2130 			XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTNOSTATES);
2131 
2132 			return make_blackhole(net, family, dst_orig);
2133 		}
2134 		if (fl->flowi_flags & FLOWI_FLAG_CAN_SLEEP) {
2135 			DECLARE_WAITQUEUE(wait, current);
2136 
2137 			add_wait_queue(&net->xfrm.km_waitq, &wait);
2138 			set_current_state(TASK_INTERRUPTIBLE);
2139 			schedule();
2140 			set_current_state(TASK_RUNNING);
2141 			remove_wait_queue(&net->xfrm.km_waitq, &wait);
2142 
2143 			if (!signal_pending(current)) {
2144 				dst_release(dst);
2145 				goto restart;
2146 			}
2147 
2148 			err = -ERESTART;
2149 		} else
2150 			err = -EAGAIN;
2151 
2152 		XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTNOSTATES);
2153 		goto error;
2154 	}
2155 
2156 no_transform:
2157 	if (num_pols == 0)
2158 		goto nopol;
2159 
2160 	if ((flags & XFRM_LOOKUP_ICMP) &&
2161 	    !(pols[0]->flags & XFRM_POLICY_ICMP)) {
2162 		err = -ENOENT;
2163 		goto error;
2164 	}
2165 
2166 	for (i = 0; i < num_pols; i++)
2167 		pols[i]->curlft.use_time = get_seconds();
2168 
2169 	if (num_xfrms < 0) {
2170 		/* Prohibit the flow */
2171 		XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTPOLBLOCK);
2172 		err = -EPERM;
2173 		goto error;
2174 	} else if (num_xfrms > 0) {
2175 		/* Flow transformed */
2176 		dst_release(dst_orig);
2177 	} else {
2178 		/* Flow passes untransformed */
2179 		dst_release(dst);
2180 		dst = dst_orig;
2181 	}
2182 ok:
2183 	xfrm_pols_put(pols, drop_pols);
2184 	if (dst && dst->xfrm &&
2185 	    dst->xfrm->props.mode == XFRM_MODE_TUNNEL)
2186 		dst->flags |= DST_XFRM_TUNNEL;
2187 	return dst;
2188 
2189 nopol:
2190 	if (!(flags & XFRM_LOOKUP_ICMP)) {
2191 		dst = dst_orig;
2192 		goto ok;
2193 	}
2194 	err = -ENOENT;
2195 error:
2196 	dst_release(dst);
2197 dropdst:
2198 	dst_release(dst_orig);
2199 	xfrm_pols_put(pols, drop_pols);
2200 	return ERR_PTR(err);
2201 }
2202 EXPORT_SYMBOL(xfrm_lookup);
2203 
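/* Call the ->reject handler of the xfrm_state at position @idx in the
 * skb's sec_path, if the index is valid and the state type provides one.
 * Used on the input policy check failure path below.
 */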
2204 static inline int
2205 xfrm_secpath_reject(int idx, struct sk_buff *skb, const struct flowi *fl)
2206 {
2207 	struct xfrm_state *x;
2208 
2209 	if (!skb->sp || idx < 0 || idx >= skb->sp->len)
2210 		return 0;
2211 	x = skb->sp->xvec[idx];
2212 	if (!x->type->reject)
2213 		return 0;
2214 	return x->type->reject(x, skb, fl);
2215 }
2216 
2217 /* When the skb is transformed back to its "native" form, we have to
2218  * check policy restrictions. At the moment we do this in a maximally
2219  * stupid way. Shame on me. :-) Of course, connected sockets must
2220  * have the policy cached at them.
2221  */
2222 
2223 static inline int
2224 xfrm_state_ok(const struct xfrm_tmpl *tmpl, const struct xfrm_state *x,
2225 	      unsigned short family)
2226 {
2227 	if (xfrm_state_kern(x))
2228 		return tmpl->optional && !xfrm_state_addr_cmp(tmpl, x, tmpl->encap_family);
2229 	return	x->id.proto == tmpl->id.proto &&
2230 		(x->id.spi == tmpl->id.spi || !tmpl->id.spi) &&
2231 		(x->props.reqid == tmpl->reqid || !tmpl->reqid) &&
2232 		x->props.mode == tmpl->mode &&
2233 		(tmpl->allalgs || (tmpl->aalgos & (1<<x->props.aalgo)) ||
2234 		 !(xfrm_id_proto_match(tmpl->id.proto, IPSEC_PROTO_ANY))) &&
2235 		!(x->props.mode != XFRM_MODE_TRANSPORT &&
2236 		  xfrm_state_addr_cmp(tmpl, x, family));
2237 }
2238 
2239 /*
2240  * 0 or more than 0 is returned when validation is succeeded (either bypass
2241  * 0 or more than 0 is returned when validation succeeds (either a bypass
2242  * because of optional transport mode, or the next index of the matched
2243  * secpath state with the template).
2244  * Otherwise "-2 - errored_index" is returned.
2245  */
2246 static inline int
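/* For example, with a non-optional template and start == 0: a match at
 * sp->xvec[0] returns 1 (the next index to check), a non-matching
 * non-transport-mode state at index 0 returns -2 (i.e. -2 - 0), and
 * exhausting the secpath without a match returns -1.
 */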
2247 xfrm_policy_ok(const struct xfrm_tmpl *tmpl, const struct sec_path *sp, int start,
2248 	       unsigned short family)
2249 {
2250 	int idx = start;
2251 
2252 	if (tmpl->optional) {
2253 		if (tmpl->mode == XFRM_MODE_TRANSPORT)
2254 			return start;
2255 	} else
2256 		start = -1;
2257 	for (; idx < sp->len; idx++) {
2258 		if (xfrm_state_ok(tmpl, sp->xvec[idx], family))
2259 			return ++idx;
2260 		if (sp->xvec[idx]->props.mode != XFRM_MODE_TRANSPORT) {
2261 			if (start == -1)
2262 				start = -2-idx;
2263 			break;
2264 		}
2265 	}
2266 	return start;
2267 }
2268 
2269 int __xfrm_decode_session(struct sk_buff *skb, struct flowi *fl,
2270 			  unsigned int family, int reverse)
2271 {
2272 	struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
2273 	int err;
2274 
2275 	if (unlikely(afinfo == NULL))
2276 		return -EAFNOSUPPORT;
2277 
2278 	afinfo->decode_session(skb, fl, reverse);
2279 	err = security_xfrm_decode_session(skb, &fl->flowi_secid);
2280 	xfrm_policy_put_afinfo(afinfo);
2281 	return err;
2282 }
2283 EXPORT_SYMBOL(__xfrm_decode_session);
2284 
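/* Return 1 if any secpath entry from index @k onwards uses a
 * non-transport mode (e.g. tunnel or BEET), storing its index in *idxp;
 * return 0 otherwise.
 */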
2285 static inline int secpath_has_nontransport(const struct sec_path *sp, int k, int *idxp)
2286 {
2287 	for (; k < sp->len; k++) {
2288 		if (sp->xvec[k]->props.mode != XFRM_MODE_TRANSPORT) {
2289 			*idxp = k;
2290 			return 1;
2291 		}
2292 	}
2293 
2294 	return 0;
2295 }
2296 
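/* Central input policy check: decode the flow from the skb, verify the
 * SAs it actually used against their selectors, find the applicable
 * policy (socket policy first, then the flow cache), and make sure the
 * packet's secpath satisfies every template of every matching policy.
 * Returns 1 if the packet may be accepted, 0 if it must be dropped.
 */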
2297 int __xfrm_policy_check(struct sock *sk, int dir, struct sk_buff *skb,
2298 			unsigned short family)
2299 {
2300 	struct net *net = dev_net(skb->dev);
2301 	struct xfrm_policy *pol;
2302 	struct xfrm_policy *pols[XFRM_POLICY_TYPE_MAX];
2303 	int npols = 0;
2304 	int xfrm_nr;
2305 	int pi;
2306 	int reverse;
2307 	struct flowi fl;
2308 	u8 fl_dir;
2309 	int xerr_idx = -1;
2310 
2311 	reverse = dir & ~XFRM_POLICY_MASK;
2312 	dir &= XFRM_POLICY_MASK;
2313 	fl_dir = policy_to_flow_dir(dir);
2314 
2315 	if (__xfrm_decode_session(skb, &fl, family, reverse) < 0) {
2316 		XFRM_INC_STATS(net, LINUX_MIB_XFRMINHDRERROR);
2317 		return 0;
2318 	}
2319 
2320 	nf_nat_decode_session(skb, &fl, family);
2321 
2322 	/* First, check used SA against their selectors. */
2323 	if (skb->sp) {
2324 		int i;
2325 
2326 		for (i = skb->sp->len - 1; i >= 0; i--) {
2327 			struct xfrm_state *x = skb->sp->xvec[i];
2328 			if (!xfrm_selector_match(&x->sel, &fl, family)) {
2329 				XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATEMISMATCH);
2330 				return 0;
2331 			}
2332 		}
2333 	}
2334 
2335 	pol = NULL;
2336 	if (sk && sk->sk_policy[dir]) {
2337 		pol = xfrm_sk_policy_lookup(sk, dir, &fl);
2338 		if (IS_ERR(pol)) {
2339 			XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLERROR);
2340 			return 0;
2341 		}
2342 	}
2343 
2344 	if (!pol) {
2345 		struct flow_cache_object *flo;
2346 
2347 		flo = flow_cache_lookup(net, &fl, family, fl_dir,
2348 					xfrm_policy_lookup, NULL);
2349 		if (IS_ERR_OR_NULL(flo))
2350 			pol = ERR_CAST(flo);
2351 		else
2352 			pol = container_of(flo, struct xfrm_policy, flo);
2353 	}
2354 
2355 	if (IS_ERR(pol)) {
2356 		XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLERROR);
2357 		return 0;
2358 	}
2359 
2360 	if (!pol) {
2361 		if (skb->sp && secpath_has_nontransport(skb->sp, 0, &xerr_idx)) {
2362 			xfrm_secpath_reject(xerr_idx, skb, &fl);
2363 			XFRM_INC_STATS(net, LINUX_MIB_XFRMINNOPOLS);
2364 			return 0;
2365 		}
2366 		return 1;
2367 	}
2368 
2369 	pol->curlft.use_time = get_seconds();
2370 
2371 	pols[0] = pol;
2372 	npols++;
2373 #ifdef CONFIG_XFRM_SUB_POLICY
2374 	if (pols[0]->type != XFRM_POLICY_TYPE_MAIN) {
2375 		pols[1] = xfrm_policy_lookup_bytype(net, XFRM_POLICY_TYPE_MAIN,
2376 						    &fl, family,
2377 						    XFRM_POLICY_IN);
2378 		if (pols[1]) {
2379 			if (IS_ERR(pols[1])) {
2380 				XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLERROR);
2381 				return 0;
2382 			}
2383 			pols[1]->curlft.use_time = get_seconds();
2384 			npols++;
2385 		}
2386 	}
2387 #endif
2388 
2389 	if (pol->action == XFRM_POLICY_ALLOW) {
2390 		struct sec_path *sp;
2391 		static struct sec_path dummy;
2392 		struct xfrm_tmpl *tp[XFRM_MAX_DEPTH];
2393 		struct xfrm_tmpl *stp[XFRM_MAX_DEPTH];
2394 		struct xfrm_tmpl **tpp = tp;
2395 		int ti = 0;
2396 		int i, k;
2397 
2398 		if ((sp = skb->sp) == NULL)
2399 			sp = &dummy;
2400 
2401 		for (pi = 0; pi < npols; pi++) {
2402 			if (pols[pi] != pol &&
2403 			    pols[pi]->action != XFRM_POLICY_ALLOW) {
2404 				XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLBLOCK);
2405 				goto reject;
2406 			}
2407 			if (ti + pols[pi]->xfrm_nr >= XFRM_MAX_DEPTH) {
2408 				XFRM_INC_STATS(net, LINUX_MIB_XFRMINBUFFERERROR);
2409 				goto reject_error;
2410 			}
2411 			for (i = 0; i < pols[pi]->xfrm_nr; i++)
2412 				tpp[ti++] = &pols[pi]->xfrm_vec[i];
2413 		}
2414 		xfrm_nr = ti;
2415 		if (npols > 1) {
2416 			xfrm_tmpl_sort(stp, tpp, xfrm_nr, family);
2417 			tpp = stp;
2418 		}
2419 
2420 		/* For each tunnel xfrm, find the first matching tmpl.
2421 		 * For each tmpl before that, find the corresponding xfrm.
2422 		 * Order is _important_. Later we will implement
2423 		 * some barriers, but at the moment barriers
2424 		 * are implied between every two transformations.
2425 		 */
2426 		for (i = xfrm_nr-1, k = 0; i >= 0; i--) {
2427 			k = xfrm_policy_ok(tpp[i], sp, k, family);
2428 			if (k < 0) {
2429 				if (k < -1)
2430 					/* "-2 - errored_index" returned */
2431 					xerr_idx = -(2+k);
2432 				XFRM_INC_STATS(net, LINUX_MIB_XFRMINTMPLMISMATCH);
2433 				goto reject;
2434 			}
2435 		}
2436 
2437 		if (secpath_has_nontransport(sp, k, &xerr_idx)) {
2438 			XFRM_INC_STATS(net, LINUX_MIB_XFRMINTMPLMISMATCH);
2439 			goto reject;
2440 		}
2441 
2442 		xfrm_pols_put(pols, npols);
2443 		return 1;
2444 	}
2445 	XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLBLOCK);
2446 
2447 reject:
2448 	xfrm_secpath_reject(xerr_idx, skb, &fl);
2449 reject_error:
2450 	xfrm_pols_put(pols, npols);
2451 	return 0;
2452 }
2453 EXPORT_SYMBOL(__xfrm_policy_check);
2454 
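/* Re-route a forwarded packet through xfrm_lookup() so that a matching
 * output policy can replace its dst with an IPsec bundle.  Returns 1 on
 * success and 0 (leaving the skb without a dst) when the lookup fails.
 */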
2455 int __xfrm_route_forward(struct sk_buff *skb, unsigned short family)
2456 {
2457 	struct net *net = dev_net(skb->dev);
2458 	struct flowi fl;
2459 	struct dst_entry *dst;
2460 	int res = 1;
2461 
2462 	if (xfrm_decode_session(skb, &fl, family) < 0) {
2463 		XFRM_INC_STATS(net, LINUX_MIB_XFRMFWDHDRERROR);
2464 		return 0;
2465 	}
2466 
2467 	skb_dst_force(skb);
2468 
2469 	dst = xfrm_lookup(net, skb_dst(skb), &fl, NULL, 0);
2470 	if (IS_ERR(dst)) {
2471 		res = 0;
2472 		dst = NULL;
2473 	}
2474 	skb_dst_set(skb, dst);
2475 	return res;
2476 }
2477 EXPORT_SYMBOL(__xfrm_route_forward);
2478 
2479 /* Optimize later using cookies and generation ids. */
2480 
2481 static struct dst_entry *xfrm_dst_check(struct dst_entry *dst, u32 cookie)
2482 {
2483 	/* Code (such as __xfrm4_bundle_create()) sets dst->obsolete
2484 	 * to DST_OBSOLETE_FORCE_CHK to force all XFRM destinations to
2485 	 * get validated by dst_ops->check on every use.  We do this
2486 	 * because when a normal route referenced by an XFRM dst is
2487 	 * obsoleted we do not go looking around for all parent
2488 	 * referencing XFRM dsts so that we can invalidate them.  It
2489 	 * is just too much work.  Instead we make the checks here on
2490 	 * every use.  For example:
2491 	 *
2492 	 *	XFRM dst A --> IPv4 dst X
2493 	 *
2494 	 * X is the "xdst->route" of A (X is also the "dst->path" of A
2495 	 * in this example).  If X is marked obsolete, "A" will not
2496 	 * notice.  That's what we are validating here via the
2497 	 * stale_bundle() check.
2498 	 *
2499 	 * When a policy's bundle is pruned, we dst_free() the XFRM
2500 	 * dst which causes its ->obsolete field to be set to
2501 	 * DST_OBSOLETE_DEAD.  If an XFRM dst has been pruned like
2502 	 * this, we want to force a new route lookup.
2503 	 */
2504 	if (dst->obsolete < 0 && !stale_bundle(dst))
2505 		return dst;
2506 
2507 	return NULL;
2508 }
2509 
2510 static int stale_bundle(struct dst_entry *dst)
2511 {
2512 	return !xfrm_bundle_ok((struct xfrm_dst *)dst);
2513 }
2514 
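/* A device referenced by a bundle is going away: walk the child chain
 * and repoint every xfrm dst still using it at the namespace's loopback
 * device, dropping the references on the departing device.
 */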
2515 void xfrm_dst_ifdown(struct dst_entry *dst, struct net_device *dev)
2516 {
2517 	while ((dst = dst->child) && dst->xfrm && dst->dev == dev) {
2518 		dst->dev = dev_net(dev)->loopback_dev;
2519 		dev_hold(dst->dev);
2520 		dev_put(dev);
2521 	}
2522 }
2523 EXPORT_SYMBOL(xfrm_dst_ifdown);
2524 
2525 static void xfrm_link_failure(struct sk_buff *skb)
2526 {
2527 	/* Impossible. Such a dst must be popped before it reaches the point of failure. */
2528 }
2529 
2530 static struct dst_entry *xfrm_negative_advice(struct dst_entry *dst)
2531 {
2532 	if (dst) {
2533 		if (dst->obsolete) {
2534 			dst_release(dst);
2535 			dst = NULL;
2536 		}
2537 	}
2538 	return dst;
2539 }
2540 
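/* Free all socket-policy bundles accumulated on the global
 * xfrm_policy_sk_bundles list; xfrm_garbage_collect() below additionally
 * flushes the flow cache first.
 */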
2541 static void __xfrm_garbage_collect(struct net *net)
2542 {
2543 	struct dst_entry *head, *next;
2544 
2545 	spin_lock_bh(&xfrm_policy_sk_bundle_lock);
2546 	head = xfrm_policy_sk_bundles;
2547 	xfrm_policy_sk_bundles = NULL;
2548 	spin_unlock_bh(&xfrm_policy_sk_bundle_lock);
2549 
2550 	while (head) {
2551 		next = head->next;
2552 		dst_free(head);
2553 		head = next;
2554 	}
2555 }
2556 
2557 void xfrm_garbage_collect(struct net *net)
2558 {
2559 	flow_cache_flush();
2560 	__xfrm_garbage_collect(net);
2561 }
2562 EXPORT_SYMBOL(xfrm_garbage_collect);
2563 
2564 static void xfrm_garbage_collect_deferred(struct net *net)
2565 {
2566 	flow_cache_flush_deferred();
2567 	__xfrm_garbage_collect(net);
2568 }
2569 
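/* Initialise the cached MTUs of a bundle: for each xfrm dst in the chain,
 * cache the MTU of its child and of its route, and set RTAX_MTU to the
 * smaller of the state-adjusted child MTU and the route MTU.
 */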
2570 static void xfrm_init_pmtu(struct dst_entry *dst)
2571 {
2572 	do {
2573 		struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
2574 		u32 pmtu, route_mtu_cached;
2575 
2576 		pmtu = dst_mtu(dst->child);
2577 		xdst->child_mtu_cached = pmtu;
2578 
2579 		pmtu = xfrm_state_mtu(dst->xfrm, pmtu);
2580 
2581 		route_mtu_cached = dst_mtu(xdst->route);
2582 		xdst->route_mtu_cached = route_mtu_cached;
2583 
2584 		if (pmtu > route_mtu_cached)
2585 			pmtu = route_mtu_cached;
2586 
2587 		dst_metric_set(dst, RTAX_MTU, pmtu);
2588 	} while ((dst = dst->next));
2589 }
2590 
2591 /* Check that the bundle accepts the flow and that its components are
2592  * still valid.
2593  */
2594 
2595 static int xfrm_bundle_ok(struct xfrm_dst *first)
2596 {
2597 	struct dst_entry *dst = &first->u.dst;
2598 	struct xfrm_dst *last;
2599 	u32 mtu;
2600 
2601 	if (!dst_check(dst->path, ((struct xfrm_dst *)dst)->path_cookie) ||
2602 	    (dst->dev && !netif_running(dst->dev)))
2603 		return 0;
2604 
2605 	if (dst->flags & DST_XFRM_QUEUE)
2606 		return 1;
2607 
2608 	last = NULL;
2609 
2610 	do {
2611 		struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
2612 
2613 		if (dst->xfrm->km.state != XFRM_STATE_VALID)
2614 			return 0;
2615 		if (xdst->xfrm_genid != dst->xfrm->genid)
2616 			return 0;
2617 		if (xdst->num_pols > 0 &&
2618 		    xdst->policy_genid != atomic_read(&xdst->pols[0]->genid))
2619 			return 0;
2620 
2621 		mtu = dst_mtu(dst->child);
2622 		if (xdst->child_mtu_cached != mtu) {
2623 			last = xdst;
2624 			xdst->child_mtu_cached = mtu;
2625 		}
2626 
2627 		if (!dst_check(xdst->route, xdst->route_cookie))
2628 			return 0;
2629 		mtu = dst_mtu(xdst->route);
2630 		if (xdst->route_mtu_cached != mtu) {
2631 			last = xdst;
2632 			xdst->route_mtu_cached = mtu;
2633 		}
2634 
2635 		dst = dst->child;
2636 	} while (dst->xfrm);
2637 
2638 	if (likely(!last))
2639 		return 1;
2640 
2641 	mtu = last->child_mtu_cached;
2642 	for (;;) {
2643 		dst = &last->u.dst;
2644 
2645 		mtu = xfrm_state_mtu(dst->xfrm, mtu);
2646 		if (mtu > last->route_mtu_cached)
2647 			mtu = last->route_mtu_cached;
2648 		dst_metric_set(dst, RTAX_MTU, mtu);
2649 
2650 		if (last == first)
2651 			break;
2652 
2653 		last = (struct xfrm_dst *)last->u.dst.next;
2654 		last->child_mtu_cached = mtu;
2655 	}
2656 
2657 	return 1;
2658 }
2659 
2660 static unsigned int xfrm_default_advmss(const struct dst_entry *dst)
2661 {
2662 	return dst_metric_advmss(dst->path);
2663 }
2664 
2665 static unsigned int xfrm_mtu(const struct dst_entry *dst)
2666 {
2667 	unsigned int mtu = dst_metric_raw(dst, RTAX_MTU);
2668 
2669 	return mtu ? : dst_mtu(dst->path);
2670 }
2671 
2672 static struct neighbour *xfrm_neigh_lookup(const struct dst_entry *dst,
2673 					   struct sk_buff *skb,
2674 					   const void *daddr)
2675 {
2676 	return dst->path->ops->neigh_lookup(dst, skb, daddr);
2677 }
2678 
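/* Register the per-address-family policy operations.  Missing dst_ops
 * callbacks are filled in with the generic xfrm implementations, the
 * afinfo pointer is published under RCU, and the resulting dst_ops
 * template is copied into every existing network namespace.
 */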
2679 int xfrm_policy_register_afinfo(struct xfrm_policy_afinfo *afinfo)
2680 {
2681 	struct net *net;
2682 	int err = 0;
2683 	if (unlikely(afinfo == NULL))
2684 		return -EINVAL;
2685 	if (unlikely(afinfo->family >= NPROTO))
2686 		return -EAFNOSUPPORT;
2687 	spin_lock(&xfrm_policy_afinfo_lock);
2688 	if (unlikely(xfrm_policy_afinfo[afinfo->family] != NULL))
2689 		err = -ENOBUFS;
2690 	else {
2691 		struct dst_ops *dst_ops = afinfo->dst_ops;
2692 		if (likely(dst_ops->kmem_cachep == NULL))
2693 			dst_ops->kmem_cachep = xfrm_dst_cache;
2694 		if (likely(dst_ops->check == NULL))
2695 			dst_ops->check = xfrm_dst_check;
2696 		if (likely(dst_ops->default_advmss == NULL))
2697 			dst_ops->default_advmss = xfrm_default_advmss;
2698 		if (likely(dst_ops->mtu == NULL))
2699 			dst_ops->mtu = xfrm_mtu;
2700 		if (likely(dst_ops->negative_advice == NULL))
2701 			dst_ops->negative_advice = xfrm_negative_advice;
2702 		if (likely(dst_ops->link_failure == NULL))
2703 			dst_ops->link_failure = xfrm_link_failure;
2704 		if (likely(dst_ops->neigh_lookup == NULL))
2705 			dst_ops->neigh_lookup = xfrm_neigh_lookup;
2706 		if (likely(afinfo->garbage_collect == NULL))
2707 			afinfo->garbage_collect = xfrm_garbage_collect_deferred;
2708 		rcu_assign_pointer(xfrm_policy_afinfo[afinfo->family], afinfo);
2709 	}
2710 	spin_unlock(&xfrm_policy_afinfo_lock);
2711 
2712 	rtnl_lock();
2713 	for_each_net(net) {
2714 		struct dst_ops *xfrm_dst_ops;
2715 
2716 		switch (afinfo->family) {
2717 		case AF_INET:
2718 			xfrm_dst_ops = &net->xfrm.xfrm4_dst_ops;
2719 			break;
2720 #if IS_ENABLED(CONFIG_IPV6)
2721 		case AF_INET6:
2722 			xfrm_dst_ops = &net->xfrm.xfrm6_dst_ops;
2723 			break;
2724 #endif
2725 		default:
2726 			BUG();
2727 		}
2728 		*xfrm_dst_ops = *afinfo->dst_ops;
2729 	}
2730 	rtnl_unlock();
2731 
2732 	return err;
2733 }
2734 EXPORT_SYMBOL(xfrm_policy_register_afinfo);
2735 
2736 int xfrm_policy_unregister_afinfo(struct xfrm_policy_afinfo *afinfo)
2737 {
2738 	int err = 0;
2739 	if (unlikely(afinfo == NULL))
2740 		return -EINVAL;
2741 	if (unlikely(afinfo->family >= NPROTO))
2742 		return -EAFNOSUPPORT;
2743 	spin_lock(&xfrm_policy_afinfo_lock);
2744 	if (likely(xfrm_policy_afinfo[afinfo->family] != NULL)) {
2745 		if (unlikely(xfrm_policy_afinfo[afinfo->family] != afinfo))
2746 			err = -EINVAL;
2747 		else
2748 			RCU_INIT_POINTER(xfrm_policy_afinfo[afinfo->family],
2749 					 NULL);
2750 	}
2751 	spin_unlock(&xfrm_policy_afinfo_lock);
2752 	if (!err) {
2753 		struct dst_ops *dst_ops = afinfo->dst_ops;
2754 
2755 		synchronize_rcu();
2756 
2757 		dst_ops->kmem_cachep = NULL;
2758 		dst_ops->check = NULL;
2759 		dst_ops->negative_advice = NULL;
2760 		dst_ops->link_failure = NULL;
2761 		afinfo->garbage_collect = NULL;
2762 	}
2763 	return err;
2764 }
2765 EXPORT_SYMBOL(xfrm_policy_unregister_afinfo);
2766 
2767 static void __net_init xfrm_dst_ops_init(struct net *net)
2768 {
2769 	struct xfrm_policy_afinfo *afinfo;
2770 
2771 	rcu_read_lock();
2772 	afinfo = rcu_dereference(xfrm_policy_afinfo[AF_INET]);
2773 	if (afinfo)
2774 		net->xfrm.xfrm4_dst_ops = *afinfo->dst_ops;
2775 #if IS_ENABLED(CONFIG_IPV6)
2776 	afinfo = rcu_dereference(xfrm_policy_afinfo[AF_INET6]);
2777 	if (afinfo)
2778 		net->xfrm.xfrm6_dst_ops = *afinfo->dst_ops;
2779 #endif
2780 	rcu_read_unlock();
2781 }
2782 
2783 static int xfrm_dev_event(struct notifier_block *this, unsigned long event, void *ptr)
2784 {
2785 	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
2786 
2787 	switch (event) {
2788 	case NETDEV_DOWN:
2789 		xfrm_garbage_collect(dev_net(dev));
2790 	}
2791 	return NOTIFY_DONE;
2792 }
2793 
2794 static struct notifier_block xfrm_dev_notifier = {
2795 	.notifier_call	= xfrm_dev_event,
2796 };
2797 
2798 #ifdef CONFIG_XFRM_STATISTICS
2799 static int __net_init xfrm_statistics_init(struct net *net)
2800 {
2801 	int rv;
2802 
2803 	if (snmp_mib_init((void __percpu **)net->mib.xfrm_statistics,
2804 			  sizeof(struct linux_xfrm_mib),
2805 			  __alignof__(struct linux_xfrm_mib)) < 0)
2806 		return -ENOMEM;
2807 	rv = xfrm_proc_init(net);
2808 	if (rv < 0)
2809 		snmp_mib_free((void __percpu **)net->mib.xfrm_statistics);
2810 	return rv;
2811 }
2812 
2813 static void xfrm_statistics_fini(struct net *net)
2814 {
2815 	xfrm_proc_fini(net);
2816 	snmp_mib_free((void __percpu **)net->mib.xfrm_statistics);
2817 }
2818 #else
2819 static int __net_init xfrm_statistics_init(struct net *net)
2820 {
2821 	return 0;
2822 }
2823 
2824 static void xfrm_statistics_fini(struct net *net)
2825 {
2826 }
2827 #endif
2828 
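/* Per-namespace policy setup: create the xfrm_dst kmem cache (init_net
 * only), allocate the by-index and per-direction by-destination hash
 * tables with 8 buckets each (grown later by the xfrm_hash_resize work),
 * and register the netdevice notifier for the initial namespace.
 */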
2829 static int __net_init xfrm_policy_init(struct net *net)
2830 {
2831 	unsigned int hmask, sz;
2832 	int dir;
2833 
2834 	if (net_eq(net, &init_net))
2835 		xfrm_dst_cache = kmem_cache_create("xfrm_dst_cache",
2836 					   sizeof(struct xfrm_dst),
2837 					   0, SLAB_HWCACHE_ALIGN|SLAB_PANIC,
2838 					   NULL);
2839 
2840 	hmask = 8 - 1;
2841 	sz = (hmask+1) * sizeof(struct hlist_head);
2842 
2843 	net->xfrm.policy_byidx = xfrm_hash_alloc(sz);
2844 	if (!net->xfrm.policy_byidx)
2845 		goto out_byidx;
2846 	net->xfrm.policy_idx_hmask = hmask;
2847 
2848 	for (dir = 0; dir < XFRM_POLICY_MAX * 2; dir++) {
2849 		struct xfrm_policy_hash *htab;
2850 
2851 		net->xfrm.policy_count[dir] = 0;
2852 		INIT_HLIST_HEAD(&net->xfrm.policy_inexact[dir]);
2853 
2854 		htab = &net->xfrm.policy_bydst[dir];
2855 		htab->table = xfrm_hash_alloc(sz);
2856 		if (!htab->table)
2857 			goto out_bydst;
2858 		htab->hmask = hmask;
2859 	}
2860 
2861 	INIT_LIST_HEAD(&net->xfrm.policy_all);
2862 	INIT_WORK(&net->xfrm.policy_hash_work, xfrm_hash_resize);
2863 	if (net_eq(net, &init_net))
2864 		register_netdevice_notifier(&xfrm_dev_notifier);
2865 	return 0;
2866 
2867 out_bydst:
2868 	for (dir--; dir >= 0; dir--) {
2869 		struct xfrm_policy_hash *htab;
2870 
2871 		htab = &net->xfrm.policy_bydst[dir];
2872 		xfrm_hash_free(htab->table, sz);
2873 	}
2874 	xfrm_hash_free(net->xfrm.policy_byidx, sz);
2875 out_byidx:
2876 	return -ENOMEM;
2877 }
2878 
2879 static void xfrm_policy_fini(struct net *net)
2880 {
2881 	struct xfrm_audit audit_info;
2882 	unsigned int sz;
2883 	int dir;
2884 
2885 	flush_work(&net->xfrm.policy_hash_work);
2886 #ifdef CONFIG_XFRM_SUB_POLICY
2887 	audit_info.loginuid = INVALID_UID;
2888 	audit_info.sessionid = -1;
2889 	audit_info.secid = 0;
2890 	xfrm_policy_flush(net, XFRM_POLICY_TYPE_SUB, &audit_info);
2891 #endif
2892 	audit_info.loginuid = INVALID_UID;
2893 	audit_info.sessionid = -1;
2894 	audit_info.secid = 0;
2895 	xfrm_policy_flush(net, XFRM_POLICY_TYPE_MAIN, &audit_info);
2896 
2897 	WARN_ON(!list_empty(&net->xfrm.policy_all));
2898 
2899 	for (dir = 0; dir < XFRM_POLICY_MAX * 2; dir++) {
2900 		struct xfrm_policy_hash *htab;
2901 
2902 		WARN_ON(!hlist_empty(&net->xfrm.policy_inexact[dir]));
2903 
2904 		htab = &net->xfrm.policy_bydst[dir];
2905 		sz = (htab->hmask + 1) * sizeof(struct hlist_head);
2906 		WARN_ON(!hlist_empty(htab->table));
2907 		xfrm_hash_free(htab->table, sz);
2908 	}
2909 
2910 	sz = (net->xfrm.policy_idx_hmask + 1) * sizeof(struct hlist_head);
2911 	WARN_ON(!hlist_empty(net->xfrm.policy_byidx));
2912 	xfrm_hash_free(net->xfrm.policy_byidx, sz);
2913 }
2914 
2915 static int __net_init xfrm_net_init(struct net *net)
2916 {
2917 	int rv;
2918 
2919 	rv = xfrm_statistics_init(net);
2920 	if (rv < 0)
2921 		goto out_statistics;
2922 	rv = xfrm_state_init(net);
2923 	if (rv < 0)
2924 		goto out_state;
2925 	rv = xfrm_policy_init(net);
2926 	if (rv < 0)
2927 		goto out_policy;
2928 	xfrm_dst_ops_init(net);
2929 	rv = xfrm_sysctl_init(net);
2930 	if (rv < 0)
2931 		goto out_sysctl;
2932 	return 0;
2933 
2934 out_sysctl:
2935 	xfrm_policy_fini(net);
2936 out_policy:
2937 	xfrm_state_fini(net);
2938 out_state:
2939 	xfrm_statistics_fini(net);
2940 out_statistics:
2941 	return rv;
2942 }
2943 
2944 static void __net_exit xfrm_net_exit(struct net *net)
2945 {
2946 	xfrm_sysctl_fini(net);
2947 	xfrm_policy_fini(net);
2948 	xfrm_state_fini(net);
2949 	xfrm_statistics_fini(net);
2950 }
2951 
2952 static struct pernet_operations __net_initdata xfrm_net_ops = {
2953 	.init = xfrm_net_init,
2954 	.exit = xfrm_net_exit,
2955 };
2956 
2957 void __init xfrm_init(void)
2958 {
2959 	register_pernet_subsys(&xfrm_net_ops);
2960 	xfrm_input_init();
2961 }
2962 
2963 #ifdef CONFIG_AUDITSYSCALL
2964 static void xfrm_audit_common_policyinfo(struct xfrm_policy *xp,
2965 					 struct audit_buffer *audit_buf)
2966 {
2967 	struct xfrm_sec_ctx *ctx = xp->security;
2968 	struct xfrm_selector *sel = &xp->selector;
2969 
2970 	if (ctx)
2971 		audit_log_format(audit_buf, " sec_alg=%u sec_doi=%u sec_obj=%s",
2972 				 ctx->ctx_alg, ctx->ctx_doi, ctx->ctx_str);
2973 
2974 	switch (sel->family) {
2975 	case AF_INET:
2976 		audit_log_format(audit_buf, " src=%pI4", &sel->saddr.a4);
2977 		if (sel->prefixlen_s != 32)
2978 			audit_log_format(audit_buf, " src_prefixlen=%d",
2979 					 sel->prefixlen_s);
2980 		audit_log_format(audit_buf, " dst=%pI4", &sel->daddr.a4);
2981 		if (sel->prefixlen_d != 32)
2982 			audit_log_format(audit_buf, " dst_prefixlen=%d",
2983 					 sel->prefixlen_d);
2984 		break;
2985 	case AF_INET6:
2986 		audit_log_format(audit_buf, " src=%pI6", sel->saddr.a6);
2987 		if (sel->prefixlen_s != 128)
2988 			audit_log_format(audit_buf, " src_prefixlen=%d",
2989 					 sel->prefixlen_s);
2990 		audit_log_format(audit_buf, " dst=%pI6", sel->daddr.a6);
2991 		if (sel->prefixlen_d != 128)
2992 			audit_log_format(audit_buf, " dst_prefixlen=%d",
2993 					 sel->prefixlen_d);
2994 		break;
2995 	}
2996 }
2997 
2998 void xfrm_audit_policy_add(struct xfrm_policy *xp, int result,
2999 			   kuid_t auid, u32 sessionid, u32 secid)
3000 {
3001 	struct audit_buffer *audit_buf;
3002 
3003 	audit_buf = xfrm_audit_start("SPD-add");
3004 	if (audit_buf == NULL)
3005 		return;
3006 	xfrm_audit_helper_usrinfo(auid, sessionid, secid, audit_buf);
3007 	audit_log_format(audit_buf, " res=%u", result);
3008 	xfrm_audit_common_policyinfo(xp, audit_buf);
3009 	audit_log_end(audit_buf);
3010 }
3011 EXPORT_SYMBOL_GPL(xfrm_audit_policy_add);
3012 
3013 void xfrm_audit_policy_delete(struct xfrm_policy *xp, int result,
3014 			      kuid_t auid, u32 sessionid, u32 secid)
3015 {
3016 	struct audit_buffer *audit_buf;
3017 
3018 	audit_buf = xfrm_audit_start("SPD-delete");
3019 	if (audit_buf == NULL)
3020 		return;
3021 	xfrm_audit_helper_usrinfo(auid, sessionid, secid, audit_buf);
3022 	audit_log_format(audit_buf, " res=%u", result);
3023 	xfrm_audit_common_policyinfo(xp, audit_buf);
3024 	audit_log_end(audit_buf);
3025 }
3026 EXPORT_SYMBOL_GPL(xfrm_audit_policy_delete);
3027 #endif
3028 
3029 #ifdef CONFIG_XFRM_MIGRATE
3030 static bool xfrm_migrate_selector_match(const struct xfrm_selector *sel_cmp,
3031 					const struct xfrm_selector *sel_tgt)
3032 {
3033 	if (sel_cmp->proto == IPSEC_ULPROTO_ANY) {
3034 		if (sel_tgt->family == sel_cmp->family &&
3035 		    xfrm_addr_equal(&sel_tgt->daddr, &sel_cmp->daddr,
3036 				    sel_cmp->family) &&
3037 		    xfrm_addr_equal(&sel_tgt->saddr, &sel_cmp->saddr,
3038 				    sel_cmp->family) &&
3039 		    sel_tgt->prefixlen_d == sel_cmp->prefixlen_d &&
3040 		    sel_tgt->prefixlen_s == sel_cmp->prefixlen_s) {
3041 			return true;
3042 		}
3043 	} else {
3044 		if (memcmp(sel_tgt, sel_cmp, sizeof(*sel_tgt)) == 0) {
3045 			return true;
3046 		}
3047 	}
3048 	return false;
3049 }
3050 
3051 static struct xfrm_policy *xfrm_migrate_policy_find(const struct xfrm_selector *sel,
3052 						     u8 dir, u8 type)
3053 {
3054 	struct xfrm_policy *pol, *ret = NULL;
3055 	struct hlist_head *chain;
3056 	u32 priority = ~0U;
3057 
3058 	read_lock_bh(&xfrm_policy_lock);
3059 	chain = policy_hash_direct(&init_net, &sel->daddr, &sel->saddr, sel->family, dir);
3060 	hlist_for_each_entry(pol, chain, bydst) {
3061 		if (xfrm_migrate_selector_match(sel, &pol->selector) &&
3062 		    pol->type == type) {
3063 			ret = pol;
3064 			priority = ret->priority;
3065 			break;
3066 		}
3067 	}
3068 	chain = &init_net.xfrm.policy_inexact[dir];
3069 	hlist_for_each_entry(pol, chain, bydst) {
3070 		if (xfrm_migrate_selector_match(sel, &pol->selector) &&
3071 		    pol->type == type &&
3072 		    pol->priority < priority) {
3073 			ret = pol;
3074 			break;
3075 		}
3076 	}
3077 
3078 	if (ret)
3079 		xfrm_pol_hold(ret);
3080 
3081 	read_unlock_bh(&xfrm_policy_lock);
3082 
3083 	return ret;
3084 }
3085 
3086 static int migrate_tmpl_match(const struct xfrm_migrate *m, const struct xfrm_tmpl *t)
3087 {
3088 	int match = 0;
3089 
3090 	if (t->mode == m->mode && t->id.proto == m->proto &&
3091 	    (m->reqid == 0 || t->reqid == m->reqid)) {
3092 		switch (t->mode) {
3093 		case XFRM_MODE_TUNNEL:
3094 		case XFRM_MODE_BEET:
3095 			if (xfrm_addr_equal(&t->id.daddr, &m->old_daddr,
3096 					    m->old_family) &&
3097 			    xfrm_addr_equal(&t->saddr, &m->old_saddr,
3098 					    m->old_family)) {
3099 				match = 1;
3100 			}
3101 			break;
3102 		case XFRM_MODE_TRANSPORT:
3103 			/* in case of transport mode, the template does not store
3104 			   any IP addresses, hence we just compare mode and
3105 			   protocol */
3106 			match = 1;
3107 			break;
3108 		default:
3109 			break;
3110 		}
3111 	}
3112 	return match;
3113 }
3114 
3115 /* update endpoint address(es) of template(s) */
3116 static int xfrm_policy_migrate(struct xfrm_policy *pol,
3117 			       struct xfrm_migrate *m, int num_migrate)
3118 {
3119 	struct xfrm_migrate *mp;
3120 	int i, j, n = 0;
3121 
3122 	write_lock_bh(&pol->lock);
3123 	if (unlikely(pol->walk.dead)) {
3124 		/* target policy has been deleted */
3125 		write_unlock_bh(&pol->lock);
3126 		return -ENOENT;
3127 	}
3128 
3129 	for (i = 0; i < pol->xfrm_nr; i++) {
3130 		for (j = 0, mp = m; j < num_migrate; j++, mp++) {
3131 			if (!migrate_tmpl_match(mp, &pol->xfrm_vec[i]))
3132 				continue;
3133 			n++;
3134 			if (pol->xfrm_vec[i].mode != XFRM_MODE_TUNNEL &&
3135 			    pol->xfrm_vec[i].mode != XFRM_MODE_BEET)
3136 				continue;
3137 			/* update endpoints */
3138 			memcpy(&pol->xfrm_vec[i].id.daddr, &mp->new_daddr,
3139 			       sizeof(pol->xfrm_vec[i].id.daddr));
3140 			memcpy(&pol->xfrm_vec[i].saddr, &mp->new_saddr,
3141 			       sizeof(pol->xfrm_vec[i].saddr));
3142 			pol->xfrm_vec[i].encap_family = mp->new_family;
3143 			/* flush bundles */
3144 			atomic_inc(&pol->genid);
3145 		}
3146 	}
3147 
3148 	write_unlock_bh(&pol->lock);
3149 
3150 	if (!n)
3151 		return -ENODATA;
3152 
3153 	return 0;
3154 }
3155 
3156 static int xfrm_migrate_check(const struct xfrm_migrate *m, int num_migrate)
3157 {
3158 	int i, j;
3159 
3160 	if (num_migrate < 1 || num_migrate > XFRM_MAX_DEPTH)
3161 		return -EINVAL;
3162 
3163 	for (i = 0; i < num_migrate; i++) {
3164 		if (xfrm_addr_equal(&m[i].old_daddr, &m[i].new_daddr,
3165 				    m[i].old_family) &&
3166 		    xfrm_addr_equal(&m[i].old_saddr, &m[i].new_saddr,
3167 				    m[i].old_family))
3168 			return -EINVAL;
3169 		if (xfrm_addr_any(&m[i].new_daddr, m[i].new_family) ||
3170 		    xfrm_addr_any(&m[i].new_saddr, m[i].new_family))
3171 			return -EINVAL;
3172 
3173 		/* check if there is any duplicated entry */
3174 		for (j = i + 1; j < num_migrate; j++) {
3175 			if (!memcmp(&m[i].old_daddr, &m[j].old_daddr,
3176 				    sizeof(m[i].old_daddr)) &&
3177 			    !memcmp(&m[i].old_saddr, &m[j].old_saddr,
3178 				    sizeof(m[i].old_saddr)) &&
3179 			    m[i].proto == m[j].proto &&
3180 			    m[i].mode == m[j].mode &&
3181 			    m[i].reqid == m[j].reqid &&
3182 			    m[i].old_family == m[j].old_family)
3183 				return -EINVAL;
3184 		}
3185 	}
3186 
3187 	return 0;
3188 }
3189 
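/* Migrate the policy selected by @sel/@dir/@type and its states to the
 * new addresses described in @m (typically driven by a MIGRATE message
 * from a key manager): find the policy, clone and re-address the matching
 * states, update the policy templates, delete the old states and finally
 * announce the migration via km_migrate().
 */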
3190 int xfrm_migrate(const struct xfrm_selector *sel, u8 dir, u8 type,
3191 		 struct xfrm_migrate *m, int num_migrate,
3192 		 struct xfrm_kmaddress *k)
3193 {
3194 	int i, err, nx_cur = 0, nx_new = 0;
3195 	struct xfrm_policy *pol = NULL;
3196 	struct xfrm_state *x, *xc;
3197 	struct xfrm_state *x_cur[XFRM_MAX_DEPTH];
3198 	struct xfrm_state *x_new[XFRM_MAX_DEPTH];
3199 	struct xfrm_migrate *mp;
3200 
3201 	if ((err = xfrm_migrate_check(m, num_migrate)) < 0)
3202 		goto out;
3203 
3204 	/* Stage 1 - find policy */
3205 	if ((pol = xfrm_migrate_policy_find(sel, dir, type)) == NULL) {
3206 		err = -ENOENT;
3207 		goto out;
3208 	}
3209 
3210 	/* Stage 2 - find and update state(s) */
3211 	for (i = 0, mp = m; i < num_migrate; i++, mp++) {
3212 		if ((x = xfrm_migrate_state_find(mp))) {
3213 			x_cur[nx_cur] = x;
3214 			nx_cur++;
3215 			if ((xc = xfrm_state_migrate(x, mp))) {
3216 				x_new[nx_new] = xc;
3217 				nx_new++;
3218 			} else {
3219 				err = -ENODATA;
3220 				goto restore_state;
3221 			}
3222 		}
3223 	}
3224 
3225 	/* Stage 3 - update policy */
3226 	if ((err = xfrm_policy_migrate(pol, m, num_migrate)) < 0)
3227 		goto restore_state;
3228 
3229 	/* Stage 4 - delete old state(s) */
3230 	if (nx_cur) {
3231 		xfrm_states_put(x_cur, nx_cur);
3232 		xfrm_states_delete(x_cur, nx_cur);
3233 	}
3234 
3235 	/* Stage 5 - announce */
3236 	km_migrate(sel, dir, type, m, num_migrate, k);
3237 
3238 	xfrm_pol_put(pol);
3239 
3240 	return 0;
3241 out:
3242 	return err;
3243 
3244 restore_state:
3245 	if (pol)
3246 		xfrm_pol_put(pol);
3247 	if (nx_cur)
3248 		xfrm_states_put(x_cur, nx_cur);
3249 	if (nx_new)
3250 		xfrm_states_delete(x_new, nx_new);
3251 
3252 	return err;
3253 }
3254 EXPORT_SYMBOL(xfrm_migrate);
3255 #endif
3256