xref: /openbmc/linux/net/xfrm/xfrm_policy.c (revision 77d84ff8)
/*
 * xfrm_policy.c
 *
 * Changes:
 *	Mitsuru KANDA @USAGI
 * 	Kazunori MIYAZAWA @USAGI
 * 	Kunihiro Ishiguro <kunihiro@ipinfusion.com>
 * 		IPv6 support
 * 	Kazunori MIYAZAWA @USAGI
 * 	YOSHIFUJI Hideaki
 * 		Split up af-specific portion
 *	Derek Atkins <derek@ihtfp.com>		Add the post_input processor
 *
 */

#include <linux/err.h>
#include <linux/slab.h>
#include <linux/kmod.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <linux/notifier.h>
#include <linux/netdevice.h>
#include <linux/netfilter.h>
#include <linux/module.h>
#include <linux/cache.h>
#include <linux/audit.h>
#include <net/dst.h>
#include <net/flow.h>
#include <net/xfrm.h>
#include <net/ip.h>
#ifdef CONFIG_XFRM_STATISTICS
#include <net/snmp.h>
#endif

#include "xfrm_hash.h"

#define XFRM_QUEUE_TMO_MIN ((unsigned)(HZ/10))
#define XFRM_QUEUE_TMO_MAX ((unsigned)(60*HZ))
#define XFRM_MAX_QUEUE_LEN	100

DEFINE_MUTEX(xfrm_cfg_mutex);
EXPORT_SYMBOL(xfrm_cfg_mutex);

static DEFINE_SPINLOCK(xfrm_policy_sk_bundle_lock);
static struct dst_entry *xfrm_policy_sk_bundles;
static DEFINE_RWLOCK(xfrm_policy_lock);

static DEFINE_SPINLOCK(xfrm_policy_afinfo_lock);
static struct xfrm_policy_afinfo __rcu *xfrm_policy_afinfo[NPROTO]
						__read_mostly;

static struct kmem_cache *xfrm_dst_cache __read_mostly;

static void xfrm_init_pmtu(struct dst_entry *dst);
static int stale_bundle(struct dst_entry *dst);
static int xfrm_bundle_ok(struct xfrm_dst *xdst);
static void xfrm_policy_queue_process(unsigned long arg);

static struct xfrm_policy *__xfrm_policy_unlink(struct xfrm_policy *pol,
						int dir);

static inline bool
__xfrm4_selector_match(const struct xfrm_selector *sel, const struct flowi *fl)
{
	const struct flowi4 *fl4 = &fl->u.ip4;

	return  addr4_match(fl4->daddr, sel->daddr.a4, sel->prefixlen_d) &&
		addr4_match(fl4->saddr, sel->saddr.a4, sel->prefixlen_s) &&
		!((xfrm_flowi_dport(fl, &fl4->uli) ^ sel->dport) & sel->dport_mask) &&
		!((xfrm_flowi_sport(fl, &fl4->uli) ^ sel->sport) & sel->sport_mask) &&
		(fl4->flowi4_proto == sel->proto || !sel->proto) &&
		(fl4->flowi4_oif == sel->ifindex || !sel->ifindex);
}

static inline bool
__xfrm6_selector_match(const struct xfrm_selector *sel, const struct flowi *fl)
{
	const struct flowi6 *fl6 = &fl->u.ip6;

	return  addr_match(&fl6->daddr, &sel->daddr, sel->prefixlen_d) &&
		addr_match(&fl6->saddr, &sel->saddr, sel->prefixlen_s) &&
		!((xfrm_flowi_dport(fl, &fl6->uli) ^ sel->dport) & sel->dport_mask) &&
		!((xfrm_flowi_sport(fl, &fl6->uli) ^ sel->sport) & sel->sport_mask) &&
		(fl6->flowi6_proto == sel->proto || !sel->proto) &&
		(fl6->flowi6_oif == sel->ifindex || !sel->ifindex);
}

bool xfrm_selector_match(const struct xfrm_selector *sel, const struct flowi *fl,
			 unsigned short family)
{
	switch (family) {
	case AF_INET:
		return __xfrm4_selector_match(sel, fl);
	case AF_INET6:
		return __xfrm6_selector_match(sel, fl);
	}
	return false;
}

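/* A minimal usage sketch (kept out of the build by the #if 0 guard):
 * matching an IPv4 flow against a selector.  The concrete addresses and
 * the helper name are hypothetical, chosen only for illustration.
 */
#if 0
static bool example_selector_match(void)
{
	struct xfrm_selector sel = {
		.daddr.a4    = htonl(0x0a000001),	/* 10.0.0.1/32 */
		.prefixlen_d = 32,
		.prefixlen_s = 0,			/* any source */
		.proto       = IPPROTO_UDP,
	};
	struct flowi fl = {
		.u.ip4 = {
			.daddr	      = htonl(0x0a000001),
			.flowi4_proto = IPPROTO_UDP,
		},
	};

	/* True: daddr matches the /32, saddr matches the /0 wildcard,
	 * the ports are wildcarded (zero masks) and the protocol agrees. */
	return xfrm_selector_match(&sel, &fl, AF_INET);
}
#endif
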
static struct xfrm_policy_afinfo *xfrm_policy_get_afinfo(unsigned short family)
{
	struct xfrm_policy_afinfo *afinfo;

	if (unlikely(family >= NPROTO))
		return NULL;
	rcu_read_lock();
	afinfo = rcu_dereference(xfrm_policy_afinfo[family]);
	if (unlikely(!afinfo))
		rcu_read_unlock();
	return afinfo;
}

static void xfrm_policy_put_afinfo(struct xfrm_policy_afinfo *afinfo)
{
	rcu_read_unlock();
}

static inline struct dst_entry *__xfrm_dst_lookup(struct net *net, int tos,
						  const xfrm_address_t *saddr,
						  const xfrm_address_t *daddr,
						  int family)
{
	struct xfrm_policy_afinfo *afinfo;
	struct dst_entry *dst;

	afinfo = xfrm_policy_get_afinfo(family);
	if (unlikely(afinfo == NULL))
		return ERR_PTR(-EAFNOSUPPORT);

	dst = afinfo->dst_lookup(net, tos, saddr, daddr);

	xfrm_policy_put_afinfo(afinfo);

	return dst;
}

static inline struct dst_entry *xfrm_dst_lookup(struct xfrm_state *x, int tos,
						xfrm_address_t *prev_saddr,
						xfrm_address_t *prev_daddr,
						int family)
{
	struct net *net = xs_net(x);
	xfrm_address_t *saddr = &x->props.saddr;
	xfrm_address_t *daddr = &x->id.daddr;
	struct dst_entry *dst;

	if (x->type->flags & XFRM_TYPE_LOCAL_COADDR) {
		saddr = x->coaddr;
		daddr = prev_daddr;
	}
	if (x->type->flags & XFRM_TYPE_REMOTE_COADDR) {
		saddr = prev_saddr;
		daddr = x->coaddr;
	}

	dst = __xfrm_dst_lookup(net, tos, saddr, daddr, family);

	if (!IS_ERR(dst)) {
		if (prev_saddr != saddr)
			memcpy(prev_saddr, saddr, sizeof(*prev_saddr));
		if (prev_daddr != daddr)
			memcpy(prev_daddr, daddr, sizeof(*prev_daddr));
	}

	return dst;
}

static inline unsigned long make_jiffies(long secs)
{
	if (secs >= (MAX_SCHEDULE_TIMEOUT-1)/HZ)
		return MAX_SCHEDULE_TIMEOUT-1;
	else
		return secs*HZ;
}

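/* Worked example (illustrative): with HZ == 100, make_jiffies(2) returns
 * 200 jiffies, while any value at or beyond (MAX_SCHEDULE_TIMEOUT-1)/HZ is
 * clamped to MAX_SCHEDULE_TIMEOUT-1, so the secs*HZ multiplication below
 * can never overflow the timer arithmetic.
 */
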
static void xfrm_policy_timer(unsigned long data)
{
	struct xfrm_policy *xp = (struct xfrm_policy *)data;
	unsigned long now = get_seconds();
	long next = LONG_MAX;
	int warn = 0;
	int dir;

	read_lock(&xp->lock);

	if (unlikely(xp->walk.dead))
		goto out;

	dir = xfrm_policy_id2dir(xp->index);

	if (xp->lft.hard_add_expires_seconds) {
		long tmo = xp->lft.hard_add_expires_seconds +
			xp->curlft.add_time - now;
		if (tmo <= 0)
			goto expired;
		if (tmo < next)
			next = tmo;
	}
	if (xp->lft.hard_use_expires_seconds) {
		long tmo = xp->lft.hard_use_expires_seconds +
			(xp->curlft.use_time ? : xp->curlft.add_time) - now;
		if (tmo <= 0)
			goto expired;
		if (tmo < next)
			next = tmo;
	}
	if (xp->lft.soft_add_expires_seconds) {
		long tmo = xp->lft.soft_add_expires_seconds +
			xp->curlft.add_time - now;
		if (tmo <= 0) {
			warn = 1;
			tmo = XFRM_KM_TIMEOUT;
		}
		if (tmo < next)
			next = tmo;
	}
	if (xp->lft.soft_use_expires_seconds) {
		long tmo = xp->lft.soft_use_expires_seconds +
			(xp->curlft.use_time ? : xp->curlft.add_time) - now;
		if (tmo <= 0) {
			warn = 1;
			tmo = XFRM_KM_TIMEOUT;
		}
		if (tmo < next)
			next = tmo;
	}

	if (warn)
		km_policy_expired(xp, dir, 0, 0);
	if (next != LONG_MAX &&
	    !mod_timer(&xp->timer, jiffies + make_jiffies(next)))
		xfrm_pol_hold(xp);

out:
	read_unlock(&xp->lock);
	xfrm_pol_put(xp);
	return;

expired:
	read_unlock(&xp->lock);
	if (!xfrm_policy_delete(xp, dir))
		km_policy_expired(xp, dir, 1, 0);
	xfrm_pol_put(xp);
}

static struct flow_cache_object *xfrm_policy_flo_get(struct flow_cache_object *flo)
{
	struct xfrm_policy *pol = container_of(flo, struct xfrm_policy, flo);

	if (unlikely(pol->walk.dead))
		flo = NULL;
	else
		xfrm_pol_hold(pol);

	return flo;
}

static int xfrm_policy_flo_check(struct flow_cache_object *flo)
{
	struct xfrm_policy *pol = container_of(flo, struct xfrm_policy, flo);

	return !pol->walk.dead;
}

static void xfrm_policy_flo_delete(struct flow_cache_object *flo)
{
	xfrm_pol_put(container_of(flo, struct xfrm_policy, flo));
}

static const struct flow_cache_ops xfrm_policy_fc_ops = {
	.get = xfrm_policy_flo_get,
	.check = xfrm_policy_flo_check,
	.delete = xfrm_policy_flo_delete,
};

/* Allocate an xfrm_policy. Not used directly here; it is intended for the
 * pfkeyv2 SPD calls.
 */

struct xfrm_policy *xfrm_policy_alloc(struct net *net, gfp_t gfp)
{
	struct xfrm_policy *policy;

	policy = kzalloc(sizeof(struct xfrm_policy), gfp);

	if (policy) {
		write_pnet(&policy->xp_net, net);
		INIT_LIST_HEAD(&policy->walk.all);
		INIT_HLIST_NODE(&policy->bydst);
		INIT_HLIST_NODE(&policy->byidx);
		rwlock_init(&policy->lock);
		atomic_set(&policy->refcnt, 1);
		skb_queue_head_init(&policy->polq.hold_queue);
		setup_timer(&policy->timer, xfrm_policy_timer,
				(unsigned long)policy);
		setup_timer(&policy->polq.hold_timer, xfrm_policy_queue_process,
			    (unsigned long)policy);
		policy->flo.ops = &xfrm_policy_fc_ops;
	}
	return policy;
}
EXPORT_SYMBOL(xfrm_policy_alloc);

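/* Lifecycle sketch (not compiled): allocate a policy, mark it dead and
 * release it.  Real callers (e.g. the netlink and pfkeyv2 front ends)
 * fill in the selector, lifetimes and templates before linking it; the
 * function name below is hypothetical.
 */
#if 0
static int example_policy_lifecycle(struct net *net)
{
	struct xfrm_policy *pol = xfrm_policy_alloc(net, GFP_KERNEL);

	if (!pol)
		return -ENOMEM;

	/* ... configure pol->selector, pol->lft, pol->xfrm_vec ... */

	pol->walk.dead = 1;	/* xfrm_policy_destroy() insists on this */
	xfrm_policy_destroy(pol);
	return 0;
}
#endif
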
/* Destroy xfrm_policy: descendant resources must be released by this point. */

void xfrm_policy_destroy(struct xfrm_policy *policy)
{
	BUG_ON(!policy->walk.dead);

	if (del_timer(&policy->timer) || del_timer(&policy->polq.hold_timer))
		BUG();

	security_xfrm_policy_free(policy->security);
	kfree(policy);
}
EXPORT_SYMBOL(xfrm_policy_destroy);

static void xfrm_queue_purge(struct sk_buff_head *list)
{
	struct sk_buff *skb;

	while ((skb = skb_dequeue(list)) != NULL)
		kfree_skb(skb);
}

/* Rule must be locked. Release descendant resources and announce the
 * entry dead. The rule must already be unlinked from the lists.
 */

static void xfrm_policy_kill(struct xfrm_policy *policy)
{
	policy->walk.dead = 1;

	atomic_inc(&policy->genid);

	if (del_timer(&policy->polq.hold_timer))
		xfrm_pol_put(policy);
	xfrm_queue_purge(&policy->polq.hold_queue);

	if (del_timer(&policy->timer))
		xfrm_pol_put(policy);

	xfrm_pol_put(policy);
}

static unsigned int xfrm_policy_hashmax __read_mostly = 1 * 1024 * 1024;

static inline unsigned int idx_hash(struct net *net, u32 index)
{
	return __idx_hash(index, net->xfrm.policy_idx_hmask);
}

static struct hlist_head *policy_hash_bysel(struct net *net,
					    const struct xfrm_selector *sel,
					    unsigned short family, int dir)
{
	unsigned int hmask = net->xfrm.policy_bydst[dir].hmask;
	unsigned int hash = __sel_hash(sel, family, hmask);

	return (hash == hmask + 1 ?
		&net->xfrm.policy_inexact[dir] :
		net->xfrm.policy_bydst[dir].table + hash);
}

static struct hlist_head *policy_hash_direct(struct net *net,
					     const xfrm_address_t *daddr,
					     const xfrm_address_t *saddr,
					     unsigned short family, int dir)
{
	unsigned int hmask = net->xfrm.policy_bydst[dir].hmask;
	unsigned int hash = __addr_hash(daddr, saddr, family, hmask);

	return net->xfrm.policy_bydst[dir].table + hash;
}

static void xfrm_dst_hash_transfer(struct hlist_head *list,
				   struct hlist_head *ndsttable,
				   unsigned int nhashmask)
{
	struct hlist_node *tmp, *entry0 = NULL;
	struct xfrm_policy *pol;
	unsigned int h0 = 0;

redo:
	hlist_for_each_entry_safe(pol, tmp, list, bydst) {
		unsigned int h;

		h = __addr_hash(&pol->selector.daddr, &pol->selector.saddr,
				pol->family, nhashmask);
		if (!entry0) {
			hlist_del(&pol->bydst);
			hlist_add_head(&pol->bydst, ndsttable+h);
			h0 = h;
		} else {
			if (h != h0)
				continue;
			hlist_del(&pol->bydst);
			hlist_add_after(entry0, &pol->bydst);
		}
		entry0 = &pol->bydst;
	}
	if (!hlist_empty(list)) {
		entry0 = NULL;
		goto redo;
	}
}

static void xfrm_idx_hash_transfer(struct hlist_head *list,
				   struct hlist_head *nidxtable,
				   unsigned int nhashmask)
{
	struct hlist_node *tmp;
	struct xfrm_policy *pol;

	hlist_for_each_entry_safe(pol, tmp, list, byidx) {
		unsigned int h;

		h = __idx_hash(pol->index, nhashmask);
		hlist_add_head(&pol->byidx, nidxtable+h);
	}
}

static unsigned long xfrm_new_hash_mask(unsigned int old_hmask)
{
	return ((old_hmask + 1) << 1) - 1;
}

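/* Illustrative growth sequence: the mask expands 15 -> 31 -> 63 -> ...,
 * i.e. the table doubles on every resize, since
 * ((old_hmask + 1) << 1) - 1 turns "2^n - 1" into "2^(n+1) - 1".
 */
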
static void xfrm_bydst_resize(struct net *net, int dir)
{
	unsigned int hmask = net->xfrm.policy_bydst[dir].hmask;
	unsigned int nhashmask = xfrm_new_hash_mask(hmask);
	unsigned int nsize = (nhashmask + 1) * sizeof(struct hlist_head);
	struct hlist_head *odst = net->xfrm.policy_bydst[dir].table;
	struct hlist_head *ndst = xfrm_hash_alloc(nsize);
	int i;

	if (!ndst)
		return;

	write_lock_bh(&xfrm_policy_lock);

	for (i = hmask; i >= 0; i--)
		xfrm_dst_hash_transfer(odst + i, ndst, nhashmask);

	net->xfrm.policy_bydst[dir].table = ndst;
	net->xfrm.policy_bydst[dir].hmask = nhashmask;

	write_unlock_bh(&xfrm_policy_lock);

	xfrm_hash_free(odst, (hmask + 1) * sizeof(struct hlist_head));
}

static void xfrm_byidx_resize(struct net *net, int total)
{
	unsigned int hmask = net->xfrm.policy_idx_hmask;
	unsigned int nhashmask = xfrm_new_hash_mask(hmask);
	unsigned int nsize = (nhashmask + 1) * sizeof(struct hlist_head);
	struct hlist_head *oidx = net->xfrm.policy_byidx;
	struct hlist_head *nidx = xfrm_hash_alloc(nsize);
	int i;

	if (!nidx)
		return;

	write_lock_bh(&xfrm_policy_lock);

	for (i = hmask; i >= 0; i--)
		xfrm_idx_hash_transfer(oidx + i, nidx, nhashmask);

	net->xfrm.policy_byidx = nidx;
	net->xfrm.policy_idx_hmask = nhashmask;

	write_unlock_bh(&xfrm_policy_lock);

	xfrm_hash_free(oidx, (hmask + 1) * sizeof(struct hlist_head));
}

static inline int xfrm_bydst_should_resize(struct net *net, int dir, int *total)
{
	unsigned int cnt = net->xfrm.policy_count[dir];
	unsigned int hmask = net->xfrm.policy_bydst[dir].hmask;

	if (total)
		*total += cnt;

	if ((hmask + 1) < xfrm_policy_hashmax &&
	    cnt > hmask)
		return 1;

	return 0;
}

static inline int xfrm_byidx_should_resize(struct net *net, int total)
{
	unsigned int hmask = net->xfrm.policy_idx_hmask;

	if ((hmask + 1) < xfrm_policy_hashmax &&
	    total > hmask)
		return 1;

	return 0;
}

void xfrm_spd_getinfo(struct net *net, struct xfrmk_spdinfo *si)
{
	read_lock_bh(&xfrm_policy_lock);
	si->incnt = net->xfrm.policy_count[XFRM_POLICY_IN];
	si->outcnt = net->xfrm.policy_count[XFRM_POLICY_OUT];
	si->fwdcnt = net->xfrm.policy_count[XFRM_POLICY_FWD];
	si->inscnt = net->xfrm.policy_count[XFRM_POLICY_IN+XFRM_POLICY_MAX];
	si->outscnt = net->xfrm.policy_count[XFRM_POLICY_OUT+XFRM_POLICY_MAX];
	si->fwdscnt = net->xfrm.policy_count[XFRM_POLICY_FWD+XFRM_POLICY_MAX];
	si->spdhcnt = net->xfrm.policy_idx_hmask;
	si->spdhmcnt = xfrm_policy_hashmax;
	read_unlock_bh(&xfrm_policy_lock);
}
EXPORT_SYMBOL(xfrm_spd_getinfo);

static DEFINE_MUTEX(hash_resize_mutex);
static void xfrm_hash_resize(struct work_struct *work)
{
	struct net *net = container_of(work, struct net, xfrm.policy_hash_work);
	int dir, total;

	mutex_lock(&hash_resize_mutex);

	total = 0;
	for (dir = 0; dir < XFRM_POLICY_MAX * 2; dir++) {
		if (xfrm_bydst_should_resize(net, dir, &total))
			xfrm_bydst_resize(net, dir);
	}
	if (xfrm_byidx_should_resize(net, total))
		xfrm_byidx_resize(net, total);

	mutex_unlock(&hash_resize_mutex);
}

/* Generate a new index... KAME seems to generate indices ordered by cost,
 * at the price of completely unpredictable rule ordering. That will not do
 * here. */
static u32 xfrm_gen_index(struct net *net, int dir)
{
	static u32 idx_generator;

	for (;;) {
		struct hlist_head *list;
		struct xfrm_policy *p;
		u32 idx;
		int found;

		idx = (idx_generator | dir);
		idx_generator += 8;
		if (idx == 0)
			idx = 8;
		list = net->xfrm.policy_byidx + idx_hash(net, idx);
		found = 0;
		hlist_for_each_entry(p, list, byidx) {
			if (p->index == idx) {
				found = 1;
				break;
			}
		}
		if (!found)
			return idx;
	}
}

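/* Index layout sketch: the generator advances in steps of 8, so the low
 * three bits of every index are free to carry the direction.  An index of
 * 0x28 therefore decodes as generator value 0x28 with dir == 0
 * (XFRM_POLICY_IN); xfrm_policy_id2dir() simply masks those bits back out.
 * The value here is illustrative only.
 */
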
static inline int selector_cmp(struct xfrm_selector *s1, struct xfrm_selector *s2)
{
	u32 *p1 = (u32 *) s1;
	u32 *p2 = (u32 *) s2;
	int len = sizeof(struct xfrm_selector) / sizeof(u32);
	int i;

	for (i = 0; i < len; i++) {
		if (p1[i] != p2[i])
			return 1;
	}

	return 0;
}

static void xfrm_policy_requeue(struct xfrm_policy *old,
				struct xfrm_policy *new)
{
	struct xfrm_policy_queue *pq = &old->polq;
	struct sk_buff_head list;

	__skb_queue_head_init(&list);

	spin_lock_bh(&pq->hold_queue.lock);
	skb_queue_splice_init(&pq->hold_queue, &list);
	if (del_timer(&pq->hold_timer))
		xfrm_pol_put(old);
	spin_unlock_bh(&pq->hold_queue.lock);

	if (skb_queue_empty(&list))
		return;

	pq = &new->polq;

	spin_lock_bh(&pq->hold_queue.lock);
	skb_queue_splice(&list, &pq->hold_queue);
	pq->timeout = XFRM_QUEUE_TMO_MIN;
	if (!mod_timer(&pq->hold_timer, jiffies))
		xfrm_pol_hold(new);
	spin_unlock_bh(&pq->hold_queue.lock);
}

static bool xfrm_policy_mark_match(struct xfrm_policy *policy,
				   struct xfrm_policy *pol)
{
	u32 mark = policy->mark.v & policy->mark.m;

	if (policy->mark.v == pol->mark.v && policy->mark.m == pol->mark.m)
		return true;

	if ((mark & pol->mark.m) == pol->mark.v &&
	    policy->priority == pol->priority)
		return true;

	return false;
}

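/* Sketch (not compiled): two policies with identical mark value and mask
 * compare equal here regardless of priority; policies whose masked marks
 * merely coincide additionally need equal priority.  The values below are
 * hypothetical.
 */
#if 0
static bool example_mark_match(void)
{
	struct xfrm_policy a = { .mark = { .v = 0x1, .m = 0xff }, .priority = 10 };
	struct xfrm_policy b = { .mark = { .v = 0x1, .m = 0xff }, .priority = 20 };

	/* True: value and mask are identical, so priority is not consulted. */
	return xfrm_policy_mark_match(&a, &b);
}
#endif
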
int xfrm_policy_insert(int dir, struct xfrm_policy *policy, int excl)
{
	struct net *net = xp_net(policy);
	struct xfrm_policy *pol;
	struct xfrm_policy *delpol;
	struct hlist_head *chain;
	struct hlist_node *newpos;

	write_lock_bh(&xfrm_policy_lock);
	chain = policy_hash_bysel(net, &policy->selector, policy->family, dir);
	delpol = NULL;
	newpos = NULL;
	hlist_for_each_entry(pol, chain, bydst) {
		if (pol->type == policy->type &&
		    !selector_cmp(&pol->selector, &policy->selector) &&
		    xfrm_policy_mark_match(policy, pol) &&
		    xfrm_sec_ctx_match(pol->security, policy->security) &&
		    !WARN_ON(delpol)) {
			if (excl) {
				write_unlock_bh(&xfrm_policy_lock);
				return -EEXIST;
			}
			delpol = pol;
			if (policy->priority > pol->priority)
				continue;
		} else if (policy->priority >= pol->priority) {
			newpos = &pol->bydst;
			continue;
		}
		if (delpol)
			break;
	}
	if (newpos)
		hlist_add_after(newpos, &policy->bydst);
	else
		hlist_add_head(&policy->bydst, chain);
	xfrm_pol_hold(policy);
	net->xfrm.policy_count[dir]++;
	atomic_inc(&flow_cache_genid);

	/* After the previous checks, the family can only be AF_INET or AF_INET6 */
	if (policy->family == AF_INET)
		rt_genid_bump_ipv4(net);
	else
		rt_genid_bump_ipv6(net);

	if (delpol) {
		xfrm_policy_requeue(delpol, policy);
		__xfrm_policy_unlink(delpol, dir);
	}
	policy->index = delpol ? delpol->index : xfrm_gen_index(net, dir);
	hlist_add_head(&policy->byidx, net->xfrm.policy_byidx+idx_hash(net, policy->index));
	policy->curlft.add_time = get_seconds();
	policy->curlft.use_time = 0;
	if (!mod_timer(&policy->timer, jiffies + HZ))
		xfrm_pol_hold(policy);
	list_add(&policy->walk.all, &net->xfrm.policy_all);
	write_unlock_bh(&xfrm_policy_lock);

	if (delpol)
		xfrm_policy_kill(delpol);
	else if (xfrm_bydst_should_resize(net, dir, NULL))
		schedule_work(&net->xfrm.policy_hash_work);

	return 0;
}
EXPORT_SYMBOL(xfrm_policy_insert);

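/* Usage sketch (not compiled): roughly what the netlink front end does
 * after building a policy from userspace attributes.  The "excl" flag
 * corresponds to XFRM_MSG_NEWPOLICY (fail on duplicates) versus
 * XFRM_MSG_UPDPOLICY (replace in place); "pol" and "excl" are placeholder
 * names here.
 */
#if 0
	err = xfrm_policy_insert(XFRM_POLICY_OUT, pol, excl);
	if (err == -EEXIST)
		/* an equivalent policy is already installed */
		xfrm_pol_put(pol);
#endif
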
struct xfrm_policy *xfrm_policy_bysel_ctx(struct net *net, u32 mark, u8 type,
					  int dir, struct xfrm_selector *sel,
					  struct xfrm_sec_ctx *ctx, int delete,
					  int *err)
{
	struct xfrm_policy *pol, *ret;
	struct hlist_head *chain;

	*err = 0;
	write_lock_bh(&xfrm_policy_lock);
	chain = policy_hash_bysel(net, sel, sel->family, dir);
	ret = NULL;
	hlist_for_each_entry(pol, chain, bydst) {
		if (pol->type == type &&
		    (mark & pol->mark.m) == pol->mark.v &&
		    !selector_cmp(sel, &pol->selector) &&
		    xfrm_sec_ctx_match(ctx, pol->security)) {
			xfrm_pol_hold(pol);
			if (delete) {
				*err = security_xfrm_policy_delete(
								pol->security);
				if (*err) {
					write_unlock_bh(&xfrm_policy_lock);
					return pol;
				}
				__xfrm_policy_unlink(pol, dir);
			}
			ret = pol;
			break;
		}
	}
	write_unlock_bh(&xfrm_policy_lock);

	if (ret && delete)
		xfrm_policy_kill(ret);
	return ret;
}
EXPORT_SYMBOL(xfrm_policy_bysel_ctx);

struct xfrm_policy *xfrm_policy_byid(struct net *net, u32 mark, u8 type,
				     int dir, u32 id, int delete, int *err)
{
	struct xfrm_policy *pol, *ret;
	struct hlist_head *chain;

	*err = -ENOENT;
	if (xfrm_policy_id2dir(id) != dir)
		return NULL;

	*err = 0;
	write_lock_bh(&xfrm_policy_lock);
	chain = net->xfrm.policy_byidx + idx_hash(net, id);
	ret = NULL;
	hlist_for_each_entry(pol, chain, byidx) {
		if (pol->type == type && pol->index == id &&
		    (mark & pol->mark.m) == pol->mark.v) {
			xfrm_pol_hold(pol);
			if (delete) {
				*err = security_xfrm_policy_delete(
								pol->security);
				if (*err) {
					write_unlock_bh(&xfrm_policy_lock);
					return pol;
				}
				__xfrm_policy_unlink(pol, dir);
			}
			ret = pol;
			break;
		}
	}
	write_unlock_bh(&xfrm_policy_lock);

	if (ret && delete)
		xfrm_policy_kill(ret);
	return ret;
}
EXPORT_SYMBOL(xfrm_policy_byid);

#ifdef CONFIG_SECURITY_NETWORK_XFRM
static inline int
xfrm_policy_flush_secctx_check(struct net *net, u8 type, struct xfrm_audit *audit_info)
{
	int dir, err = 0;

	for (dir = 0; dir < XFRM_POLICY_MAX; dir++) {
		struct xfrm_policy *pol;
		int i;

		hlist_for_each_entry(pol,
				     &net->xfrm.policy_inexact[dir], bydst) {
			if (pol->type != type)
				continue;
			err = security_xfrm_policy_delete(pol->security);
			if (err) {
				xfrm_audit_policy_delete(pol, 0,
							 audit_info->loginuid,
							 audit_info->sessionid,
							 audit_info->secid);
				return err;
			}
		}
		for (i = net->xfrm.policy_bydst[dir].hmask; i >= 0; i--) {
			hlist_for_each_entry(pol,
					     net->xfrm.policy_bydst[dir].table + i,
					     bydst) {
				if (pol->type != type)
					continue;
				err = security_xfrm_policy_delete(
								pol->security);
				if (err) {
					xfrm_audit_policy_delete(pol, 0,
							audit_info->loginuid,
							audit_info->sessionid,
							audit_info->secid);
					return err;
				}
			}
		}
	}
	return err;
}
#else
static inline int
xfrm_policy_flush_secctx_check(struct net *net, u8 type, struct xfrm_audit *audit_info)
{
	return 0;
}
#endif

int xfrm_policy_flush(struct net *net, u8 type, struct xfrm_audit *audit_info)
{
	int dir, err = 0, cnt = 0;

	write_lock_bh(&xfrm_policy_lock);

	err = xfrm_policy_flush_secctx_check(net, type, audit_info);
	if (err)
		goto out;

	for (dir = 0; dir < XFRM_POLICY_MAX; dir++) {
		struct xfrm_policy *pol;
		int i;

	again1:
		hlist_for_each_entry(pol,
				     &net->xfrm.policy_inexact[dir], bydst) {
			if (pol->type != type)
				continue;
			__xfrm_policy_unlink(pol, dir);
			write_unlock_bh(&xfrm_policy_lock);
			cnt++;

			xfrm_audit_policy_delete(pol, 1, audit_info->loginuid,
						 audit_info->sessionid,
						 audit_info->secid);

			xfrm_policy_kill(pol);

			write_lock_bh(&xfrm_policy_lock);
			goto again1;
		}

		for (i = net->xfrm.policy_bydst[dir].hmask; i >= 0; i--) {
	again2:
			hlist_for_each_entry(pol,
					     net->xfrm.policy_bydst[dir].table + i,
					     bydst) {
				if (pol->type != type)
					continue;
				__xfrm_policy_unlink(pol, dir);
				write_unlock_bh(&xfrm_policy_lock);
				cnt++;

				xfrm_audit_policy_delete(pol, 1,
							 audit_info->loginuid,
							 audit_info->sessionid,
							 audit_info->secid);
				xfrm_policy_kill(pol);

				write_lock_bh(&xfrm_policy_lock);
				goto again2;
			}
		}
	}
	if (!cnt)
		err = -ESRCH;
out:
	write_unlock_bh(&xfrm_policy_lock);
	return err;
}
EXPORT_SYMBOL(xfrm_policy_flush);

int xfrm_policy_walk(struct net *net, struct xfrm_policy_walk *walk,
		     int (*func)(struct xfrm_policy *, int, int, void*),
		     void *data)
{
	struct xfrm_policy *pol;
	struct xfrm_policy_walk_entry *x;
	int error = 0;

	if (walk->type >= XFRM_POLICY_TYPE_MAX &&
	    walk->type != XFRM_POLICY_TYPE_ANY)
		return -EINVAL;

	if (list_empty(&walk->walk.all) && walk->seq != 0)
		return 0;

	write_lock_bh(&xfrm_policy_lock);
	if (list_empty(&walk->walk.all))
		x = list_first_entry(&net->xfrm.policy_all, struct xfrm_policy_walk_entry, all);
	else
		x = list_entry(&walk->walk.all, struct xfrm_policy_walk_entry, all);
	list_for_each_entry_from(x, &net->xfrm.policy_all, all) {
		if (x->dead)
			continue;
		pol = container_of(x, struct xfrm_policy, walk);
		if (walk->type != XFRM_POLICY_TYPE_ANY &&
		    walk->type != pol->type)
			continue;
		error = func(pol, xfrm_policy_id2dir(pol->index),
			     walk->seq, data);
		if (error) {
			list_move_tail(&walk->walk.all, &x->all);
			goto out;
		}
		walk->seq++;
	}
	if (walk->seq == 0) {
		error = -ENOENT;
		goto out;
	}
	list_del_init(&walk->walk.all);
out:
	write_unlock_bh(&xfrm_policy_lock);
	return error;
}
EXPORT_SYMBOL(xfrm_policy_walk);

void xfrm_policy_walk_init(struct xfrm_policy_walk *walk, u8 type)
{
	INIT_LIST_HEAD(&walk->walk.all);
	walk->walk.dead = 1;
	walk->type = type;
	walk->seq = 0;
}
EXPORT_SYMBOL(xfrm_policy_walk_init);

void xfrm_policy_walk_done(struct xfrm_policy_walk *walk)
{
	if (list_empty(&walk->walk.all))
		return;

	write_lock_bh(&xfrm_policy_lock);
	list_del(&walk->walk.all);
	write_unlock_bh(&xfrm_policy_lock);
}
EXPORT_SYMBOL(xfrm_policy_walk_done);

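/* Iteration sketch (not compiled): the walk API is resumable, which is how
 * userspace dumps survive interruption.  A hypothetical callback that just
 * counts entries would be driven like this; both function names below are
 * made up for illustration.
 */
#if 0
static int count_one(struct xfrm_policy *pol, int dir, int seq, void *data)
{
	(*(int *)data)++;
	return 0;	/* non-zero would pause the walk here */
}

static int example_count_policies(struct net *net)
{
	struct xfrm_policy_walk walk;
	int count = 0;

	xfrm_policy_walk_init(&walk, XFRM_POLICY_TYPE_ANY);
	xfrm_policy_walk(net, &walk, count_one, &count);
	xfrm_policy_walk_done(&walk);
	return count;
}
#endif
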
/*
 * Find policy to apply to this flow.
 *
 * Returns 0 if policy found, else an -errno.
 */
static int xfrm_policy_match(const struct xfrm_policy *pol,
			     const struct flowi *fl,
			     u8 type, u16 family, int dir)
{
	const struct xfrm_selector *sel = &pol->selector;
	int ret = -ESRCH;
	bool match;

	if (pol->family != family ||
	    (fl->flowi_mark & pol->mark.m) != pol->mark.v ||
	    pol->type != type)
		return ret;

	match = xfrm_selector_match(sel, fl, family);
	if (match)
		ret = security_xfrm_policy_lookup(pol->security, fl->flowi_secid,
						  dir);

	return ret;
}

static struct xfrm_policy *xfrm_policy_lookup_bytype(struct net *net, u8 type,
						     const struct flowi *fl,
						     u16 family, u8 dir)
{
	int err;
	struct xfrm_policy *pol, *ret;
	const xfrm_address_t *daddr, *saddr;
	struct hlist_head *chain;
	u32 priority = ~0U;

	daddr = xfrm_flowi_daddr(fl, family);
	saddr = xfrm_flowi_saddr(fl, family);
	if (unlikely(!daddr || !saddr))
		return NULL;

	read_lock_bh(&xfrm_policy_lock);
	chain = policy_hash_direct(net, daddr, saddr, family, dir);
	ret = NULL;
	hlist_for_each_entry(pol, chain, bydst) {
		err = xfrm_policy_match(pol, fl, type, family, dir);
		if (err) {
			if (err == -ESRCH)
				continue;
			else {
				ret = ERR_PTR(err);
				goto fail;
			}
		} else {
			ret = pol;
			priority = ret->priority;
			break;
		}
	}
	chain = &net->xfrm.policy_inexact[dir];
	hlist_for_each_entry(pol, chain, bydst) {
		err = xfrm_policy_match(pol, fl, type, family, dir);
		if (err) {
			if (err == -ESRCH)
				continue;
			else {
				ret = ERR_PTR(err);
				goto fail;
			}
		} else if (pol->priority < priority) {
			ret = pol;
			break;
		}
	}
	if (ret)
		xfrm_pol_hold(ret);
fail:
	read_unlock_bh(&xfrm_policy_lock);

	return ret;
}

static struct xfrm_policy *
__xfrm_policy_lookup(struct net *net, const struct flowi *fl, u16 family, u8 dir)
{
#ifdef CONFIG_XFRM_SUB_POLICY
	struct xfrm_policy *pol;

	pol = xfrm_policy_lookup_bytype(net, XFRM_POLICY_TYPE_SUB, fl, family, dir);
	if (pol != NULL)
		return pol;
#endif
	return xfrm_policy_lookup_bytype(net, XFRM_POLICY_TYPE_MAIN, fl, family, dir);
}

static int flow_to_policy_dir(int dir)
{
	if (XFRM_POLICY_IN == FLOW_DIR_IN &&
	    XFRM_POLICY_OUT == FLOW_DIR_OUT &&
	    XFRM_POLICY_FWD == FLOW_DIR_FWD)
		return dir;

	switch (dir) {
	default:
	case FLOW_DIR_IN:
		return XFRM_POLICY_IN;
	case FLOW_DIR_OUT:
		return XFRM_POLICY_OUT;
	case FLOW_DIR_FWD:
		return XFRM_POLICY_FWD;
	}
}

static struct flow_cache_object *
xfrm_policy_lookup(struct net *net, const struct flowi *fl, u16 family,
		   u8 dir, struct flow_cache_object *old_obj, void *ctx)
{
	struct xfrm_policy *pol;

	if (old_obj)
		xfrm_pol_put(container_of(old_obj, struct xfrm_policy, flo));

	pol = __xfrm_policy_lookup(net, fl, family, flow_to_policy_dir(dir));
	if (IS_ERR_OR_NULL(pol))
		return ERR_CAST(pol);

	/* The resolver returns two references:
	 * one for the cache and one for the caller of flow_cache_lookup() */
	xfrm_pol_hold(pol);

	return &pol->flo;
}

static inline int policy_to_flow_dir(int dir)
{
	if (XFRM_POLICY_IN == FLOW_DIR_IN &&
	    XFRM_POLICY_OUT == FLOW_DIR_OUT &&
	    XFRM_POLICY_FWD == FLOW_DIR_FWD)
		return dir;
	switch (dir) {
	default:
	case XFRM_POLICY_IN:
		return FLOW_DIR_IN;
	case XFRM_POLICY_OUT:
		return FLOW_DIR_OUT;
	case XFRM_POLICY_FWD:
		return FLOW_DIR_FWD;
	}
}

1101 
1102 static struct xfrm_policy *xfrm_sk_policy_lookup(struct sock *sk, int dir,
1103 						 const struct flowi *fl)
1104 {
1105 	struct xfrm_policy *pol;
1106 
1107 	read_lock_bh(&xfrm_policy_lock);
1108 	if ((pol = sk->sk_policy[dir]) != NULL) {
1109 		bool match = xfrm_selector_match(&pol->selector, fl,
1110 						 sk->sk_family);
1111 		int err = 0;
1112 
1113 		if (match) {
1114 			if ((sk->sk_mark & pol->mark.m) != pol->mark.v) {
1115 				pol = NULL;
1116 				goto out;
1117 			}
1118 			err = security_xfrm_policy_lookup(pol->security,
1119 						      fl->flowi_secid,
1120 						      policy_to_flow_dir(dir));
1121 			if (!err)
1122 				xfrm_pol_hold(pol);
1123 			else if (err == -ESRCH)
1124 				pol = NULL;
1125 			else
1126 				pol = ERR_PTR(err);
1127 		} else
1128 			pol = NULL;
1129 	}
1130 out:
1131 	read_unlock_bh(&xfrm_policy_lock);
1132 	return pol;
1133 }
1134 
1135 static void __xfrm_policy_link(struct xfrm_policy *pol, int dir)
1136 {
1137 	struct net *net = xp_net(pol);
1138 	struct hlist_head *chain = policy_hash_bysel(net, &pol->selector,
1139 						     pol->family, dir);
1140 
1141 	list_add(&pol->walk.all, &net->xfrm.policy_all);
1142 	hlist_add_head(&pol->bydst, chain);
1143 	hlist_add_head(&pol->byidx, net->xfrm.policy_byidx+idx_hash(net, pol->index));
1144 	net->xfrm.policy_count[dir]++;
1145 	xfrm_pol_hold(pol);
1146 
1147 	if (xfrm_bydst_should_resize(net, dir, NULL))
1148 		schedule_work(&net->xfrm.policy_hash_work);
1149 }
1150 
1151 static struct xfrm_policy *__xfrm_policy_unlink(struct xfrm_policy *pol,
1152 						int dir)
1153 {
1154 	struct net *net = xp_net(pol);
1155 
1156 	if (hlist_unhashed(&pol->bydst))
1157 		return NULL;
1158 
1159 	hlist_del(&pol->bydst);
1160 	hlist_del(&pol->byidx);
1161 	list_del(&pol->walk.all);
1162 	net->xfrm.policy_count[dir]--;
1163 
1164 	return pol;
1165 }
1166 
1167 int xfrm_policy_delete(struct xfrm_policy *pol, int dir)
1168 {
1169 	write_lock_bh(&xfrm_policy_lock);
1170 	pol = __xfrm_policy_unlink(pol, dir);
1171 	write_unlock_bh(&xfrm_policy_lock);
1172 	if (pol) {
1173 		xfrm_policy_kill(pol);
1174 		return 0;
1175 	}
1176 	return -ENOENT;
1177 }
1178 EXPORT_SYMBOL(xfrm_policy_delete);
1179 
1180 int xfrm_sk_policy_insert(struct sock *sk, int dir, struct xfrm_policy *pol)
1181 {
1182 	struct net *net = xp_net(pol);
1183 	struct xfrm_policy *old_pol;
1184 
1185 #ifdef CONFIG_XFRM_SUB_POLICY
1186 	if (pol && pol->type != XFRM_POLICY_TYPE_MAIN)
1187 		return -EINVAL;
1188 #endif
1189 
1190 	write_lock_bh(&xfrm_policy_lock);
1191 	old_pol = sk->sk_policy[dir];
1192 	sk->sk_policy[dir] = pol;
1193 	if (pol) {
1194 		pol->curlft.add_time = get_seconds();
1195 		pol->index = xfrm_gen_index(net, XFRM_POLICY_MAX+dir);
1196 		__xfrm_policy_link(pol, XFRM_POLICY_MAX+dir);
1197 	}
1198 	if (old_pol) {
1199 		if (pol)
1200 			xfrm_policy_requeue(old_pol, pol);
1201 
1202 		/* Unlinking succeeds always. This is the only function
1203 		 * allowed to delete or replace socket policy.
1204 		 */
1205 		__xfrm_policy_unlink(old_pol, XFRM_POLICY_MAX+dir);
1206 	}
1207 	write_unlock_bh(&xfrm_policy_lock);
1208 
1209 	if (old_pol) {
1210 		xfrm_policy_kill(old_pol);
1211 	}
1212 	return 0;
1213 }
1214 
static struct xfrm_policy *clone_policy(const struct xfrm_policy *old, int dir)
{
	struct xfrm_policy *newp = xfrm_policy_alloc(xp_net(old), GFP_ATOMIC);

	if (newp) {
		newp->selector = old->selector;
		if (security_xfrm_policy_clone(old->security,
					       &newp->security)) {
			kfree(newp);
			return NULL;  /* ENOMEM */
		}
		newp->lft = old->lft;
		newp->curlft = old->curlft;
		newp->mark = old->mark;
		newp->action = old->action;
		newp->flags = old->flags;
		newp->xfrm_nr = old->xfrm_nr;
		newp->index = old->index;
		newp->type = old->type;
		memcpy(newp->xfrm_vec, old->xfrm_vec,
		       newp->xfrm_nr*sizeof(struct xfrm_tmpl));
		write_lock_bh(&xfrm_policy_lock);
		__xfrm_policy_link(newp, XFRM_POLICY_MAX+dir);
		write_unlock_bh(&xfrm_policy_lock);
		xfrm_pol_put(newp);
	}
	return newp;
}

int __xfrm_sk_clone_policy(struct sock *sk)
{
	struct xfrm_policy *p0 = sk->sk_policy[0],
			   *p1 = sk->sk_policy[1];

	sk->sk_policy[0] = sk->sk_policy[1] = NULL;
	if (p0 && (sk->sk_policy[0] = clone_policy(p0, 0)) == NULL)
		return -ENOMEM;
	if (p1 && (sk->sk_policy[1] = clone_policy(p1, 1)) == NULL)
		return -ENOMEM;
	return 0;
}

static int
xfrm_get_saddr(struct net *net, xfrm_address_t *local, xfrm_address_t *remote,
	       unsigned short family)
{
	int err;
	struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);

	if (unlikely(afinfo == NULL))
		return -EINVAL;
	err = afinfo->get_saddr(net, local, remote);
	xfrm_policy_put_afinfo(afinfo);
	return err;
}

/* Resolve list of templates for the flow, given policy. */

static int
xfrm_tmpl_resolve_one(struct xfrm_policy *policy, const struct flowi *fl,
		      struct xfrm_state **xfrm, unsigned short family)
{
	struct net *net = xp_net(policy);
	int nx;
	int i, error;
	xfrm_address_t *daddr = xfrm_flowi_daddr(fl, family);
	xfrm_address_t *saddr = xfrm_flowi_saddr(fl, family);
	xfrm_address_t tmp;

	for (nx = 0, i = 0; i < policy->xfrm_nr; i++) {
		struct xfrm_state *x;
		xfrm_address_t *remote = daddr;
		xfrm_address_t *local  = saddr;
		struct xfrm_tmpl *tmpl = &policy->xfrm_vec[i];

		if (tmpl->mode == XFRM_MODE_TUNNEL ||
		    tmpl->mode == XFRM_MODE_BEET) {
			remote = &tmpl->id.daddr;
			local = &tmpl->saddr;
			if (xfrm_addr_any(local, tmpl->encap_family)) {
				error = xfrm_get_saddr(net, &tmp, remote, tmpl->encap_family);
				if (error)
					goto fail;
				local = &tmp;
			}
		}

		x = xfrm_state_find(remote, local, fl, tmpl, policy, &error, family);

		if (x && x->km.state == XFRM_STATE_VALID) {
			xfrm[nx++] = x;
			daddr = remote;
			saddr = local;
			continue;
		}
		if (x) {
			error = (x->km.state == XFRM_STATE_ERROR ?
				 -EINVAL : -EAGAIN);
			xfrm_state_put(x);
		} else if (error == -ESRCH)
			error = -EAGAIN;

		if (!tmpl->optional)
			goto fail;
	}
	return nx;

fail:
	for (nx--; nx >= 0; nx--)
		xfrm_state_put(xfrm[nx]);
	return error;
}

static int
xfrm_tmpl_resolve(struct xfrm_policy **pols, int npols, const struct flowi *fl,
		  struct xfrm_state **xfrm, unsigned short family)
{
	struct xfrm_state *tp[XFRM_MAX_DEPTH];
	struct xfrm_state **tpp = (npols > 1) ? tp : xfrm;
	int cnx = 0;
	int error;
	int ret;
	int i;

	for (i = 0; i < npols; i++) {
		if (cnx + pols[i]->xfrm_nr >= XFRM_MAX_DEPTH) {
			error = -ENOBUFS;
			goto fail;
		}

		ret = xfrm_tmpl_resolve_one(pols[i], fl, &tpp[cnx], family);
		if (ret < 0) {
			error = ret;
			goto fail;
		} else
			cnx += ret;
	}

	/* found states are sorted for outbound processing */
	if (npols > 1)
		xfrm_state_sort(xfrm, tpp, cnx, family);

	return cnx;

fail:
	for (cnx--; cnx >= 0; cnx--)
		xfrm_state_put(tpp[cnx]);
	return error;
}

/* Check that the bundle accepts the flow and its components are
 * still valid.
 */

static inline int xfrm_get_tos(const struct flowi *fl, int family)
{
	struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
	int tos;

	if (!afinfo)
		return -EINVAL;

	tos = afinfo->get_tos(fl);

	xfrm_policy_put_afinfo(afinfo);

	return tos;
}

static struct flow_cache_object *xfrm_bundle_flo_get(struct flow_cache_object *flo)
{
	struct xfrm_dst *xdst = container_of(flo, struct xfrm_dst, flo);
	struct dst_entry *dst = &xdst->u.dst;

	if (xdst->route == NULL) {
		/* Dummy bundle - if it still has xfrms, template resolution
		 * failed and the bundle could not be built; we need to try
		 * resolving again. */
		if (xdst->num_xfrms > 0)
			return NULL;
	} else if (dst->flags & DST_XFRM_QUEUE) {
		return NULL;
	} else {
		/* Real bundle */
		if (stale_bundle(dst))
			return NULL;
	}

	dst_hold(dst);
	return flo;
}

static int xfrm_bundle_flo_check(struct flow_cache_object *flo)
{
	struct xfrm_dst *xdst = container_of(flo, struct xfrm_dst, flo);
	struct dst_entry *dst = &xdst->u.dst;

	if (!xdst->route)
		return 0;
	if (stale_bundle(dst))
		return 0;

	return 1;
}

static void xfrm_bundle_flo_delete(struct flow_cache_object *flo)
{
	struct xfrm_dst *xdst = container_of(flo, struct xfrm_dst, flo);
	struct dst_entry *dst = &xdst->u.dst;

	dst_free(dst);
}

static const struct flow_cache_ops xfrm_bundle_fc_ops = {
	.get = xfrm_bundle_flo_get,
	.check = xfrm_bundle_flo_check,
	.delete = xfrm_bundle_flo_delete,
};

static inline struct xfrm_dst *xfrm_alloc_dst(struct net *net, int family)
{
	struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
	struct dst_ops *dst_ops;
	struct xfrm_dst *xdst;

	if (!afinfo)
		return ERR_PTR(-EINVAL);

	switch (family) {
	case AF_INET:
		dst_ops = &net->xfrm.xfrm4_dst_ops;
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case AF_INET6:
		dst_ops = &net->xfrm.xfrm6_dst_ops;
		break;
#endif
	default:
		BUG();
	}
	xdst = dst_alloc(dst_ops, NULL, 0, DST_OBSOLETE_NONE, 0);

	if (likely(xdst)) {
		struct dst_entry *dst = &xdst->u.dst;

		memset(dst + 1, 0, sizeof(*xdst) - sizeof(*dst));
		xdst->flo.ops = &xfrm_bundle_fc_ops;
		if (afinfo->init_dst)
			afinfo->init_dst(net, xdst);
	} else
		xdst = ERR_PTR(-ENOBUFS);

	xfrm_policy_put_afinfo(afinfo);

	return xdst;
}

static inline int xfrm_init_path(struct xfrm_dst *path, struct dst_entry *dst,
				 int nfheader_len)
{
	struct xfrm_policy_afinfo *afinfo =
		xfrm_policy_get_afinfo(dst->ops->family);
	int err;

	if (!afinfo)
		return -EINVAL;

	err = afinfo->init_path(path, dst, nfheader_len);

	xfrm_policy_put_afinfo(afinfo);

	return err;
}

static inline int xfrm_fill_dst(struct xfrm_dst *xdst, struct net_device *dev,
				const struct flowi *fl)
{
	struct xfrm_policy_afinfo *afinfo =
		xfrm_policy_get_afinfo(xdst->u.dst.ops->family);
	int err;

	if (!afinfo)
		return -EINVAL;

	err = afinfo->fill_dst(xdst, dev, fl);

	xfrm_policy_put_afinfo(afinfo);

	return err;
}

/* Allocate a chain of dst_entry's, attach known xfrm's and calculate
 * all the metrics... In short, bundle a bundle.
 */

static struct dst_entry *xfrm_bundle_create(struct xfrm_policy *policy,
					    struct xfrm_state **xfrm, int nx,
					    const struct flowi *fl,
					    struct dst_entry *dst)
{
	struct net *net = xp_net(policy);
	unsigned long now = jiffies;
	struct net_device *dev;
	struct xfrm_mode *inner_mode;
	struct dst_entry *dst_prev = NULL;
	struct dst_entry *dst0 = NULL;
	int i = 0;
	int err;
	int header_len = 0;
	int nfheader_len = 0;
	int trailer_len = 0;
	int tos;
	int family = policy->selector.family;
	xfrm_address_t saddr, daddr;

	xfrm_flowi_addr_get(fl, &saddr, &daddr, family);

	tos = xfrm_get_tos(fl, family);
	err = tos;
	if (tos < 0)
		goto put_states;

	dst_hold(dst);

	for (; i < nx; i++) {
		struct xfrm_dst *xdst = xfrm_alloc_dst(net, family);
		struct dst_entry *dst1 = &xdst->u.dst;

		err = PTR_ERR(xdst);
		if (IS_ERR(xdst)) {
			dst_release(dst);
			goto put_states;
		}

		if (xfrm[i]->sel.family == AF_UNSPEC) {
			inner_mode = xfrm_ip2inner_mode(xfrm[i],
							xfrm_af2proto(family));
			if (!inner_mode) {
				err = -EAFNOSUPPORT;
				dst_release(dst);
				goto put_states;
			}
		} else
			inner_mode = xfrm[i]->inner_mode;

		if (!dst_prev)
			dst0 = dst1;
		else {
			dst_prev->child = dst_clone(dst1);
			dst1->flags |= DST_NOHASH;
		}

		xdst->route = dst;
		dst_copy_metrics(dst1, dst);

		if (xfrm[i]->props.mode != XFRM_MODE_TRANSPORT) {
			family = xfrm[i]->props.family;
			dst = xfrm_dst_lookup(xfrm[i], tos, &saddr, &daddr,
					      family);
			err = PTR_ERR(dst);
			if (IS_ERR(dst))
				goto put_states;
		} else
			dst_hold(dst);

		dst1->xfrm = xfrm[i];
		xdst->xfrm_genid = xfrm[i]->genid;

		dst1->obsolete = DST_OBSOLETE_FORCE_CHK;
		dst1->flags |= DST_HOST;
		dst1->lastuse = now;

		dst1->input = dst_discard;
		dst1->output = inner_mode->afinfo->output;

		dst1->next = dst_prev;
		dst_prev = dst1;

		header_len += xfrm[i]->props.header_len;
		if (xfrm[i]->type->flags & XFRM_TYPE_NON_FRAGMENT)
			nfheader_len += xfrm[i]->props.header_len;
		trailer_len += xfrm[i]->props.trailer_len;
	}

	dst_prev->child = dst;
	dst0->path = dst;

	err = -ENODEV;
	dev = dst->dev;
	if (!dev)
		goto free_dst;

	xfrm_init_path((struct xfrm_dst *)dst0, dst, nfheader_len);
	xfrm_init_pmtu(dst_prev);

	for (dst_prev = dst0; dst_prev != dst; dst_prev = dst_prev->child) {
		struct xfrm_dst *xdst = (struct xfrm_dst *)dst_prev;

		err = xfrm_fill_dst(xdst, dev, fl);
		if (err)
			goto free_dst;

		dst_prev->header_len = header_len;
		dst_prev->trailer_len = trailer_len;
		header_len -= xdst->u.dst.xfrm->props.header_len;
		trailer_len -= xdst->u.dst.xfrm->props.trailer_len;
	}

out:
	return dst0;

put_states:
	for (; i < nx; i++)
		xfrm_state_put(xfrm[i]);
free_dst:
	if (dst0)
		dst_free(dst0);
	dst0 = ERR_PTR(err);
	goto out;
}

static inline int
xfrm_dst_alloc_copy(void **target, const void *src, int size)
{
	if (!*target) {
		*target = kmalloc(size, GFP_ATOMIC);
		if (!*target)
			return -ENOMEM;
	}
	memcpy(*target, src, size);
	return 0;
}

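/* Note: the destination buffer is allocated once and then overwritten on
 * every subsequent call, so repeated updates (e.g. refreshing xdst->origin
 * in the helpers below) cost only a memcpy after the first time.
 */
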
static inline int
xfrm_dst_update_parent(struct dst_entry *dst, const struct xfrm_selector *sel)
{
#ifdef CONFIG_XFRM_SUB_POLICY
	struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
	return xfrm_dst_alloc_copy((void **)&(xdst->partner),
				   sel, sizeof(*sel));
#else
	return 0;
#endif
}

static inline int
xfrm_dst_update_origin(struct dst_entry *dst, const struct flowi *fl)
{
#ifdef CONFIG_XFRM_SUB_POLICY
	struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
	return xfrm_dst_alloc_copy((void **)&(xdst->origin), fl, sizeof(*fl));
#else
	return 0;
#endif
}

static int xfrm_expand_policies(const struct flowi *fl, u16 family,
				struct xfrm_policy **pols,
				int *num_pols, int *num_xfrms)
{
	int i;

	if (*num_pols == 0 || !pols[0]) {
		*num_pols = 0;
		*num_xfrms = 0;
		return 0;
	}
	if (IS_ERR(pols[0]))
		return PTR_ERR(pols[0]);

	*num_xfrms = pols[0]->xfrm_nr;

#ifdef CONFIG_XFRM_SUB_POLICY
	if (pols[0] && pols[0]->action == XFRM_POLICY_ALLOW &&
	    pols[0]->type != XFRM_POLICY_TYPE_MAIN) {
		pols[1] = xfrm_policy_lookup_bytype(xp_net(pols[0]),
						    XFRM_POLICY_TYPE_MAIN,
						    fl, family,
						    XFRM_POLICY_OUT);
		if (pols[1]) {
			if (IS_ERR(pols[1])) {
				xfrm_pols_put(pols, *num_pols);
				return PTR_ERR(pols[1]);
			}
			(*num_pols)++;
			(*num_xfrms) += pols[1]->xfrm_nr;
		}
	}
#endif
	for (i = 0; i < *num_pols; i++) {
		if (pols[i]->action != XFRM_POLICY_ALLOW) {
			*num_xfrms = -1;
			break;
		}
	}

	return 0;
}

static struct xfrm_dst *
xfrm_resolve_and_create_bundle(struct xfrm_policy **pols, int num_pols,
			       const struct flowi *fl, u16 family,
			       struct dst_entry *dst_orig)
{
	struct net *net = xp_net(pols[0]);
	struct xfrm_state *xfrm[XFRM_MAX_DEPTH];
	struct dst_entry *dst;
	struct xfrm_dst *xdst;
	int err;

	/* Try to instantiate a bundle */
	err = xfrm_tmpl_resolve(pols, num_pols, fl, xfrm, family);
	if (err <= 0) {
		if (err != 0 && err != -EAGAIN)
			XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTPOLERROR);
		return ERR_PTR(err);
	}

	dst = xfrm_bundle_create(pols[0], xfrm, err, fl, dst_orig);
	if (IS_ERR(dst)) {
		XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTBUNDLEGENERROR);
		return ERR_CAST(dst);
	}

	xdst = (struct xfrm_dst *)dst;
	xdst->num_xfrms = err;
	if (num_pols > 1)
		err = xfrm_dst_update_parent(dst, &pols[1]->selector);
	else
		err = xfrm_dst_update_origin(dst, fl);
	if (unlikely(err)) {
		dst_free(dst);
		XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTBUNDLECHECKERROR);
		return ERR_PTR(err);
	}

	xdst->num_pols = num_pols;
	memcpy(xdst->pols, pols, sizeof(struct xfrm_policy *) * num_pols);
	xdst->policy_genid = atomic_read(&pols[0]->genid);

	return xdst;
}

static void xfrm_policy_queue_process(unsigned long arg)
{
	int err = 0;
	struct sk_buff *skb;
	struct sock *sk;
	struct dst_entry *dst;
	struct xfrm_policy *pol = (struct xfrm_policy *)arg;
	struct xfrm_policy_queue *pq = &pol->polq;
	struct flowi fl;
	struct sk_buff_head list;

	spin_lock(&pq->hold_queue.lock);
	skb = skb_peek(&pq->hold_queue);
	if (!skb) {
		spin_unlock(&pq->hold_queue.lock);
		goto out;
	}
	dst = skb_dst(skb);
	sk = skb->sk;
	xfrm_decode_session(skb, &fl, dst->ops->family);
	spin_unlock(&pq->hold_queue.lock);

	dst_hold(dst->path);
	dst = xfrm_lookup(xp_net(pol), dst->path, &fl,
			  sk, 0);
	if (IS_ERR(dst))
		goto purge_queue;

	if (dst->flags & DST_XFRM_QUEUE) {
		dst_release(dst);

		if (pq->timeout >= XFRM_QUEUE_TMO_MAX)
			goto purge_queue;

		pq->timeout = pq->timeout << 1;
		if (!mod_timer(&pq->hold_timer, jiffies + pq->timeout))
			xfrm_pol_hold(pol);
		goto out;
	}

	dst_release(dst);

	__skb_queue_head_init(&list);

	spin_lock(&pq->hold_queue.lock);
	pq->timeout = 0;
	skb_queue_splice_init(&pq->hold_queue, &list);
	spin_unlock(&pq->hold_queue.lock);

	while (!skb_queue_empty(&list)) {
		skb = __skb_dequeue(&list);

		xfrm_decode_session(skb, &fl, skb_dst(skb)->ops->family);
		dst_hold(skb_dst(skb)->path);
		dst = xfrm_lookup(xp_net(pol), skb_dst(skb)->path,
				  &fl, skb->sk, 0);
		if (IS_ERR(dst)) {
			kfree_skb(skb);
			continue;
		}

		nf_reset(skb);
		skb_dst_drop(skb);
		skb_dst_set(skb, dst);

		err = dst_output(skb);
	}

out:
	xfrm_pol_put(pol);
	return;

purge_queue:
	pq->timeout = 0;
	xfrm_queue_purge(&pq->hold_queue);
	xfrm_pol_put(pol);
}

static int xdst_queue_output(struct sk_buff *skb)
{
	unsigned long sched_next;
	struct dst_entry *dst = skb_dst(skb);
	struct xfrm_dst *xdst = (struct xfrm_dst *) dst;
	struct xfrm_policy *pol = xdst->pols[0];
	struct xfrm_policy_queue *pq = &pol->polq;
	const struct sk_buff *fclone = skb + 1;

	if (unlikely(skb->fclone == SKB_FCLONE_ORIG &&
		     fclone->fclone == SKB_FCLONE_CLONE)) {
		kfree_skb(skb);
		return 0;
	}

	if (pq->hold_queue.qlen > XFRM_MAX_QUEUE_LEN) {
		kfree_skb(skb);
		return -EAGAIN;
	}

	skb_dst_force(skb);

	spin_lock_bh(&pq->hold_queue.lock);

	if (!pq->timeout)
		pq->timeout = XFRM_QUEUE_TMO_MIN;

	sched_next = jiffies + pq->timeout;

	if (del_timer(&pq->hold_timer)) {
		if (time_before(pq->hold_timer.expires, sched_next))
			sched_next = pq->hold_timer.expires;
		xfrm_pol_put(pol);
	}

	__skb_queue_tail(&pq->hold_queue, skb);
	if (!mod_timer(&pq->hold_timer, sched_next))
		xfrm_pol_hold(pol);

	spin_unlock_bh(&pq->hold_queue.lock);

	return 0;
}

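/* Backoff sketch: pq->timeout starts at XFRM_QUEUE_TMO_MIN (HZ/10) and
 * xfrm_policy_queue_process() doubles it after every failed rebuild, so
 * retries land roughly at 0.1s, 0.2s, 0.4s, ... until the cap of
 * XFRM_QUEUE_TMO_MAX (60s) is reached and the queue is purged
 * (illustrative schedule).
 */
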
static struct xfrm_dst *xfrm_create_dummy_bundle(struct net *net,
						 struct dst_entry *dst,
						 const struct flowi *fl,
						 int num_xfrms,
						 u16 family)
{
	int err;
	struct net_device *dev;
	struct dst_entry *dst1;
	struct xfrm_dst *xdst;

	xdst = xfrm_alloc_dst(net, family);
	if (IS_ERR(xdst))
		return xdst;

	if (net->xfrm.sysctl_larval_drop || num_xfrms <= 0 ||
	    (fl->flowi_flags & FLOWI_FLAG_CAN_SLEEP))
		return xdst;

	dst1 = &xdst->u.dst;
	dst_hold(dst);
	xdst->route = dst;

	dst_copy_metrics(dst1, dst);

	dst1->obsolete = DST_OBSOLETE_FORCE_CHK;
	dst1->flags |= DST_HOST | DST_XFRM_QUEUE;
	dst1->lastuse = jiffies;

	dst1->input = dst_discard;
	dst1->output = xdst_queue_output;

	dst_hold(dst);
	dst1->child = dst;
	dst1->path = dst;

	xfrm_init_path((struct xfrm_dst *)dst1, dst, 0);

	err = -ENODEV;
	dev = dst->dev;
	if (!dev)
		goto free_dst;

	err = xfrm_fill_dst(xdst, dev, fl);
	if (err)
		goto free_dst;

out:
	return xdst;

free_dst:
	dst_release(dst1);
	xdst = ERR_PTR(err);
	goto out;
}

1940 static struct flow_cache_object *
1941 xfrm_bundle_lookup(struct net *net, const struct flowi *fl, u16 family, u8 dir,
1942 		   struct flow_cache_object *oldflo, void *ctx)
1943 {
1944 	struct dst_entry *dst_orig = (struct dst_entry *)ctx;
1945 	struct xfrm_policy *pols[XFRM_POLICY_TYPE_MAX];
1946 	struct xfrm_dst *xdst, *new_xdst;
1947 	int num_pols = 0, num_xfrms = 0, i, err, pol_dead;
1948 
1949 	/* Check if the policies from old bundle are usable */
1950 	xdst = NULL;
1951 	if (oldflo) {
1952 		xdst = container_of(oldflo, struct xfrm_dst, flo);
1953 		num_pols = xdst->num_pols;
1954 		num_xfrms = xdst->num_xfrms;
1955 		pol_dead = 0;
1956 		for (i = 0; i < num_pols; i++) {
1957 			pols[i] = xdst->pols[i];
1958 			pol_dead |= pols[i]->walk.dead;
1959 		}
1960 		if (pol_dead) {
1961 			dst_free(&xdst->u.dst);
1962 			xdst = NULL;
1963 			num_pols = 0;
1964 			num_xfrms = 0;
1965 			oldflo = NULL;
1966 		}
1967 	}
1968 
1969 	/* Resolve policies to use if we couldn't get them from
1970 	 * previous cache entry */
1971 	if (xdst == NULL) {
1972 		num_pols = 1;
1973 		pols[0] = __xfrm_policy_lookup(net, fl, family,
1974 					       flow_to_policy_dir(dir));
1975 		err = xfrm_expand_policies(fl, family, pols,
1976 					   &num_pols, &num_xfrms);
1977 		if (err < 0)
1978 			goto inc_error;
1979 		if (num_pols == 0)
1980 			return NULL;
1981 		if (num_xfrms <= 0)
1982 			goto make_dummy_bundle;
1983 	}
1984 
1985 	new_xdst = xfrm_resolve_and_create_bundle(pols, num_pols, fl, family, dst_orig);
1986 	if (IS_ERR(new_xdst)) {
1987 		err = PTR_ERR(new_xdst);
1988 		if (err != -EAGAIN)
1989 			goto error;
1990 		if (oldflo == NULL)
1991 			goto make_dummy_bundle;
1992 		dst_hold(&xdst->u.dst);
1993 		return oldflo;
1994 	} else if (new_xdst == NULL) {
1995 		num_xfrms = 0;
1996 		if (oldflo == NULL)
1997 			goto make_dummy_bundle;
1998 		xdst->num_xfrms = 0;
1999 		dst_hold(&xdst->u.dst);
2000 		return oldflo;
2001 	}
2002 
2003 	/* Kill the previous bundle */
2004 	if (xdst) {
2005 		/* The policies were stolen for newly generated bundle */
2006 		xdst->num_pols = 0;
2007 		dst_free(&xdst->u.dst);
2008 	}
2009 
2010 	/* Flow cache does not have reference, it dst_free()'s,
2011 	 * but we do need to return one reference for original caller */
2012 	dst_hold(&new_xdst->u.dst);
2013 	return &new_xdst->flo;
2014 
2015 make_dummy_bundle:
2016 	/* We found policies, but there's no bundles to instantiate:
2017 	 * either because the policy blocks, has no transformations or
2018 	 * we could not build template (no xfrm_states).*/
2019 	xdst = xfrm_create_dummy_bundle(net, dst_orig, fl, num_xfrms, family);
2020 	if (IS_ERR(xdst)) {
2021 		xfrm_pols_put(pols, num_pols);
2022 		return ERR_CAST(xdst);
2023 	}
2024 	xdst->num_pols = num_pols;
2025 	xdst->num_xfrms = num_xfrms;
2026 	memcpy(xdst->pols, pols, sizeof(struct xfrm_policy *) * num_pols);
2027 
2028 	dst_hold(&xdst->u.dst);
2029 	return &xdst->flo;
2030 
2031 inc_error:
2032 	XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTPOLERROR);
2033 error:
2034 	if (xdst != NULL)
2035 		dst_free(&xdst->u.dst);
2036 	else
2037 		xfrm_pols_put(pols, num_pols);
2038 	return ERR_PTR(err);
2039 }
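/*
 * Resolver-contract note (descriptive comment added for clarity; this
 * summarizes the logic above, it is not new behaviour): flow_cache_lookup()
 * hands the cached object in as @oldflo.  xfrm_bundle_lookup() must either
 * return that object with an extra reference taken, return a fresh object
 * that the cache will own (again with one extra reference for the caller),
 * return NULL for "no policy", or return an ERR_PTR().  The cache itself
 * never holds a dst reference -- it relies on dst_free() -- which is why
 * every successful exit path above does dst_hold() before returning.
 */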
2040 
2041 static struct dst_entry *make_blackhole(struct net *net, u16 family,
2042 					struct dst_entry *dst_orig)
2043 {
2044 	struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
2045 	struct dst_entry *ret;
2046 
2047 	if (!afinfo) {
2048 		dst_release(dst_orig);
2049 		return ERR_PTR(-EINVAL);
2050 	} else {
2051 		ret = afinfo->blackhole_route(net, dst_orig);
2052 	}
2053 	xfrm_policy_put_afinfo(afinfo);
2054 
2055 	return ret;
2056 }
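/*
 * Contract note (descriptive comment, added for clarity): like the error
 * paths of xfrm_lookup() below, make_blackhole() consumes @dst_orig on
 * every path -- either by releasing it here when no afinfo is registered,
 * or by handing it to the af-specific blackhole_route(), which is expected
 * to drop it once the packet-discarding route has been built.
 */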
2057 
2058 /* Main function: finds/creates a bundle for given flow.
2059  *
2060  * At the moment we consume a raw IP route, mostly to speed up
2061  * lookups on interfaces with IPsec disabled.
2062  */
2063 struct dst_entry *xfrm_lookup(struct net *net, struct dst_entry *dst_orig,
2064 			      const struct flowi *fl,
2065 			      struct sock *sk, int flags)
2066 {
2067 	struct xfrm_policy *pols[XFRM_POLICY_TYPE_MAX];
2068 	struct flow_cache_object *flo;
2069 	struct xfrm_dst *xdst;
2070 	struct dst_entry *dst, *route;
2071 	u16 family = dst_orig->ops->family;
2072 	u8 dir = policy_to_flow_dir(XFRM_POLICY_OUT);
2073 	int i, err, num_pols, num_xfrms = 0, drop_pols = 0;
2074 
2075 restart:
2076 	dst = NULL;
2077 	xdst = NULL;
2078 	route = NULL;
2079 
2080 	if (sk && sk->sk_policy[XFRM_POLICY_OUT]) {
2081 		num_pols = 1;
2082 		pols[0] = xfrm_sk_policy_lookup(sk, XFRM_POLICY_OUT, fl);
2083 		err = xfrm_expand_policies(fl, family, pols,
2084 					   &num_pols, &num_xfrms);
2085 		if (err < 0)
2086 			goto dropdst;
2087 
2088 		if (num_pols) {
2089 			if (num_xfrms <= 0) {
2090 				drop_pols = num_pols;
2091 				goto no_transform;
2092 			}
2093 
2094 			xdst = xfrm_resolve_and_create_bundle(
2095 					pols, num_pols, fl,
2096 					family, dst_orig);
2097 			if (IS_ERR(xdst)) {
2098 				xfrm_pols_put(pols, num_pols);
2099 				err = PTR_ERR(xdst);
2100 				goto dropdst;
2101 			} else if (xdst == NULL) {
2102 				num_xfrms = 0;
2103 				drop_pols = num_pols;
2104 				goto no_transform;
2105 			}
2106 
2107 			dst_hold(&xdst->u.dst);
2108 
2109 			spin_lock_bh(&xfrm_policy_sk_bundle_lock);
2110 			xdst->u.dst.next = xfrm_policy_sk_bundles;
2111 			xfrm_policy_sk_bundles = &xdst->u.dst;
2112 			spin_unlock_bh(&xfrm_policy_sk_bundle_lock);
2113 
2114 			route = xdst->route;
2115 		}
2116 	}
2117 
2118 	if (xdst == NULL) {
2119 		/* To accelerate a bit...  */
2120 		if ((dst_orig->flags & DST_NOXFRM) ||
2121 		    !net->xfrm.policy_count[XFRM_POLICY_OUT])
2122 			goto nopol;
2123 
2124 		flo = flow_cache_lookup(net, fl, family, dir,
2125 					xfrm_bundle_lookup, dst_orig);
2126 		if (flo == NULL)
2127 			goto nopol;
2128 		if (IS_ERR(flo)) {
2129 			err = PTR_ERR(flo);
2130 			goto dropdst;
2131 		}
2132 		xdst = container_of(flo, struct xfrm_dst, flo);
2133 
2134 		num_pols = xdst->num_pols;
2135 		num_xfrms = xdst->num_xfrms;
2136 		memcpy(pols, xdst->pols, sizeof(struct xfrm_policy *) * num_pols);
2137 		route = xdst->route;
2138 	}
2139 
2140 	dst = &xdst->u.dst;
2141 	if (route == NULL && num_xfrms > 0) {
2142 		/* The only case when xfrm_bundle_lookup() returns a
2143 		 * bundle with a null route is when the template could
2144 		 * not be resolved. It means policies are there, but the
2145 		 * bundle could not be created, since we don't yet
2146 		 * have the xfrm_states. We need to wait for the KM to
2147 		 * negotiate new SAs or bail out with an error. */
2148 		if (net->xfrm.sysctl_larval_drop) {
2149 			dst_release(dst);
2150 			xfrm_pols_put(pols, drop_pols);
2151 			XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTNOSTATES);
2152 
2153 			return make_blackhole(net, family, dst_orig);
2154 		}
2155 		if (fl->flowi_flags & FLOWI_FLAG_CAN_SLEEP) {
2156 			DECLARE_WAITQUEUE(wait, current);
2157 
2158 			add_wait_queue(&net->xfrm.km_waitq, &wait);
2159 			set_current_state(TASK_INTERRUPTIBLE);
2160 			schedule();
2161 			set_current_state(TASK_RUNNING);
2162 			remove_wait_queue(&net->xfrm.km_waitq, &wait);
2163 
2164 			if (!signal_pending(current)) {
2165 				dst_release(dst);
2166 				goto restart;
2167 			}
2168 
2169 			err = -ERESTART;
2170 		} else
2171 			err = -EAGAIN;
2172 
2173 		XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTNOSTATES);
2174 		goto error;
2175 	}
2176 
2177 no_transform:
2178 	if (num_pols == 0)
2179 		goto nopol;
2180 
2181 	if ((flags & XFRM_LOOKUP_ICMP) &&
2182 	    !(pols[0]->flags & XFRM_POLICY_ICMP)) {
2183 		err = -ENOENT;
2184 		goto error;
2185 	}
2186 
2187 	for (i = 0; i < num_pols; i++)
2188 		pols[i]->curlft.use_time = get_seconds();
2189 
2190 	if (num_xfrms < 0) {
2191 		/* Prohibit the flow */
2192 		XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTPOLBLOCK);
2193 		err = -EPERM;
2194 		goto error;
2195 	} else if (num_xfrms > 0) {
2196 		/* Flow transformed */
2197 		dst_release(dst_orig);
2198 	} else {
2199 		/* Flow passes untransformed */
2200 		dst_release(dst);
2201 		dst = dst_orig;
2202 	}
2203 ok:
2204 	xfrm_pols_put(pols, drop_pols);
2205 	if (dst && dst->xfrm &&
2206 	    dst->xfrm->props.mode == XFRM_MODE_TUNNEL)
2207 		dst->flags |= DST_XFRM_TUNNEL;
2208 	return dst;
2209 
2210 nopol:
2211 	if (!(flags & XFRM_LOOKUP_ICMP)) {
2212 		dst = dst_orig;
2213 		goto ok;
2214 	}
2215 	err = -ENOENT;
2216 error:
2217 	dst_release(dst);
2218 dropdst:
2219 	dst_release(dst_orig);
2220 	xfrm_pols_put(pols, drop_pols);
2221 	return ERR_PTR(err);
2222 }
2223 EXPORT_SYMBOL(xfrm_lookup);
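/*
 * Illustrative caller sketch (not part of the original file; the helper
 * name is hypothetical): callers resolve a plain route first and then let
 * xfrm_lookup() return it untouched, replace it with a bundle, or fail.
 * Note that xfrm_lookup() consumes dst_orig on error, so the caller must
 * not release it again.  __xfrm_route_forward() below follows this shape.
 */
static inline struct dst_entry *example_xfrm_resolve(struct net *net,
						     struct dst_entry *rt,
						     const struct flowi *fl,
						     struct sock *sk)
{
	struct dst_entry *dst = xfrm_lookup(net, rt, fl, sk, 0);

	if (IS_ERR(dst))
		return NULL;	/* rt was already released on the error path */
	return dst;		/* plain route, or the head of an xfrm bundle */
}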
2224 
2225 static inline int
2226 xfrm_secpath_reject(int idx, struct sk_buff *skb, const struct flowi *fl)
2227 {
2228 	struct xfrm_state *x;
2229 
2230 	if (!skb->sp || idx < 0 || idx >= skb->sp->len)
2231 		return 0;
2232 	x = skb->sp->xvec[idx];
2233 	if (!x->type->reject)
2234 		return 0;
2235 	return x->type->reject(x, skb, fl);
2236 }
2237 
2238 /* When the skb is transformed back to its "native" form, we have to
2239  * check policy restrictions. At the moment we do this in a maximally
2240  * stupid way. Shame on me. :-) Of course, connected sockets must
2241  * have the policy cached on them.
2242  */
2243 
2244 static inline int
2245 xfrm_state_ok(const struct xfrm_tmpl *tmpl, const struct xfrm_state *x,
2246 	      unsigned short family)
2247 {
2248 	if (xfrm_state_kern(x))
2249 		return tmpl->optional && !xfrm_state_addr_cmp(tmpl, x, tmpl->encap_family);
2250 	return	x->id.proto == tmpl->id.proto &&
2251 		(x->id.spi == tmpl->id.spi || !tmpl->id.spi) &&
2252 		(x->props.reqid == tmpl->reqid || !tmpl->reqid) &&
2253 		x->props.mode == tmpl->mode &&
2254 		(tmpl->allalgs || (tmpl->aalgos & (1<<x->props.aalgo)) ||
2255 		 !(xfrm_id_proto_match(tmpl->id.proto, IPSEC_PROTO_ANY))) &&
2256 		!(x->props.mode != XFRM_MODE_TRANSPORT &&
2257 		  xfrm_state_addr_cmp(tmpl, x, family));
2258 }
2259 
2260 /*
2261  * Zero or a positive value is returned when validation succeeds (either a
2262  * bypass because of an optional transport mode, or the next index of the
2263  * matched secpath state with the template).
2264  * -1 is returned when no matching template is found.
2265  * Otherwise "-2 - errored_index" is returned.
2266  */
2267 static inline int
2268 xfrm_policy_ok(const struct xfrm_tmpl *tmpl, const struct sec_path *sp, int start,
2269 	       unsigned short family)
2270 {
2271 	int idx = start;
2272 
2273 	if (tmpl->optional) {
2274 		if (tmpl->mode == XFRM_MODE_TRANSPORT)
2275 			return start;
2276 	} else
2277 		start = -1;
2278 	for (; idx < sp->len; idx++) {
2279 		if (xfrm_state_ok(tmpl, sp->xvec[idx], family))
2280 			return ++idx;
2281 		if (sp->xvec[idx]->props.mode != XFRM_MODE_TRANSPORT) {
2282 			if (start == -1)
2283 				start = -2-idx;
2284 			break;
2285 		}
2286 	}
2287 	return start;
2288 }
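/*
 * Worked example of the return encoding above (comment added for
 * clarity): with a secpath of length four, a template matched at xvec
 * index 2 makes xfrm_policy_ok() return 3, the next index to scan.  If
 * a non-transport state blocks the scan at index 1 before any match,
 * the function returns -2 - 1 = -3, and __xfrm_policy_check() below
 * recovers the offending index as xerr_idx = -(2 + k) = 1.
 */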
2289 
2290 int __xfrm_decode_session(struct sk_buff *skb, struct flowi *fl,
2291 			  unsigned int family, int reverse)
2292 {
2293 	struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
2294 	int err;
2295 
2296 	if (unlikely(afinfo == NULL))
2297 		return -EAFNOSUPPORT;
2298 
2299 	afinfo->decode_session(skb, fl, reverse);
2300 	err = security_xfrm_decode_session(skb, &fl->flowi_secid);
2301 	xfrm_policy_put_afinfo(afinfo);
2302 	return err;
2303 }
2304 EXPORT_SYMBOL(__xfrm_decode_session);
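/*
 * Usage note (added for clarity; a hedged summary rather than new code):
 * the @reverse argument asks the af-specific decode_session() hook to
 * swap source and destination when filling in the flowi.  Callers such
 * as the ICMP error handlers use this so that the flow extracted from
 * an inner, quoted packet describes the original outbound flow.
 */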
2305 
2306 static inline int secpath_has_nontransport(const struct sec_path *sp, int k, int *idxp)
2307 {
2308 	for (; k < sp->len; k++) {
2309 		if (sp->xvec[k]->props.mode != XFRM_MODE_TRANSPORT) {
2310 			*idxp = k;
2311 			return 1;
2312 		}
2313 	}
2314 
2315 	return 0;
2316 }
2317 
2318 int __xfrm_policy_check(struct sock *sk, int dir, struct sk_buff *skb,
2319 			unsigned short family)
2320 {
2321 	struct net *net = dev_net(skb->dev);
2322 	struct xfrm_policy *pol;
2323 	struct xfrm_policy *pols[XFRM_POLICY_TYPE_MAX];
2324 	int npols = 0;
2325 	int xfrm_nr;
2326 	int pi;
2327 	int reverse;
2328 	struct flowi fl;
2329 	u8 fl_dir;
2330 	int xerr_idx = -1;
2331 
2332 	reverse = dir & ~XFRM_POLICY_MASK;
2333 	dir &= XFRM_POLICY_MASK;
2334 	fl_dir = policy_to_flow_dir(dir);
2335 
2336 	if (__xfrm_decode_session(skb, &fl, family, reverse) < 0) {
2337 		XFRM_INC_STATS(net, LINUX_MIB_XFRMINHDRERROR);
2338 		return 0;
2339 	}
2340 
2341 	nf_nat_decode_session(skb, &fl, family);
2342 
2343 	/* First, check the used SAs against their selectors. */
2344 	if (skb->sp) {
2345 		int i;
2346 
2347 		for (i = skb->sp->len - 1; i >= 0; i--) {
2348 			struct xfrm_state *x = skb->sp->xvec[i];
2349 			if (!xfrm_selector_match(&x->sel, &fl, family)) {
2350 				XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATEMISMATCH);
2351 				return 0;
2352 			}
2353 		}
2354 	}
2355 
2356 	pol = NULL;
2357 	if (sk && sk->sk_policy[dir]) {
2358 		pol = xfrm_sk_policy_lookup(sk, dir, &fl);
2359 		if (IS_ERR(pol)) {
2360 			XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLERROR);
2361 			return 0;
2362 		}
2363 	}
2364 
2365 	if (!pol) {
2366 		struct flow_cache_object *flo;
2367 
2368 		flo = flow_cache_lookup(net, &fl, family, fl_dir,
2369 					xfrm_policy_lookup, NULL);
2370 		if (IS_ERR_OR_NULL(flo))
2371 			pol = ERR_CAST(flo);
2372 		else
2373 			pol = container_of(flo, struct xfrm_policy, flo);
2374 	}
2375 
2376 	if (IS_ERR(pol)) {
2377 		XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLERROR);
2378 		return 0;
2379 	}
2380 
2381 	if (!pol) {
2382 		if (skb->sp && secpath_has_nontransport(skb->sp, 0, &xerr_idx)) {
2383 			xfrm_secpath_reject(xerr_idx, skb, &fl);
2384 			XFRM_INC_STATS(net, LINUX_MIB_XFRMINNOPOLS);
2385 			return 0;
2386 		}
2387 		return 1;
2388 	}
2389 
2390 	pol->curlft.use_time = get_seconds();
2391 
2392 	pols[0] = pol;
2393 	npols++;
2394 #ifdef CONFIG_XFRM_SUB_POLICY
2395 	if (pols[0]->type != XFRM_POLICY_TYPE_MAIN) {
2396 		pols[1] = xfrm_policy_lookup_bytype(net, XFRM_POLICY_TYPE_MAIN,
2397 						    &fl, family,
2398 						    XFRM_POLICY_IN);
2399 		if (pols[1]) {
2400 			if (IS_ERR(pols[1])) {
2401 				XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLERROR);
2402 				return 0;
2403 			}
2404 			pols[1]->curlft.use_time = get_seconds();
2405 			npols++;
2406 		}
2407 	}
2408 #endif
2409 
2410 	if (pol->action == XFRM_POLICY_ALLOW) {
2411 		struct sec_path *sp;
2412 		static struct sec_path dummy;
2413 		struct xfrm_tmpl *tp[XFRM_MAX_DEPTH];
2414 		struct xfrm_tmpl *stp[XFRM_MAX_DEPTH];
2415 		struct xfrm_tmpl **tpp = tp;
2416 		int ti = 0;
2417 		int i, k;
2418 
2419 		if ((sp = skb->sp) == NULL)
2420 			sp = &dummy;
2421 
2422 		for (pi = 0; pi < npols; pi++) {
2423 			if (pols[pi] != pol &&
2424 			    pols[pi]->action != XFRM_POLICY_ALLOW) {
2425 				XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLBLOCK);
2426 				goto reject;
2427 			}
2428 			if (ti + pols[pi]->xfrm_nr >= XFRM_MAX_DEPTH) {
2429 				XFRM_INC_STATS(net, LINUX_MIB_XFRMINBUFFERERROR);
2430 				goto reject_error;
2431 			}
2432 			for (i = 0; i < pols[pi]->xfrm_nr; i++)
2433 				tpp[ti++] = &pols[pi]->xfrm_vec[i];
2434 		}
2435 		xfrm_nr = ti;
2436 		if (npols > 1) {
2437 			xfrm_tmpl_sort(stp, tpp, xfrm_nr, family);
2438 			tpp = stp;
2439 		}
2440 
2441 		/* For each tunnel xfrm, find the first matching tmpl.
2442 		 * For each tmpl before that, find corresponding xfrm.
2443 		 * Order is _important_. Later we will implement
2444 		 * some barriers, but at the moment barriers
2445 		 * are implied between every two consecutive transformations.
2446 		 */
2447 		for (i = xfrm_nr-1, k = 0; i >= 0; i--) {
2448 			k = xfrm_policy_ok(tpp[i], sp, k, family);
2449 			if (k < 0) {
2450 				if (k < -1)
2451 					/* "-2 - errored_index" returned */
2452 					xerr_idx = -(2+k);
2453 				XFRM_INC_STATS(net, LINUX_MIB_XFRMINTMPLMISMATCH);
2454 				goto reject;
2455 			}
2456 		}
2457 
2458 		if (secpath_has_nontransport(sp, k, &xerr_idx)) {
2459 			XFRM_INC_STATS(net, LINUX_MIB_XFRMINTMPLMISMATCH);
2460 			goto reject;
2461 		}
2462 
2463 		xfrm_pols_put(pols, npols);
2464 		return 1;
2465 	}
2466 	XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLBLOCK);
2467 
2468 reject:
2469 	xfrm_secpath_reject(xerr_idx, skb, &fl);
2470 reject_error:
2471 	xfrm_pols_put(pols, npols);
2472 	return 0;
2473 }
2474 EXPORT_SYMBOL(__xfrm_policy_check);
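/*
 * Illustrative sketch of the direction encoding consumed above (an
 * assumption-flagged example, not part of this file; the helper name is
 * hypothetical): callers pack an optional "decode in reverse" flag into
 * the bits above XFRM_POLICY_MASK, which __xfrm_policy_check() splits
 * apart again on entry.
 */
static inline int example_pack_check_dir(int dir, bool reverse)
{
	/* low bits carry the XFRM_POLICY_{IN,OUT,FWD} direction itself */
	return dir | (reverse ? XFRM_POLICY_MASK + 1 : 0);
}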
2475 
2476 int __xfrm_route_forward(struct sk_buff *skb, unsigned short family)
2477 {
2478 	struct net *net = dev_net(skb->dev);
2479 	struct flowi fl;
2480 	struct dst_entry *dst;
2481 	int res = 1;
2482 
2483 	if (xfrm_decode_session(skb, &fl, family) < 0) {
2484 		XFRM_INC_STATS(net, LINUX_MIB_XFRMFWDHDRERROR);
2485 		return 0;
2486 	}
2487 
2488 	skb_dst_force(skb);
2489 
2490 	dst = xfrm_lookup(net, skb_dst(skb), &fl, NULL, 0);
2491 	if (IS_ERR(dst)) {
2492 		res = 0;
2493 		dst = NULL;
2494 	}
2495 	skb_dst_set(skb, dst);
2496 	return res;
2497 }
2498 EXPORT_SYMBOL(__xfrm_route_forward);
2499 
2500 /* Optimize later using cookies and generation ids. */
2501 
2502 static struct dst_entry *xfrm_dst_check(struct dst_entry *dst, u32 cookie)
2503 {
2504 	/* Code (such as xfrm_bundle_create()) sets dst->obsolete
2505 	 * to DST_OBSOLETE_FORCE_CHK to force all XFRM destinations to
2506 	 * get validated by dst_ops->check on every use.  We do this
2507 	 * because when a normal route referenced by an XFRM dst is
2508 	 * obsoleted we do not go looking around for all parent
2509 	 * referencing XFRM dsts so that we can invalidate them.  It
2510 	 * is just too much work.  Instead we make the checks here on
2511 	 * every use.  For example:
2512 	 *
2513 	 *	XFRM dst A --> IPv4 dst X
2514 	 *
2515 	 * X is the "xdst->route" of A (X is also the "dst->path" of A
2516 	 * in this example).  If X is marked obsolete, "A" will not
2517 	 * notice.  That's what we are validating here via the
2518 	 * stale_bundle() check.
2519 	 *
2520 	 * When a policy's bundle is pruned, we dst_free() the XFRM
2521 	 * dst which causes its ->obsolete field to be set to
2522 	 * DST_OBSOLETE_DEAD.  If an XFRM dst has been pruned like
2523 	 * this, we want to force a new route lookup.
2524 	 */
2525 	if (dst->obsolete < 0 && !stale_bundle(dst))
2526 		return dst;
2527 
2528 	return NULL;
2529 }
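/*
 * Usage sketch (illustrative, not part of this file; the helper name is
 * hypothetical): consumers that cache a dst revalidate it through
 * dst_check(), which lands in xfrm_dst_check() for xfrm dsts because
 * ->obsolete is always DST_OBSOLETE_FORCE_CHK.  A NULL result means the
 * bundle went stale and the caller must drop it and route again.
 */
static inline struct dst_entry *example_revalidate(struct dst_entry *cached,
						   u32 cookie)
{
	struct dst_entry *dst = dst_check(cached, cookie);

	if (!dst)		/* stale: release our reference and relookup */
		dst_release(cached);
	return dst;
}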
2530 
2531 static int stale_bundle(struct dst_entry *dst)
2532 {
2533 	return !xfrm_bundle_ok((struct xfrm_dst *)dst);
2534 }
2535 
2536 void xfrm_dst_ifdown(struct dst_entry *dst, struct net_device *dev)
2537 {
2538 	while ((dst = dst->child) && dst->xfrm && dst->dev == dev) {
2539 		dst->dev = dev_net(dev)->loopback_dev;
2540 		dev_hold(dst->dev);
2541 		dev_put(dev);
2542 	}
2543 }
2544 EXPORT_SYMBOL(xfrm_dst_ifdown);
2545 
2546 static void xfrm_link_failure(struct sk_buff *skb)
2547 {
2548 	/* Impossible. Such a dst must be popped before it reaches the point of failure. */
2549 }
2550 
2551 static struct dst_entry *xfrm_negative_advice(struct dst_entry *dst)
2552 {
2553 	if (dst) {
2554 		if (dst->obsolete) {
2555 			dst_release(dst);
2556 			dst = NULL;
2557 		}
2558 	}
2559 	return dst;
2560 }
2561 
2562 static void __xfrm_garbage_collect(struct net *net)
2563 {
2564 	struct dst_entry *head, *next;
2565 
2566 	spin_lock_bh(&xfrm_policy_sk_bundle_lock);
2567 	head = xfrm_policy_sk_bundles;
2568 	xfrm_policy_sk_bundles = NULL;
2569 	spin_unlock_bh(&xfrm_policy_sk_bundle_lock);
2570 
2571 	while (head) {
2572 		next = head->next;
2573 		dst_free(head);
2574 		head = next;
2575 	}
2576 }
2577 
2578 void xfrm_garbage_collect(struct net *net)
2579 {
2580 	flow_cache_flush();
2581 	__xfrm_garbage_collect(net);
2582 }
2583 EXPORT_SYMBOL(xfrm_garbage_collect);
2584 
2585 static void xfrm_garbage_collect_deferred(struct net *net)
2586 {
2587 	flow_cache_flush_deferred();
2588 	__xfrm_garbage_collect(net);
2589 }
2590 
2591 static void xfrm_init_pmtu(struct dst_entry *dst)
2592 {
2593 	do {
2594 		struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
2595 		u32 pmtu, route_mtu_cached;
2596 
2597 		pmtu = dst_mtu(dst->child);
2598 		xdst->child_mtu_cached = pmtu;
2599 
2600 		pmtu = xfrm_state_mtu(dst->xfrm, pmtu);
2601 
2602 		route_mtu_cached = dst_mtu(xdst->route);
2603 		xdst->route_mtu_cached = route_mtu_cached;
2604 
2605 		if (pmtu > route_mtu_cached)
2606 			pmtu = route_mtu_cached;
2607 
2608 		dst_metric_set(dst, RTAX_MTU, pmtu);
2609 	} while ((dst = dst->next));
2610 }
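/*
 * Worked example of the clamp above (comment added for clarity; the
 * numbers are illustrative): with a 1500 byte child MTU, a tunnel-mode
 * ESP state whose xfrm_state_mtu() comes out at, say, 1438 after header,
 * IV, padding and trailer overhead, and a 1500 byte route MTU, the
 * bundle's RTAX_MTU becomes min(1438, 1500) = 1438.
 */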
2611 
2612 /* Check that the bundle accepts the flow and that its components are
2613  * still valid.
2614  */
2615 
2616 static int xfrm_bundle_ok(struct xfrm_dst *first)
2617 {
2618 	struct dst_entry *dst = &first->u.dst;
2619 	struct xfrm_dst *last;
2620 	u32 mtu;
2621 
2622 	if (!dst_check(dst->path, ((struct xfrm_dst *)dst)->path_cookie) ||
2623 	    (dst->dev && !netif_running(dst->dev)))
2624 		return 0;
2625 
2626 	if (dst->flags & DST_XFRM_QUEUE)
2627 		return 1;
2628 
2629 	last = NULL;
2630 
2631 	do {
2632 		struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
2633 
2634 		if (dst->xfrm->km.state != XFRM_STATE_VALID)
2635 			return 0;
2636 		if (xdst->xfrm_genid != dst->xfrm->genid)
2637 			return 0;
2638 		if (xdst->num_pols > 0 &&
2639 		    xdst->policy_genid != atomic_read(&xdst->pols[0]->genid))
2640 			return 0;
2641 
2642 		mtu = dst_mtu(dst->child);
2643 		if (xdst->child_mtu_cached != mtu) {
2644 			last = xdst;
2645 			xdst->child_mtu_cached = mtu;
2646 		}
2647 
2648 		if (!dst_check(xdst->route, xdst->route_cookie))
2649 			return 0;
2650 		mtu = dst_mtu(xdst->route);
2651 		if (xdst->route_mtu_cached != mtu) {
2652 			last = xdst;
2653 			xdst->route_mtu_cached = mtu;
2654 		}
2655 
2656 		dst = dst->child;
2657 	} while (dst->xfrm);
2658 
2659 	if (likely(!last))
2660 		return 1;
2661 
2662 	mtu = last->child_mtu_cached;
2663 	for (;;) {
2664 		dst = &last->u.dst;
2665 
2666 		mtu = xfrm_state_mtu(dst->xfrm, mtu);
2667 		if (mtu > last->route_mtu_cached)
2668 			mtu = last->route_mtu_cached;
2669 		dst_metric_set(dst, RTAX_MTU, mtu);
2670 
2671 		if (last == first)
2672 			break;
2673 
2674 		last = (struct xfrm_dst *)last->u.dst.next;
2675 		last->child_mtu_cached = mtu;
2676 	}
2677 
2678 	return 1;
2679 }
2680 
2681 static unsigned int xfrm_default_advmss(const struct dst_entry *dst)
2682 {
2683 	return dst_metric_advmss(dst->path);
2684 }
2685 
2686 static unsigned int xfrm_mtu(const struct dst_entry *dst)
2687 {
2688 	unsigned int mtu = dst_metric_raw(dst, RTAX_MTU);
2689 
2690 	return mtu ? : dst_mtu(dst->path);
2691 }
2692 
2693 static struct neighbour *xfrm_neigh_lookup(const struct dst_entry *dst,
2694 					   struct sk_buff *skb,
2695 					   const void *daddr)
2696 {
2697 	return dst->path->ops->neigh_lookup(dst, skb, daddr);
2698 }
2699 
2700 int xfrm_policy_register_afinfo(struct xfrm_policy_afinfo *afinfo)
2701 {
2702 	struct net *net;
2703 	int err = 0;
2704 	if (unlikely(afinfo == NULL))
2705 		return -EINVAL;
2706 	if (unlikely(afinfo->family >= NPROTO))
2707 		return -EAFNOSUPPORT;
2708 	spin_lock(&xfrm_policy_afinfo_lock);
2709 	if (unlikely(xfrm_policy_afinfo[afinfo->family] != NULL))
2710 		err = -ENOBUFS;
2711 	else {
2712 		struct dst_ops *dst_ops = afinfo->dst_ops;
2713 		if (likely(dst_ops->kmem_cachep == NULL))
2714 			dst_ops->kmem_cachep = xfrm_dst_cache;
2715 		if (likely(dst_ops->check == NULL))
2716 			dst_ops->check = xfrm_dst_check;
2717 		if (likely(dst_ops->default_advmss == NULL))
2718 			dst_ops->default_advmss = xfrm_default_advmss;
2719 		if (likely(dst_ops->mtu == NULL))
2720 			dst_ops->mtu = xfrm_mtu;
2721 		if (likely(dst_ops->negative_advice == NULL))
2722 			dst_ops->negative_advice = xfrm_negative_advice;
2723 		if (likely(dst_ops->link_failure == NULL))
2724 			dst_ops->link_failure = xfrm_link_failure;
2725 		if (likely(dst_ops->neigh_lookup == NULL))
2726 			dst_ops->neigh_lookup = xfrm_neigh_lookup;
2727 		if (likely(afinfo->garbage_collect == NULL))
2728 			afinfo->garbage_collect = xfrm_garbage_collect_deferred;
2729 		rcu_assign_pointer(xfrm_policy_afinfo[afinfo->family], afinfo);
2730 	}
2731 	spin_unlock(&xfrm_policy_afinfo_lock);
2732 
2733 	rtnl_lock();
2734 	for_each_net(net) {
2735 		struct dst_ops *xfrm_dst_ops;
2736 
2737 		switch (afinfo->family) {
2738 		case AF_INET:
2739 			xfrm_dst_ops = &net->xfrm.xfrm4_dst_ops;
2740 			break;
2741 #if IS_ENABLED(CONFIG_IPV6)
2742 		case AF_INET6:
2743 			xfrm_dst_ops = &net->xfrm.xfrm6_dst_ops;
2744 			break;
2745 #endif
2746 		default:
2747 			BUG();
2748 		}
2749 		*xfrm_dst_ops = *afinfo->dst_ops;
2750 	}
2751 	rtnl_unlock();
2752 
2753 	return err;
2754 }
2755 EXPORT_SYMBOL(xfrm_policy_register_afinfo);
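/*
 * Minimal registration sketch (illustrative; the names are hypothetical,
 * and a real protocol module such as net/ipv4/xfrm4_policy.c fills in
 * many more hooks).  Everything left NULL in dst_ops is defaulted by
 * xfrm_policy_register_afinfo() above.
 */
static struct dst_ops example_dst_ops = {
	.family = AF_INET,
};

static struct xfrm_policy_afinfo example_afinfo = {
	.family	 = AF_INET,
	.dst_ops = &example_dst_ops,
	/* .decode_session and .blackhole_route would be filled in here */
};

/* Registration would then be: xfrm_policy_register_afinfo(&example_afinfo); */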
2756 
2757 int xfrm_policy_unregister_afinfo(struct xfrm_policy_afinfo *afinfo)
2758 {
2759 	int err = 0;
2760 	if (unlikely(afinfo == NULL))
2761 		return -EINVAL;
2762 	if (unlikely(afinfo->family >= NPROTO))
2763 		return -EAFNOSUPPORT;
2764 	spin_lock(&xfrm_policy_afinfo_lock);
2765 	if (likely(xfrm_policy_afinfo[afinfo->family] != NULL)) {
2766 		if (unlikely(xfrm_policy_afinfo[afinfo->family] != afinfo))
2767 			err = -EINVAL;
2768 		else
2769 			RCU_INIT_POINTER(xfrm_policy_afinfo[afinfo->family],
2770 					 NULL);
2771 	}
2772 	spin_unlock(&xfrm_policy_afinfo_lock);
2773 	if (!err) {
2774 		struct dst_ops *dst_ops = afinfo->dst_ops;
2775 
2776 		synchronize_rcu();
2777 
2778 		dst_ops->kmem_cachep = NULL;
2779 		dst_ops->check = NULL;
2780 		dst_ops->negative_advice = NULL;
2781 		dst_ops->link_failure = NULL;
2782 		afinfo->garbage_collect = NULL;
2783 	}
2784 	return err;
2785 }
2786 EXPORT_SYMBOL(xfrm_policy_unregister_afinfo);
2787 
2788 static void __net_init xfrm_dst_ops_init(struct net *net)
2789 {
2790 	struct xfrm_policy_afinfo *afinfo;
2791 
2792 	rcu_read_lock();
2793 	afinfo = rcu_dereference(xfrm_policy_afinfo[AF_INET]);
2794 	if (afinfo)
2795 		net->xfrm.xfrm4_dst_ops = *afinfo->dst_ops;
2796 #if IS_ENABLED(CONFIG_IPV6)
2797 	afinfo = rcu_dereference(xfrm_policy_afinfo[AF_INET6]);
2798 	if (afinfo)
2799 		net->xfrm.xfrm6_dst_ops = *afinfo->dst_ops;
2800 #endif
2801 	rcu_read_unlock();
2802 }
2803 
2804 static int xfrm_dev_event(struct notifier_block *this, unsigned long event, void *ptr)
2805 {
2806 	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
2807 
2808 	switch (event) {
2809 	case NETDEV_DOWN:
2810 		xfrm_garbage_collect(dev_net(dev));
2811 	}
2812 	return NOTIFY_DONE;
2813 }
2814 
2815 static struct notifier_block xfrm_dev_notifier = {
2816 	.notifier_call	= xfrm_dev_event,
2817 };
2818 
2819 #ifdef CONFIG_XFRM_STATISTICS
2820 static int __net_init xfrm_statistics_init(struct net *net)
2821 {
2822 	int rv;
2823 
2824 	if (snmp_mib_init((void __percpu **)net->mib.xfrm_statistics,
2825 			  sizeof(struct linux_xfrm_mib),
2826 			  __alignof__(struct linux_xfrm_mib)) < 0)
2827 		return -ENOMEM;
2828 	rv = xfrm_proc_init(net);
2829 	if (rv < 0)
2830 		snmp_mib_free((void __percpu **)net->mib.xfrm_statistics);
2831 	return rv;
2832 }
2833 
2834 static void xfrm_statistics_fini(struct net *net)
2835 {
2836 	xfrm_proc_fini(net);
2837 	snmp_mib_free((void __percpu **)net->mib.xfrm_statistics);
2838 }
2839 #else
2840 static int __net_init xfrm_statistics_init(struct net *net)
2841 {
2842 	return 0;
2843 }
2844 
2845 static void xfrm_statistics_fini(struct net *net)
2846 {
2847 }
2848 #endif
2849 
2850 static int __net_init xfrm_policy_init(struct net *net)
2851 {
2852 	unsigned int hmask, sz;
2853 	int dir;
2854 
2855 	if (net_eq(net, &init_net))
2856 		xfrm_dst_cache = kmem_cache_create("xfrm_dst_cache",
2857 					   sizeof(struct xfrm_dst),
2858 					   0, SLAB_HWCACHE_ALIGN|SLAB_PANIC,
2859 					   NULL);
2860 
2861 	hmask = 8 - 1;
2862 	sz = (hmask+1) * sizeof(struct hlist_head);
2863 
2864 	net->xfrm.policy_byidx = xfrm_hash_alloc(sz);
2865 	if (!net->xfrm.policy_byidx)
2866 		goto out_byidx;
2867 	net->xfrm.policy_idx_hmask = hmask;
2868 
2869 	for (dir = 0; dir < XFRM_POLICY_MAX * 2; dir++) {
2870 		struct xfrm_policy_hash *htab;
2871 
2872 		net->xfrm.policy_count[dir] = 0;
2873 		INIT_HLIST_HEAD(&net->xfrm.policy_inexact[dir]);
2874 
2875 		htab = &net->xfrm.policy_bydst[dir];
2876 		htab->table = xfrm_hash_alloc(sz);
2877 		if (!htab->table)
2878 			goto out_bydst;
2879 		htab->hmask = hmask;
2880 	}
2881 
2882 	INIT_LIST_HEAD(&net->xfrm.policy_all);
2883 	INIT_WORK(&net->xfrm.policy_hash_work, xfrm_hash_resize);
2884 	if (net_eq(net, &init_net))
2885 		register_netdevice_notifier(&xfrm_dev_notifier);
2886 	return 0;
2887 
2888 out_bydst:
2889 	for (dir--; dir >= 0; dir--) {
2890 		struct xfrm_policy_hash *htab;
2891 
2892 		htab = &net->xfrm.policy_bydst[dir];
2893 		xfrm_hash_free(htab->table, sz);
2894 	}
2895 	xfrm_hash_free(net->xfrm.policy_byidx, sz);
2896 out_byidx:
2897 	return -ENOMEM;
2898 }
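/*
 * Sizing note (comment added for clarity): hmask = 8 - 1 gives every
 * per-direction "bydst" table and the shared "byidx" table an initial
 * eight hlist_head buckets, i.e. sz = 8 * sizeof(struct hlist_head);
 * xfrm_hash_resize(), scheduled through policy_hash_work, grows the
 * tables later as policies accumulate.
 */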
2899 
2900 static void xfrm_policy_fini(struct net *net)
2901 {
2902 	struct xfrm_audit audit_info;
2903 	unsigned int sz;
2904 	int dir;
2905 
2906 	flush_work(&net->xfrm.policy_hash_work);
2907 #ifdef CONFIG_XFRM_SUB_POLICY
2908 	audit_info.loginuid = INVALID_UID;
2909 	audit_info.sessionid = -1;
2910 	audit_info.secid = 0;
2911 	xfrm_policy_flush(net, XFRM_POLICY_TYPE_SUB, &audit_info);
2912 #endif
2913 	audit_info.loginuid = INVALID_UID;
2914 	audit_info.sessionid = -1;
2915 	audit_info.secid = 0;
2916 	xfrm_policy_flush(net, XFRM_POLICY_TYPE_MAIN, &audit_info);
2917 
2918 	WARN_ON(!list_empty(&net->xfrm.policy_all));
2919 
2920 	for (dir = 0; dir < XFRM_POLICY_MAX * 2; dir++) {
2921 		struct xfrm_policy_hash *htab;
2922 
2923 		WARN_ON(!hlist_empty(&net->xfrm.policy_inexact[dir]));
2924 
2925 		htab = &net->xfrm.policy_bydst[dir];
2926 		sz = (htab->hmask + 1) * sizeof(struct hlist_head);
2927 		WARN_ON(!hlist_empty(htab->table));
2928 		xfrm_hash_free(htab->table, sz);
2929 	}
2930 
2931 	sz = (net->xfrm.policy_idx_hmask + 1) * sizeof(struct hlist_head);
2932 	WARN_ON(!hlist_empty(net->xfrm.policy_byidx));
2933 	xfrm_hash_free(net->xfrm.policy_byidx, sz);
2934 }
2935 
2936 static int __net_init xfrm_net_init(struct net *net)
2937 {
2938 	int rv;
2939 
2940 	rv = xfrm_statistics_init(net);
2941 	if (rv < 0)
2942 		goto out_statistics;
2943 	rv = xfrm_state_init(net);
2944 	if (rv < 0)
2945 		goto out_state;
2946 	rv = xfrm_policy_init(net);
2947 	if (rv < 0)
2948 		goto out_policy;
2949 	xfrm_dst_ops_init(net);
2950 	rv = xfrm_sysctl_init(net);
2951 	if (rv < 0)
2952 		goto out_sysctl;
2953 	return 0;
2954 
2955 out_sysctl:
2956 	xfrm_policy_fini(net);
2957 out_policy:
2958 	xfrm_state_fini(net);
2959 out_state:
2960 	xfrm_statistics_fini(net);
2961 out_statistics:
2962 	return rv;
2963 }
2964 
2965 static void __net_exit xfrm_net_exit(struct net *net)
2966 {
2967 	xfrm_sysctl_fini(net);
2968 	xfrm_policy_fini(net);
2969 	xfrm_state_fini(net);
2970 	xfrm_statistics_fini(net);
2971 }
2972 
2973 static struct pernet_operations __net_initdata xfrm_net_ops = {
2974 	.init = xfrm_net_init,
2975 	.exit = xfrm_net_exit,
2976 };
2977 
2978 void __init xfrm_init(void)
2979 {
2980 	register_pernet_subsys(&xfrm_net_ops);
2981 	xfrm_input_init();
2982 }
2983 
2984 #ifdef CONFIG_AUDITSYSCALL
2985 static void xfrm_audit_common_policyinfo(struct xfrm_policy *xp,
2986 					 struct audit_buffer *audit_buf)
2987 {
2988 	struct xfrm_sec_ctx *ctx = xp->security;
2989 	struct xfrm_selector *sel = &xp->selector;
2990 
2991 	if (ctx)
2992 		audit_log_format(audit_buf, " sec_alg=%u sec_doi=%u sec_obj=%s",
2993 				 ctx->ctx_alg, ctx->ctx_doi, ctx->ctx_str);
2994 
2995 	switch (sel->family) {
2996 	case AF_INET:
2997 		audit_log_format(audit_buf, " src=%pI4", &sel->saddr.a4);
2998 		if (sel->prefixlen_s != 32)
2999 			audit_log_format(audit_buf, " src_prefixlen=%d",
3000 					 sel->prefixlen_s);
3001 		audit_log_format(audit_buf, " dst=%pI4", &sel->daddr.a4);
3002 		if (sel->prefixlen_d != 32)
3003 			audit_log_format(audit_buf, " dst_prefixlen=%d",
3004 					 sel->prefixlen_d);
3005 		break;
3006 	case AF_INET6:
3007 		audit_log_format(audit_buf, " src=%pI6", sel->saddr.a6);
3008 		if (sel->prefixlen_s != 128)
3009 			audit_log_format(audit_buf, " src_prefixlen=%d",
3010 					 sel->prefixlen_s);
3011 		audit_log_format(audit_buf, " dst=%pI6", sel->daddr.a6);
3012 		if (sel->prefixlen_d != 128)
3013 			audit_log_format(audit_buf, " dst_prefixlen=%d",
3014 					 sel->prefixlen_d);
3015 		break;
3016 	}
3017 }
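/*
 * Example of the record fragment emitted above (values illustrative):
 *
 *   sec_alg=1 sec_doi=0 sec_obj=system_u:object_r:ipsec_spd_t:s0
 *   src=192.0.2.1 src_prefixlen=24 dst=198.51.100.7
 *
 * The *_prefixlen keys are omitted for full-length (/32 or /128)
 * selectors, which is why they are conditional in the code above.
 */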
3018 
3019 void xfrm_audit_policy_add(struct xfrm_policy *xp, int result,
3020 			   kuid_t auid, u32 sessionid, u32 secid)
3021 {
3022 	struct audit_buffer *audit_buf;
3023 
3024 	audit_buf = xfrm_audit_start("SPD-add");
3025 	if (audit_buf == NULL)
3026 		return;
3027 	xfrm_audit_helper_usrinfo(auid, sessionid, secid, audit_buf);
3028 	audit_log_format(audit_buf, " res=%u", result);
3029 	xfrm_audit_common_policyinfo(xp, audit_buf);
3030 	audit_log_end(audit_buf);
3031 }
3032 EXPORT_SYMBOL_GPL(xfrm_audit_policy_add);
3033 
3034 void xfrm_audit_policy_delete(struct xfrm_policy *xp, int result,
3035 			      kuid_t auid, u32 sessionid, u32 secid)
3036 {
3037 	struct audit_buffer *audit_buf;
3038 
3039 	audit_buf = xfrm_audit_start("SPD-delete");
3040 	if (audit_buf == NULL)
3041 		return;
3042 	xfrm_audit_helper_usrinfo(auid, sessionid, secid, audit_buf);
3043 	audit_log_format(audit_buf, " res=%u", result);
3044 	xfrm_audit_common_policyinfo(xp, audit_buf);
3045 	audit_log_end(audit_buf);
3046 }
3047 EXPORT_SYMBOL_GPL(xfrm_audit_policy_delete);
3048 #endif
3049 
3050 #ifdef CONFIG_XFRM_MIGRATE
3051 static bool xfrm_migrate_selector_match(const struct xfrm_selector *sel_cmp,
3052 					const struct xfrm_selector *sel_tgt)
3053 {
3054 	if (sel_cmp->proto == IPSEC_ULPROTO_ANY) {
3055 		if (sel_tgt->family == sel_cmp->family &&
3056 		    xfrm_addr_equal(&sel_tgt->daddr, &sel_cmp->daddr,
3057 				    sel_cmp->family) &&
3058 		    xfrm_addr_equal(&sel_tgt->saddr, &sel_cmp->saddr,
3059 				    sel_cmp->family) &&
3060 		    sel_tgt->prefixlen_d == sel_cmp->prefixlen_d &&
3061 		    sel_tgt->prefixlen_s == sel_cmp->prefixlen_s) {
3062 			return true;
3063 		}
3064 	} else {
3065 		if (memcmp(sel_tgt, sel_cmp, sizeof(*sel_tgt)) == 0) {
3066 			return true;
3067 		}
3068 	}
3069 	return false;
3070 }
3071 
3072 static struct xfrm_policy *xfrm_migrate_policy_find(const struct xfrm_selector *sel,
3073 						     u8 dir, u8 type)
3074 {
3075 	struct xfrm_policy *pol, *ret = NULL;
3076 	struct hlist_head *chain;
3077 	u32 priority = ~0U;
3078 
3079 	read_lock_bh(&xfrm_policy_lock);
3080 	chain = policy_hash_direct(&init_net, &sel->daddr, &sel->saddr, sel->family, dir);
3081 	hlist_for_each_entry(pol, chain, bydst) {
3082 		if (xfrm_migrate_selector_match(sel, &pol->selector) &&
3083 		    pol->type == type) {
3084 			ret = pol;
3085 			priority = ret->priority;
3086 			break;
3087 		}
3088 	}
3089 	chain = &init_net.xfrm.policy_inexact[dir];
3090 	hlist_for_each_entry(pol, chain, bydst) {
3091 		if (xfrm_migrate_selector_match(sel, &pol->selector) &&
3092 		    pol->type == type &&
3093 		    pol->priority < priority) {
3094 			ret = pol;
3095 			break;
3096 		}
3097 	}
3098 
3099 	if (ret)
3100 		xfrm_pol_hold(ret);
3101 
3102 	read_unlock_bh(&xfrm_policy_lock);
3103 
3104 	return ret;
3105 }
3106 
3107 static int migrate_tmpl_match(const struct xfrm_migrate *m, const struct xfrm_tmpl *t)
3108 {
3109 	int match = 0;
3110 
3111 	if (t->mode == m->mode && t->id.proto == m->proto &&
3112 	    (m->reqid == 0 || t->reqid == m->reqid)) {
3113 		switch (t->mode) {
3114 		case XFRM_MODE_TUNNEL:
3115 		case XFRM_MODE_BEET:
3116 			if (xfrm_addr_equal(&t->id.daddr, &m->old_daddr,
3117 					    m->old_family) &&
3118 			    xfrm_addr_equal(&t->saddr, &m->old_saddr,
3119 					    m->old_family)) {
3120 				match = 1;
3121 			}
3122 			break;
3123 		case XFRM_MODE_TRANSPORT:
3124 			/* In transport mode the template does not store
3125 			 * any IP addresses, hence we just compare mode and
3126 			 * protocol. */
3127 			match = 1;
3128 			break;
3129 		default:
3130 			break;
3131 		}
3132 	}
3133 	return match;
3134 }
3135 
3136 /* update endpoint address(es) of template(s) */
3137 static int xfrm_policy_migrate(struct xfrm_policy *pol,
3138 			       struct xfrm_migrate *m, int num_migrate)
3139 {
3140 	struct xfrm_migrate *mp;
3141 	int i, j, n = 0;
3142 
3143 	write_lock_bh(&pol->lock);
3144 	if (unlikely(pol->walk.dead)) {
3145 		/* target policy has been deleted */
3146 		write_unlock_bh(&pol->lock);
3147 		return -ENOENT;
3148 	}
3149 
3150 	for (i = 0; i < pol->xfrm_nr; i++) {
3151 		for (j = 0, mp = m; j < num_migrate; j++, mp++) {
3152 			if (!migrate_tmpl_match(mp, &pol->xfrm_vec[i]))
3153 				continue;
3154 			n++;
3155 			if (pol->xfrm_vec[i].mode != XFRM_MODE_TUNNEL &&
3156 			    pol->xfrm_vec[i].mode != XFRM_MODE_BEET)
3157 				continue;
3158 			/* update endpoints */
3159 			memcpy(&pol->xfrm_vec[i].id.daddr, &mp->new_daddr,
3160 			       sizeof(pol->xfrm_vec[i].id.daddr));
3161 			memcpy(&pol->xfrm_vec[i].saddr, &mp->new_saddr,
3162 			       sizeof(pol->xfrm_vec[i].saddr));
3163 			pol->xfrm_vec[i].encap_family = mp->new_family;
3164 			/* flush bundles */
3165 			atomic_inc(&pol->genid);
3166 		}
3167 	}
3168 
3169 	write_unlock_bh(&pol->lock);
3170 
3171 	if (!n)
3172 		return -ENODATA;
3173 
3174 	return 0;
3175 }
3176 
3177 static int xfrm_migrate_check(const struct xfrm_migrate *m, int num_migrate)
3178 {
3179 	int i, j;
3180 
3181 	if (num_migrate < 1 || num_migrate > XFRM_MAX_DEPTH)
3182 		return -EINVAL;
3183 
3184 	for (i = 0; i < num_migrate; i++) {
3185 		if (xfrm_addr_equal(&m[i].old_daddr, &m[i].new_daddr,
3186 				    m[i].old_family) &&
3187 		    xfrm_addr_equal(&m[i].old_saddr, &m[i].new_saddr,
3188 				    m[i].old_family))
3189 			return -EINVAL;
3190 		if (xfrm_addr_any(&m[i].new_daddr, m[i].new_family) ||
3191 		    xfrm_addr_any(&m[i].new_saddr, m[i].new_family))
3192 			return -EINVAL;
3193 
3194 		/* check for duplicated entries */
3195 		for (j = i + 1; j < num_migrate; j++) {
3196 			if (!memcmp(&m[i].old_daddr, &m[j].old_daddr,
3197 				    sizeof(m[i].old_daddr)) &&
3198 			    !memcmp(&m[i].old_saddr, &m[j].old_saddr,
3199 				    sizeof(m[i].old_saddr)) &&
3200 			    m[i].proto == m[j].proto &&
3201 			    m[i].mode == m[j].mode &&
3202 			    m[i].reqid == m[j].reqid &&
3203 			    m[i].old_family == m[j].old_family)
3204 				return -EINVAL;
3205 		}
3206 	}
3207 
3208 	return 0;
3209 }
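/*
 * Illustrative migrate entry (a sketch, not from this file; the helper
 * name and addresses are hypothetical): moving a tunnel's local endpoint
 * from 192.0.2.1 to 192.0.2.99 while the peer stays put.  Such an entry
 * passes the checks above: the old and new address pairs differ, the new
 * addresses are not wildcards, and there is no duplicate.
 */
static void example_fill_migrate(struct xfrm_migrate *mp)
{
	memset(mp, 0, sizeof(*mp));
	mp->proto	 = IPPROTO_ESP;
	mp->mode	 = XFRM_MODE_TUNNEL;
	mp->old_family	 = AF_INET;
	mp->new_family	 = AF_INET;
	mp->old_saddr.a4 = htonl(0xc0000201);	/* 192.0.2.1 */
	mp->old_daddr.a4 = htonl(0xc0000202);	/* 192.0.2.2 */
	mp->new_saddr.a4 = htonl(0xc0000263);	/* 192.0.2.99 */
	mp->new_daddr.a4 = htonl(0xc0000202);	/* peer unchanged */
}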
3210 
3211 int xfrm_migrate(const struct xfrm_selector *sel, u8 dir, u8 type,
3212 		 struct xfrm_migrate *m, int num_migrate,
3213 		 struct xfrm_kmaddress *k)
3214 {
3215 	int i, err, nx_cur = 0, nx_new = 0;
3216 	struct xfrm_policy *pol = NULL;
3217 	struct xfrm_state *x, *xc;
3218 	struct xfrm_state *x_cur[XFRM_MAX_DEPTH];
3219 	struct xfrm_state *x_new[XFRM_MAX_DEPTH];
3220 	struct xfrm_migrate *mp;
3221 
3222 	if ((err = xfrm_migrate_check(m, num_migrate)) < 0)
3223 		goto out;
3224 
3225 	/* Stage 1 - find policy */
3226 	if ((pol = xfrm_migrate_policy_find(sel, dir, type)) == NULL) {
3227 		err = -ENOENT;
3228 		goto out;
3229 	}
3230 
3231 	/* Stage 2 - find and update state(s) */
3232 	for (i = 0, mp = m; i < num_migrate; i++, mp++) {
3233 		if ((x = xfrm_migrate_state_find(mp))) {
3234 			x_cur[nx_cur] = x;
3235 			nx_cur++;
3236 			if ((xc = xfrm_state_migrate(x, mp))) {
3237 				x_new[nx_new] = xc;
3238 				nx_new++;
3239 			} else {
3240 				err = -ENODATA;
3241 				goto restore_state;
3242 			}
3243 		}
3244 	}
3245 
3246 	/* Stage 3 - update policy */
3247 	if ((err = xfrm_policy_migrate(pol, m, num_migrate)) < 0)
3248 		goto restore_state;
3249 
3250 	/* Stage 4 - delete old state(s) */
3251 	if (nx_cur) {
3252 		xfrm_states_put(x_cur, nx_cur);
3253 		xfrm_states_delete(x_cur, nx_cur);
3254 	}
3255 
3256 	/* Stage 5 - announce */
3257 	km_migrate(sel, dir, type, m, num_migrate, k);
3258 
3259 	xfrm_pol_put(pol);
3260 
3261 	return 0;
3262 out:
3263 	return err;
3264 
3265 restore_state:
3266 	if (pol)
3267 		xfrm_pol_put(pol);
3268 	if (nx_cur)
3269 		xfrm_states_put(x_cur, nx_cur);
3270 	if (nx_new)
3271 		xfrm_states_delete(x_new, nx_new);
3272 
3273 	return err;
3274 }
3275 EXPORT_SYMBOL(xfrm_migrate);
3276 #endif
3277