1 /*
2  * xfrm_policy.c
3  *
4  * Changes:
5  *	Mitsuru KANDA @USAGI
6  * 	Kazunori MIYAZAWA @USAGI
7  * 	Kunihiro Ishiguro <kunihiro@ipinfusion.com>
8  * 		IPv6 support
9  * 	Kazunori MIYAZAWA @USAGI
10  * 	YOSHIFUJI Hideaki
11  * 		Split up af-specific portion
12  *	Derek Atkins <derek@ihtfp.com>		Add the post_input processor
13  *
14  */
15 
16 #include <linux/err.h>
17 #include <linux/slab.h>
18 #include <linux/kmod.h>
19 #include <linux/list.h>
20 #include <linux/spinlock.h>
21 #include <linux/workqueue.h>
22 #include <linux/notifier.h>
23 #include <linux/netdevice.h>
24 #include <linux/netfilter.h>
25 #include <linux/module.h>
26 #include <linux/cache.h>
27 #include <linux/audit.h>
28 #include <net/dst.h>
29 #include <net/flow.h>
30 #include <net/xfrm.h>
31 #include <net/ip.h>
32 #ifdef CONFIG_XFRM_STATISTICS
33 #include <net/snmp.h>
34 #endif
35 
36 #include "xfrm_hash.h"
37 
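/* Bounds for the per-policy hold-queue timer: retransmission of queued
 * packets starts at XFRM_QUEUE_TMO_MIN and backs off exponentially in
 * xfrm_policy_queue_process() until XFRM_QUEUE_TMO_MAX, at which point the
 * queue is purged.  At most XFRM_MAX_QUEUE_LEN packets are held per policy.
 */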
38 #define XFRM_QUEUE_TMO_MIN ((unsigned)(HZ/10))
39 #define XFRM_QUEUE_TMO_MAX ((unsigned)(60*HZ))
40 #define XFRM_MAX_QUEUE_LEN	100
41 
42 static DEFINE_SPINLOCK(xfrm_policy_afinfo_lock);
43 static struct xfrm_policy_afinfo __rcu *xfrm_policy_afinfo[NPROTO]
44 						__read_mostly;
45 
46 static struct kmem_cache *xfrm_dst_cache __read_mostly;
47 
48 static void xfrm_init_pmtu(struct dst_entry *dst);
49 static int stale_bundle(struct dst_entry *dst);
50 static int xfrm_bundle_ok(struct xfrm_dst *xdst);
51 static void xfrm_policy_queue_process(unsigned long arg);
52 
53 static struct xfrm_policy *__xfrm_policy_unlink(struct xfrm_policy *pol,
54 						int dir);
55 
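/* Selector matching: a zero-valued selector field acts as a wildcard.  The
 * (flow ^ sel) & mask idiom below is non-zero only where masked bits differ,
 * so e.g. dport_mask == htons(0xffff) demands an exact port match while
 * dport_mask == 0 accepts any port; likewise a zero proto, ifindex or
 * prefix length matches everything.
 */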
56 static inline bool
57 __xfrm4_selector_match(const struct xfrm_selector *sel, const struct flowi *fl)
58 {
59 	const struct flowi4 *fl4 = &fl->u.ip4;
60 
61 	return  addr4_match(fl4->daddr, sel->daddr.a4, sel->prefixlen_d) &&
62 		addr4_match(fl4->saddr, sel->saddr.a4, sel->prefixlen_s) &&
63 		!((xfrm_flowi_dport(fl, &fl4->uli) ^ sel->dport) & sel->dport_mask) &&
64 		!((xfrm_flowi_sport(fl, &fl4->uli) ^ sel->sport) & sel->sport_mask) &&
65 		(fl4->flowi4_proto == sel->proto || !sel->proto) &&
66 		(fl4->flowi4_oif == sel->ifindex || !sel->ifindex);
67 }
68 
69 static inline bool
70 __xfrm6_selector_match(const struct xfrm_selector *sel, const struct flowi *fl)
71 {
72 	const struct flowi6 *fl6 = &fl->u.ip6;
73 
74 	return  addr_match(&fl6->daddr, &sel->daddr, sel->prefixlen_d) &&
75 		addr_match(&fl6->saddr, &sel->saddr, sel->prefixlen_s) &&
76 		!((xfrm_flowi_dport(fl, &fl6->uli) ^ sel->dport) & sel->dport_mask) &&
77 		!((xfrm_flowi_sport(fl, &fl6->uli) ^ sel->sport) & sel->sport_mask) &&
78 		(fl6->flowi6_proto == sel->proto || !sel->proto) &&
79 		(fl6->flowi6_oif == sel->ifindex || !sel->ifindex);
80 }
81 
82 bool xfrm_selector_match(const struct xfrm_selector *sel, const struct flowi *fl,
83 			 unsigned short family)
84 {
85 	switch (family) {
86 	case AF_INET:
87 		return __xfrm4_selector_match(sel, fl);
88 	case AF_INET6:
89 		return __xfrm6_selector_match(sel, fl);
90 	}
91 	return false;
92 }
93 
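/* Look up the afinfo entry for @family under the RCU read lock.  On success
 * the read lock is left held and must be dropped by a matching
 * xfrm_policy_put_afinfo(); on failure NULL is returned and the lock is not
 * held.
 */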
94 static struct xfrm_policy_afinfo *xfrm_policy_get_afinfo(unsigned short family)
95 {
96 	struct xfrm_policy_afinfo *afinfo;
97 
98 	if (unlikely(family >= NPROTO))
99 		return NULL;
100 	rcu_read_lock();
101 	afinfo = rcu_dereference(xfrm_policy_afinfo[family]);
102 	if (unlikely(!afinfo))
103 		rcu_read_unlock();
104 	return afinfo;
105 }
106 
107 static void xfrm_policy_put_afinfo(struct xfrm_policy_afinfo *afinfo)
108 {
109 	rcu_read_unlock();
110 }
111 
112 static inline struct dst_entry *__xfrm_dst_lookup(struct net *net, int tos,
113 						  const xfrm_address_t *saddr,
114 						  const xfrm_address_t *daddr,
115 						  int family)
116 {
117 	struct xfrm_policy_afinfo *afinfo;
118 	struct dst_entry *dst;
119 
120 	afinfo = xfrm_policy_get_afinfo(family);
121 	if (unlikely(afinfo == NULL))
122 		return ERR_PTR(-EAFNOSUPPORT);
123 
124 	dst = afinfo->dst_lookup(net, tos, saddr, daddr);
125 
126 	xfrm_policy_put_afinfo(afinfo);
127 
128 	return dst;
129 }
130 
131 static inline struct dst_entry *xfrm_dst_lookup(struct xfrm_state *x, int tos,
132 						xfrm_address_t *prev_saddr,
133 						xfrm_address_t *prev_daddr,
134 						int family)
135 {
136 	struct net *net = xs_net(x);
137 	xfrm_address_t *saddr = &x->props.saddr;
138 	xfrm_address_t *daddr = &x->id.daddr;
139 	struct dst_entry *dst;
140 
141 	if (x->type->flags & XFRM_TYPE_LOCAL_COADDR) {
142 		saddr = x->coaddr;
143 		daddr = prev_daddr;
144 	}
145 	if (x->type->flags & XFRM_TYPE_REMOTE_COADDR) {
146 		saddr = prev_saddr;
147 		daddr = x->coaddr;
148 	}
149 
150 	dst = __xfrm_dst_lookup(net, tos, saddr, daddr, family);
151 
152 	if (!IS_ERR(dst)) {
153 		if (prev_saddr != saddr)
154 			memcpy(prev_saddr, saddr,  sizeof(*prev_saddr));
155 		if (prev_daddr != daddr)
156 			memcpy(prev_daddr, daddr,  sizeof(*prev_daddr));
157 	}
158 
159 	return dst;
160 }
161 
162 static inline unsigned long make_jiffies(long secs)
163 {
164 	if (secs >= (MAX_SCHEDULE_TIMEOUT-1)/HZ)
165 		return MAX_SCHEDULE_TIMEOUT-1;
166 	else
167 		return secs*HZ;
168 }
169 
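/* Per-policy lifetime timer.  A hard add/use expiry deletes the policy and
 * notifies the key managers; a soft expiry only warns via
 * km_policy_expired() and re-checks after XFRM_KM_TIMEOUT.  mod_timer()
 * returns 0 when the timer was not pending, in which case a reference is
 * taken on behalf of the newly armed timer.
 */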
170 static void xfrm_policy_timer(unsigned long data)
171 {
172 	struct xfrm_policy *xp = (struct xfrm_policy *)data;
173 	unsigned long now = get_seconds();
174 	long next = LONG_MAX;
175 	int warn = 0;
176 	int dir;
177 
178 	read_lock(&xp->lock);
179 
180 	if (unlikely(xp->walk.dead))
181 		goto out;
182 
183 	dir = xfrm_policy_id2dir(xp->index);
184 
185 	if (xp->lft.hard_add_expires_seconds) {
186 		long tmo = xp->lft.hard_add_expires_seconds +
187 			xp->curlft.add_time - now;
188 		if (tmo <= 0)
189 			goto expired;
190 		if (tmo < next)
191 			next = tmo;
192 	}
193 	if (xp->lft.hard_use_expires_seconds) {
194 		long tmo = xp->lft.hard_use_expires_seconds +
195 			(xp->curlft.use_time ? : xp->curlft.add_time) - now;
196 		if (tmo <= 0)
197 			goto expired;
198 		if (tmo < next)
199 			next = tmo;
200 	}
201 	if (xp->lft.soft_add_expires_seconds) {
202 		long tmo = xp->lft.soft_add_expires_seconds +
203 			xp->curlft.add_time - now;
204 		if (tmo <= 0) {
205 			warn = 1;
206 			tmo = XFRM_KM_TIMEOUT;
207 		}
208 		if (tmo < next)
209 			next = tmo;
210 	}
211 	if (xp->lft.soft_use_expires_seconds) {
212 		long tmo = xp->lft.soft_use_expires_seconds +
213 			(xp->curlft.use_time ? : xp->curlft.add_time) - now;
214 		if (tmo <= 0) {
215 			warn = 1;
216 			tmo = XFRM_KM_TIMEOUT;
217 		}
218 		if (tmo < next)
219 			next = tmo;
220 	}
221 
222 	if (warn)
223 		km_policy_expired(xp, dir, 0, 0);
224 	if (next != LONG_MAX &&
225 	    !mod_timer(&xp->timer, jiffies + make_jiffies(next)))
226 		xfrm_pol_hold(xp);
227 
228 out:
229 	read_unlock(&xp->lock);
230 	xfrm_pol_put(xp);
231 	return;
232 
233 expired:
234 	read_unlock(&xp->lock);
235 	if (!xfrm_policy_delete(xp, dir))
236 		km_policy_expired(xp, dir, 1, 0);
237 	xfrm_pol_put(xp);
238 }
239 
240 static struct flow_cache_object *xfrm_policy_flo_get(struct flow_cache_object *flo)
241 {
242 	struct xfrm_policy *pol = container_of(flo, struct xfrm_policy, flo);
243 
244 	if (unlikely(pol->walk.dead))
245 		flo = NULL;
246 	else
247 		xfrm_pol_hold(pol);
248 
249 	return flo;
250 }
251 
252 static int xfrm_policy_flo_check(struct flow_cache_object *flo)
253 {
254 	struct xfrm_policy *pol = container_of(flo, struct xfrm_policy, flo);
255 
256 	return !pol->walk.dead;
257 }
258 
259 static void xfrm_policy_flo_delete(struct flow_cache_object *flo)
260 {
261 	xfrm_pol_put(container_of(flo, struct xfrm_policy, flo));
262 }
263 
264 static const struct flow_cache_ops xfrm_policy_fc_ops = {
265 	.get = xfrm_policy_flo_get,
266 	.check = xfrm_policy_flo_check,
267 	.delete = xfrm_policy_flo_delete,
268 };
269 
270 /* Allocate xfrm_policy. Not used here; it is supposed to be used by pfkeyv2
271  * SPD calls.
272  */
273 
274 struct xfrm_policy *xfrm_policy_alloc(struct net *net, gfp_t gfp)
275 {
276 	struct xfrm_policy *policy;
277 
278 	policy = kzalloc(sizeof(struct xfrm_policy), gfp);
279 
280 	if (policy) {
281 		write_pnet(&policy->xp_net, net);
282 		INIT_LIST_HEAD(&policy->walk.all);
283 		INIT_HLIST_NODE(&policy->bydst);
284 		INIT_HLIST_NODE(&policy->byidx);
285 		rwlock_init(&policy->lock);
286 		atomic_set(&policy->refcnt, 1);
287 		skb_queue_head_init(&policy->polq.hold_queue);
288 		setup_timer(&policy->timer, xfrm_policy_timer,
289 				(unsigned long)policy);
290 		setup_timer(&policy->polq.hold_timer, xfrm_policy_queue_process,
291 			    (unsigned long)policy);
292 		policy->flo.ops = &xfrm_policy_fc_ops;
293 	}
294 	return policy;
295 }
296 EXPORT_SYMBOL(xfrm_policy_alloc);
297 
298 /* Destroy xfrm_policy: descendant resources must have been released by this point. */
299 
300 void xfrm_policy_destroy(struct xfrm_policy *policy)
301 {
302 	BUG_ON(!policy->walk.dead);
303 
304 	if (del_timer(&policy->timer) || del_timer(&policy->polq.hold_timer))
305 		BUG();
306 
307 	security_xfrm_policy_free(policy->security);
308 	kfree(policy);
309 }
310 EXPORT_SYMBOL(xfrm_policy_destroy);
311 
312 static void xfrm_queue_purge(struct sk_buff_head *list)
313 {
314 	struct sk_buff *skb;
315 
316 	while ((skb = skb_dequeue(list)) != NULL)
317 		kfree_skb(skb);
318 }
319 
320 /* Rule must be locked. Release descendant resources, announce
321  * the entry dead. The rule must already be unlinked from all lists.
322  */
323 
324 static void xfrm_policy_kill(struct xfrm_policy *policy)
325 {
326 	policy->walk.dead = 1;
327 
328 	atomic_inc(&policy->genid);
329 
330 	if (del_timer(&policy->polq.hold_timer))
331 		xfrm_pol_put(policy);
332 	xfrm_queue_purge(&policy->polq.hold_queue);
333 
334 	if (del_timer(&policy->timer))
335 		xfrm_pol_put(policy);
336 
337 	xfrm_pol_put(policy);
338 }
339 
340 static unsigned int xfrm_policy_hashmax __read_mostly = 1 * 1024 * 1024;
341 
342 static inline unsigned int idx_hash(struct net *net, u32 index)
343 {
344 	return __idx_hash(index, net->xfrm.policy_idx_hmask);
345 }
346 
347 static struct hlist_head *policy_hash_bysel(struct net *net,
348 					    const struct xfrm_selector *sel,
349 					    unsigned short family, int dir)
350 {
351 	unsigned int hmask = net->xfrm.policy_bydst[dir].hmask;
352 	unsigned int hash = __sel_hash(sel, family, hmask);
353 
354 	return (hash == hmask + 1 ?
355 		&net->xfrm.policy_inexact[dir] :
356 		net->xfrm.policy_bydst[dir].table + hash);
357 }
358 
359 static struct hlist_head *policy_hash_direct(struct net *net,
360 					     const xfrm_address_t *daddr,
361 					     const xfrm_address_t *saddr,
362 					     unsigned short family, int dir)
363 {
364 	unsigned int hmask = net->xfrm.policy_bydst[dir].hmask;
365 	unsigned int hash = __addr_hash(daddr, saddr, family, hmask);
366 
367 	return net->xfrm.policy_bydst[dir].table + hash;
368 }
369 
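/* Rehash one old chain into the new table.  An entry that hashes to the
 * same new bucket as its predecessor is linked right after it with
 * hlist_add_after(), preserving the entries' relative order; entries that
 * hash elsewhere are skipped and picked up by a later pass, hence the redo
 * loop until the old chain is empty.
 */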
370 static void xfrm_dst_hash_transfer(struct hlist_head *list,
371 				   struct hlist_head *ndsttable,
372 				   unsigned int nhashmask)
373 {
374 	struct hlist_node *tmp, *entry0 = NULL;
375 	struct xfrm_policy *pol;
376 	unsigned int h0 = 0;
377 
378 redo:
379 	hlist_for_each_entry_safe(pol, tmp, list, bydst) {
380 		unsigned int h;
381 
382 		h = __addr_hash(&pol->selector.daddr, &pol->selector.saddr,
383 				pol->family, nhashmask);
384 		if (!entry0) {
385 			hlist_del(&pol->bydst);
386 			hlist_add_head(&pol->bydst, ndsttable+h);
387 			h0 = h;
388 		} else {
389 			if (h != h0)
390 				continue;
391 			hlist_del(&pol->bydst);
392 			hlist_add_after(entry0, &pol->bydst);
393 		}
394 		entry0 = &pol->bydst;
395 	}
396 	if (!hlist_empty(list)) {
397 		entry0 = NULL;
398 		goto redo;
399 	}
400 }
401 
402 static void xfrm_idx_hash_transfer(struct hlist_head *list,
403 				   struct hlist_head *nidxtable,
404 				   unsigned int nhashmask)
405 {
406 	struct hlist_node *tmp;
407 	struct xfrm_policy *pol;
408 
409 	hlist_for_each_entry_safe(pol, tmp, list, byidx) {
410 		unsigned int h;
411 
412 		h = __idx_hash(pol->index, nhashmask);
413 		hlist_add_head(&pol->byidx, nidxtable+h);
414 	}
415 }
416 
417 static unsigned long xfrm_new_hash_mask(unsigned int old_hmask)
418 {
419 	return ((old_hmask + 1) << 1) - 1;
420 }
421 
422 static void xfrm_bydst_resize(struct net *net, int dir)
423 {
424 	unsigned int hmask = net->xfrm.policy_bydst[dir].hmask;
425 	unsigned int nhashmask = xfrm_new_hash_mask(hmask);
426 	unsigned int nsize = (nhashmask + 1) * sizeof(struct hlist_head);
427 	struct hlist_head *odst = net->xfrm.policy_bydst[dir].table;
428 	struct hlist_head *ndst = xfrm_hash_alloc(nsize);
429 	int i;
430 
431 	if (!ndst)
432 		return;
433 
434 	write_lock_bh(&net->xfrm.xfrm_policy_lock);
435 
436 	for (i = hmask; i >= 0; i--)
437 		xfrm_dst_hash_transfer(odst + i, ndst, nhashmask);
438 
439 	net->xfrm.policy_bydst[dir].table = ndst;
440 	net->xfrm.policy_bydst[dir].hmask = nhashmask;
441 
442 	write_unlock_bh(&net->xfrm.xfrm_policy_lock);
443 
444 	xfrm_hash_free(odst, (hmask + 1) * sizeof(struct hlist_head));
445 }
446 
447 static void xfrm_byidx_resize(struct net *net, int total)
448 {
449 	unsigned int hmask = net->xfrm.policy_idx_hmask;
450 	unsigned int nhashmask = xfrm_new_hash_mask(hmask);
451 	unsigned int nsize = (nhashmask + 1) * sizeof(struct hlist_head);
452 	struct hlist_head *oidx = net->xfrm.policy_byidx;
453 	struct hlist_head *nidx = xfrm_hash_alloc(nsize);
454 	int i;
455 
456 	if (!nidx)
457 		return;
458 
459 	write_lock_bh(&net->xfrm.xfrm_policy_lock);
460 
461 	for (i = hmask; i >= 0; i--)
462 		xfrm_idx_hash_transfer(oidx + i, nidx, nhashmask);
463 
464 	net->xfrm.policy_byidx = nidx;
465 	net->xfrm.policy_idx_hmask = nhashmask;
466 
467 	write_unlock_bh(&net->xfrm.xfrm_policy_lock);
468 
469 	xfrm_hash_free(oidx, (hmask + 1) * sizeof(struct hlist_head));
470 }
471 
472 static inline int xfrm_bydst_should_resize(struct net *net, int dir, int *total)
473 {
474 	unsigned int cnt = net->xfrm.policy_count[dir];
475 	unsigned int hmask = net->xfrm.policy_bydst[dir].hmask;
476 
477 	if (total)
478 		*total += cnt;
479 
480 	if ((hmask + 1) < xfrm_policy_hashmax &&
481 	    cnt > hmask)
482 		return 1;
483 
484 	return 0;
485 }
486 
487 static inline int xfrm_byidx_should_resize(struct net *net, int total)
488 {
489 	unsigned int hmask = net->xfrm.policy_idx_hmask;
490 
491 	if ((hmask + 1) < xfrm_policy_hashmax &&
492 	    total > hmask)
493 		return 1;
494 
495 	return 0;
496 }
497 
498 void xfrm_spd_getinfo(struct net *net, struct xfrmk_spdinfo *si)
499 {
500 	read_lock_bh(&net->xfrm.xfrm_policy_lock);
501 	si->incnt = net->xfrm.policy_count[XFRM_POLICY_IN];
502 	si->outcnt = net->xfrm.policy_count[XFRM_POLICY_OUT];
503 	si->fwdcnt = net->xfrm.policy_count[XFRM_POLICY_FWD];
504 	si->inscnt = net->xfrm.policy_count[XFRM_POLICY_IN+XFRM_POLICY_MAX];
505 	si->outscnt = net->xfrm.policy_count[XFRM_POLICY_OUT+XFRM_POLICY_MAX];
506 	si->fwdscnt = net->xfrm.policy_count[XFRM_POLICY_FWD+XFRM_POLICY_MAX];
507 	si->spdhcnt = net->xfrm.policy_idx_hmask;
508 	si->spdhmcnt = xfrm_policy_hashmax;
509 	read_unlock_bh(&net->xfrm.xfrm_policy_lock);
510 }
511 EXPORT_SYMBOL(xfrm_spd_getinfo);
512 
513 static DEFINE_MUTEX(hash_resize_mutex);
514 static void xfrm_hash_resize(struct work_struct *work)
515 {
516 	struct net *net = container_of(work, struct net, xfrm.policy_hash_work);
517 	int dir, total;
518 
519 	mutex_lock(&hash_resize_mutex);
520 
521 	total = 0;
522 	for (dir = 0; dir < XFRM_POLICY_MAX * 2; dir++) {
523 		if (xfrm_bydst_should_resize(net, dir, &total))
524 			xfrm_bydst_resize(net, dir);
525 	}
526 	if (xfrm_byidx_should_resize(net, total))
527 		xfrm_byidx_resize(net, total);
528 
529 	mutex_unlock(&hash_resize_mutex);
530 }
531 
532 /* Generate a new index. KAME seems to generate them ordered by cost, at the
533  * price of completely unpredictable rule ordering; that will not work here. */
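/* Instead, the loop below encodes the direction in the low three bits of
 * the index and advances the generator in steps of eight, retrying until an
 * unused index is found.  Index zero means "no index", hence the fallback
 * to 8.
 */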
534 static u32 xfrm_gen_index(struct net *net, int dir, u32 index)
535 {
536 	static u32 idx_generator;
537 
538 	for (;;) {
539 		struct hlist_head *list;
540 		struct xfrm_policy *p;
541 		u32 idx;
542 		int found;
543 
544 		if (!index) {
545 			idx = (idx_generator | dir);
546 			idx_generator += 8;
547 		} else {
548 			idx = index;
549 			index = 0;
550 		}
551 
552 		if (idx == 0)
553 			idx = 8;
554 		list = net->xfrm.policy_byidx + idx_hash(net, idx);
555 		found = 0;
556 		hlist_for_each_entry(p, list, byidx) {
557 			if (p->index == idx) {
558 				found = 1;
559 				break;
560 			}
561 		}
562 		if (!found)
563 			return idx;
564 	}
565 }
566 
567 static inline int selector_cmp(struct xfrm_selector *s1, struct xfrm_selector *s2)
568 {
569 	u32 *p1 = (u32 *) s1;
570 	u32 *p2 = (u32 *) s2;
571 	int len = sizeof(struct xfrm_selector) / sizeof(u32);
572 	int i;
573 
574 	for (i = 0; i < len; i++) {
575 		if (p1[i] != p2[i])
576 			return 1;
577 	}
578 
579 	return 0;
580 }
581 
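/* Move packets queued on the old policy over to the new one, so traffic
 * that is waiting for SA negotiation survives a policy replacement.  The
 * timeout is reset to XFRM_QUEUE_TMO_MIN and the new hold timer is fired
 * immediately to retry transmission under the new policy.
 */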
582 static void xfrm_policy_requeue(struct xfrm_policy *old,
583 				struct xfrm_policy *new)
584 {
585 	struct xfrm_policy_queue *pq = &old->polq;
586 	struct sk_buff_head list;
587 
588 	__skb_queue_head_init(&list);
589 
590 	spin_lock_bh(&pq->hold_queue.lock);
591 	skb_queue_splice_init(&pq->hold_queue, &list);
592 	if (del_timer(&pq->hold_timer))
593 		xfrm_pol_put(old);
594 	spin_unlock_bh(&pq->hold_queue.lock);
595 
596 	if (skb_queue_empty(&list))
597 		return;
598 
599 	pq = &new->polq;
600 
601 	spin_lock_bh(&pq->hold_queue.lock);
602 	skb_queue_splice(&list, &pq->hold_queue);
603 	pq->timeout = XFRM_QUEUE_TMO_MIN;
604 	if (!mod_timer(&pq->hold_timer, jiffies))
605 		xfrm_pol_hold(new);
606 	spin_unlock_bh(&pq->hold_queue.lock);
607 }
608 
609 static bool xfrm_policy_mark_match(struct xfrm_policy *policy,
610 				   struct xfrm_policy *pol)
611 {
612 	u32 mark = policy->mark.v & policy->mark.m;
613 
614 	if (policy->mark.v == pol->mark.v && policy->mark.m == pol->mark.m)
615 		return true;
616 
617 	if ((mark & pol->mark.m) == pol->mark.v &&
618 	    policy->priority == pol->priority)
619 		return true;
620 
621 	return false;
622 }
623 
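/* Insert a policy into its bydst hash chain, which is kept sorted by
 * ascending priority value.  A policy of the same type with an identical
 * selector, matching mark and security context replaces the existing entry,
 * inheriting its index and any queued packets, unless @excl demands an
 * exclusive insert (-EEXIST).
 */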
624 int xfrm_policy_insert(int dir, struct xfrm_policy *policy, int excl)
625 {
626 	struct net *net = xp_net(policy);
627 	struct xfrm_policy *pol;
628 	struct xfrm_policy *delpol;
629 	struct hlist_head *chain;
630 	struct hlist_node *newpos;
631 
632 	write_lock_bh(&net->xfrm.xfrm_policy_lock);
633 	chain = policy_hash_bysel(net, &policy->selector, policy->family, dir);
634 	delpol = NULL;
635 	newpos = NULL;
636 	hlist_for_each_entry(pol, chain, bydst) {
637 		if (pol->type == policy->type &&
638 		    !selector_cmp(&pol->selector, &policy->selector) &&
639 		    xfrm_policy_mark_match(policy, pol) &&
640 		    xfrm_sec_ctx_match(pol->security, policy->security) &&
641 		    !WARN_ON(delpol)) {
642 			if (excl) {
643 				write_unlock_bh(&net->xfrm.xfrm_policy_lock);
644 				return -EEXIST;
645 			}
646 			delpol = pol;
647 			if (policy->priority > pol->priority)
648 				continue;
649 		} else if (policy->priority >= pol->priority) {
650 			newpos = &pol->bydst;
651 			continue;
652 		}
653 		if (delpol)
654 			break;
655 	}
656 	if (newpos)
657 		hlist_add_after(newpos, &policy->bydst);
658 	else
659 		hlist_add_head(&policy->bydst, chain);
660 	xfrm_pol_hold(policy);
661 	net->xfrm.policy_count[dir]++;
662 	atomic_inc(&net->xfrm.flow_cache_genid);
663 
664 	/* After the previous checks, family can only be AF_INET or AF_INET6 */
665 	if (policy->family == AF_INET)
666 		rt_genid_bump_ipv4(net);
667 	else
668 		rt_genid_bump_ipv6(net);
669 
670 	if (delpol) {
671 		xfrm_policy_requeue(delpol, policy);
672 		__xfrm_policy_unlink(delpol, dir);
673 	}
674 	policy->index = delpol ? delpol->index : xfrm_gen_index(net, dir, policy->index);
675 	hlist_add_head(&policy->byidx, net->xfrm.policy_byidx+idx_hash(net, policy->index));
676 	policy->curlft.add_time = get_seconds();
677 	policy->curlft.use_time = 0;
678 	if (!mod_timer(&policy->timer, jiffies + HZ))
679 		xfrm_pol_hold(policy);
680 	list_add(&policy->walk.all, &net->xfrm.policy_all);
681 	write_unlock_bh(&net->xfrm.xfrm_policy_lock);
682 
683 	if (delpol)
684 		xfrm_policy_kill(delpol);
685 	else if (xfrm_bydst_should_resize(net, dir, NULL))
686 		schedule_work(&net->xfrm.policy_hash_work);
687 
688 	return 0;
689 }
690 EXPORT_SYMBOL(xfrm_policy_insert);
691 
692 struct xfrm_policy *xfrm_policy_bysel_ctx(struct net *net, u32 mark, u8 type,
693 					  int dir, struct xfrm_selector *sel,
694 					  struct xfrm_sec_ctx *ctx, int delete,
695 					  int *err)
696 {
697 	struct xfrm_policy *pol, *ret;
698 	struct hlist_head *chain;
699 
700 	*err = 0;
701 	write_lock_bh(&net->xfrm.xfrm_policy_lock);
702 	chain = policy_hash_bysel(net, sel, sel->family, dir);
703 	ret = NULL;
704 	hlist_for_each_entry(pol, chain, bydst) {
705 		if (pol->type == type &&
706 		    (mark & pol->mark.m) == pol->mark.v &&
707 		    !selector_cmp(sel, &pol->selector) &&
708 		    xfrm_sec_ctx_match(ctx, pol->security)) {
709 			xfrm_pol_hold(pol);
710 			if (delete) {
711 				*err = security_xfrm_policy_delete(
712 								pol->security);
713 				if (*err) {
714 					write_unlock_bh(&net->xfrm.xfrm_policy_lock);
715 					return pol;
716 				}
717 				__xfrm_policy_unlink(pol, dir);
718 			}
719 			ret = pol;
720 			break;
721 		}
722 	}
723 	write_unlock_bh(&net->xfrm.xfrm_policy_lock);
724 
725 	if (ret && delete)
726 		xfrm_policy_kill(ret);
727 	return ret;
728 }
729 EXPORT_SYMBOL(xfrm_policy_bysel_ctx);
730 
731 struct xfrm_policy *xfrm_policy_byid(struct net *net, u32 mark, u8 type,
732 				     int dir, u32 id, int delete, int *err)
733 {
734 	struct xfrm_policy *pol, *ret;
735 	struct hlist_head *chain;
736 
737 	*err = -ENOENT;
738 	if (xfrm_policy_id2dir(id) != dir)
739 		return NULL;
740 
741 	*err = 0;
742 	write_lock_bh(&net->xfrm.xfrm_policy_lock);
743 	chain = net->xfrm.policy_byidx + idx_hash(net, id);
744 	ret = NULL;
745 	hlist_for_each_entry(pol, chain, byidx) {
746 		if (pol->type == type && pol->index == id &&
747 		    (mark & pol->mark.m) == pol->mark.v) {
748 			xfrm_pol_hold(pol);
749 			if (delete) {
750 				*err = security_xfrm_policy_delete(
751 								pol->security);
752 				if (*err) {
753 					write_unlock_bh(&net->xfrm.xfrm_policy_lock);
754 					return pol;
755 				}
756 				__xfrm_policy_unlink(pol, dir);
757 			}
758 			ret = pol;
759 			break;
760 		}
761 	}
762 	write_unlock_bh(&net->xfrm.xfrm_policy_lock);
763 
764 	if (ret && delete)
765 		xfrm_policy_kill(ret);
766 	return ret;
767 }
768 EXPORT_SYMBOL(xfrm_policy_byid);
769 
770 #ifdef CONFIG_SECURITY_NETWORK_XFRM
771 static inline int
772 xfrm_policy_flush_secctx_check(struct net *net, u8 type, bool task_valid)
773 {
774 	int dir, err = 0;
775 
776 	for (dir = 0; dir < XFRM_POLICY_MAX; dir++) {
777 		struct xfrm_policy *pol;
778 		int i;
779 
780 		hlist_for_each_entry(pol,
781 				     &net->xfrm.policy_inexact[dir], bydst) {
782 			if (pol->type != type)
783 				continue;
784 			err = security_xfrm_policy_delete(pol->security);
785 			if (err) {
786 				xfrm_audit_policy_delete(pol, 0, task_valid);
787 				return err;
788 			}
789 		}
790 		for (i = net->xfrm.policy_bydst[dir].hmask; i >= 0; i--) {
791 			hlist_for_each_entry(pol,
792 					     net->xfrm.policy_bydst[dir].table + i,
793 					     bydst) {
794 				if (pol->type != type)
795 					continue;
796 				err = security_xfrm_policy_delete(
797 								pol->security);
798 				if (err) {
799 					xfrm_audit_policy_delete(pol, 0,
800 								 task_valid);
801 					return err;
802 				}
803 			}
804 		}
805 	}
806 	return err;
807 }
808 #else
809 static inline int
810 xfrm_policy_flush_secctx_check(struct net *net, u8 type, bool task_valid)
811 {
812 	return 0;
813 }
814 #endif
815 
816 int xfrm_policy_flush(struct net *net, u8 type, bool task_valid)
817 {
818 	int dir, err = 0, cnt = 0;
819 
820 	write_lock_bh(&net->xfrm.xfrm_policy_lock);
821 
822 	err = xfrm_policy_flush_secctx_check(net, type, task_valid);
823 	if (err)
824 		goto out;
825 
826 	for (dir = 0; dir < XFRM_POLICY_MAX; dir++) {
827 		struct xfrm_policy *pol;
828 		int i;
829 
830 	again1:
831 		hlist_for_each_entry(pol,
832 				     &net->xfrm.policy_inexact[dir], bydst) {
833 			if (pol->type != type)
834 				continue;
835 			__xfrm_policy_unlink(pol, dir);
836 			write_unlock_bh(&net->xfrm.xfrm_policy_lock);
837 			cnt++;
838 
839 			xfrm_audit_policy_delete(pol, 1, task_valid);
840 
841 			xfrm_policy_kill(pol);
842 
843 			write_lock_bh(&net->xfrm.xfrm_policy_lock);
844 			goto again1;
845 		}
846 
847 		for (i = net->xfrm.policy_bydst[dir].hmask; i >= 0; i--) {
848 	again2:
849 			hlist_for_each_entry(pol,
850 					     net->xfrm.policy_bydst[dir].table + i,
851 					     bydst) {
852 				if (pol->type != type)
853 					continue;
854 				__xfrm_policy_unlink(pol, dir);
855 				write_unlock_bh(&net->xfrm.xfrm_policy_lock);
856 				cnt++;
857 
858 				xfrm_audit_policy_delete(pol, 1, task_valid);
859 				xfrm_policy_kill(pol);
860 
861 				write_lock_bh(&net->xfrm.xfrm_policy_lock);
862 				goto again2;
863 			}
864 		}
865 
866 	}
867 	if (!cnt)
868 		err = -ESRCH;
869 out:
870 	write_unlock_bh(&net->xfrm.xfrm_policy_lock);
871 	return err;
872 }
873 EXPORT_SYMBOL(xfrm_policy_flush);
874 
875 int xfrm_policy_walk(struct net *net, struct xfrm_policy_walk *walk,
876 		     int (*func)(struct xfrm_policy *, int, int, void*),
877 		     void *data)
878 {
879 	struct xfrm_policy *pol;
880 	struct xfrm_policy_walk_entry *x;
881 	int error = 0;
882 
883 	if (walk->type >= XFRM_POLICY_TYPE_MAX &&
884 	    walk->type != XFRM_POLICY_TYPE_ANY)
885 		return -EINVAL;
886 
887 	if (list_empty(&walk->walk.all) && walk->seq != 0)
888 		return 0;
889 
890 	write_lock_bh(&net->xfrm.xfrm_policy_lock);
891 	if (list_empty(&walk->walk.all))
892 		x = list_first_entry(&net->xfrm.policy_all, struct xfrm_policy_walk_entry, all);
893 	else
894 		x = list_entry(&walk->walk.all, struct xfrm_policy_walk_entry, all);
895 	list_for_each_entry_from(x, &net->xfrm.policy_all, all) {
896 		if (x->dead)
897 			continue;
898 		pol = container_of(x, struct xfrm_policy, walk);
899 		if (walk->type != XFRM_POLICY_TYPE_ANY &&
900 		    walk->type != pol->type)
901 			continue;
902 		error = func(pol, xfrm_policy_id2dir(pol->index),
903 			     walk->seq, data);
904 		if (error) {
905 			list_move_tail(&walk->walk.all, &x->all);
906 			goto out;
907 		}
908 		walk->seq++;
909 	}
910 	if (walk->seq == 0) {
911 		error = -ENOENT;
912 		goto out;
913 	}
914 	list_del_init(&walk->walk.all);
915 out:
916 	write_unlock_bh(&net->xfrm.xfrm_policy_lock);
917 	return error;
918 }
919 EXPORT_SYMBOL(xfrm_policy_walk);
920 
921 void xfrm_policy_walk_init(struct xfrm_policy_walk *walk, u8 type)
922 {
923 	INIT_LIST_HEAD(&walk->walk.all);
924 	walk->walk.dead = 1;
925 	walk->type = type;
926 	walk->seq = 0;
927 }
928 EXPORT_SYMBOL(xfrm_policy_walk_init);
929 
930 void xfrm_policy_walk_done(struct xfrm_policy_walk *walk, struct net *net)
931 {
932 	if (list_empty(&walk->walk.all))
933 		return;
934 
935 	write_lock_bh(&net->xfrm.xfrm_policy_lock);
936 	list_del(&walk->walk.all);
937 	write_unlock_bh(&net->xfrm.xfrm_policy_lock);
938 }
939 EXPORT_SYMBOL(xfrm_policy_walk_done);
940 
941 /*
942  * Find a policy to apply to this flow.
943  *
944  * Returns 0 if the policy matches, else a negative errno.
945  */
946 static int xfrm_policy_match(const struct xfrm_policy *pol,
947 			     const struct flowi *fl,
948 			     u8 type, u16 family, int dir)
949 {
950 	const struct xfrm_selector *sel = &pol->selector;
951 	int ret = -ESRCH;
952 	bool match;
953 
954 	if (pol->family != family ||
955 	    (fl->flowi_mark & pol->mark.m) != pol->mark.v ||
956 	    pol->type != type)
957 		return ret;
958 
959 	match = xfrm_selector_match(sel, fl, family);
960 	if (match)
961 		ret = security_xfrm_policy_lookup(pol->security, fl->flowi_secid,
962 						  dir);
963 
964 	return ret;
965 }
966 
967 static struct xfrm_policy *xfrm_policy_lookup_bytype(struct net *net, u8 type,
968 						     const struct flowi *fl,
969 						     u16 family, u8 dir)
970 {
971 	int err;
972 	struct xfrm_policy *pol, *ret;
973 	const xfrm_address_t *daddr, *saddr;
974 	struct hlist_head *chain;
975 	u32 priority = ~0U;
976 
977 	daddr = xfrm_flowi_daddr(fl, family);
978 	saddr = xfrm_flowi_saddr(fl, family);
979 	if (unlikely(!daddr || !saddr))
980 		return NULL;
981 
982 	read_lock_bh(&net->xfrm.xfrm_policy_lock);
983 	chain = policy_hash_direct(net, daddr, saddr, family, dir);
984 	ret = NULL;
985 	hlist_for_each_entry(pol, chain, bydst) {
986 		err = xfrm_policy_match(pol, fl, type, family, dir);
987 		if (err) {
988 			if (err == -ESRCH)
989 				continue;
990 			else {
991 				ret = ERR_PTR(err);
992 				goto fail;
993 			}
994 		} else {
995 			ret = pol;
996 			priority = ret->priority;
997 			break;
998 		}
999 	}
1000 	chain = &net->xfrm.policy_inexact[dir];
1001 	hlist_for_each_entry(pol, chain, bydst) {
1002 		err = xfrm_policy_match(pol, fl, type, family, dir);
1003 		if (err) {
1004 			if (err == -ESRCH)
1005 				continue;
1006 			else {
1007 				ret = ERR_PTR(err);
1008 				goto fail;
1009 			}
1010 		} else if (pol->priority < priority) {
1011 			ret = pol;
1012 			break;
1013 		}
1014 	}
1015 	if (ret)
1016 		xfrm_pol_hold(ret);
1017 fail:
1018 	read_unlock_bh(&net->xfrm.xfrm_policy_lock);
1019 
1020 	return ret;
1021 }
1022 
1023 static struct xfrm_policy *
1024 __xfrm_policy_lookup(struct net *net, const struct flowi *fl, u16 family, u8 dir)
1025 {
1026 #ifdef CONFIG_XFRM_SUB_POLICY
1027 	struct xfrm_policy *pol;
1028 
1029 	pol = xfrm_policy_lookup_bytype(net, XFRM_POLICY_TYPE_SUB, fl, family, dir);
1030 	if (pol != NULL)
1031 		return pol;
1032 #endif
1033 	return xfrm_policy_lookup_bytype(net, XFRM_POLICY_TYPE_MAIN, fl, family, dir);
1034 }
1035 
1036 static int flow_to_policy_dir(int dir)
1037 {
1038 	if (XFRM_POLICY_IN == FLOW_DIR_IN &&
1039 	    XFRM_POLICY_OUT == FLOW_DIR_OUT &&
1040 	    XFRM_POLICY_FWD == FLOW_DIR_FWD)
1041 		return dir;
1042 
1043 	switch (dir) {
1044 	default:
1045 	case FLOW_DIR_IN:
1046 		return XFRM_POLICY_IN;
1047 	case FLOW_DIR_OUT:
1048 		return XFRM_POLICY_OUT;
1049 	case FLOW_DIR_FWD:
1050 		return XFRM_POLICY_FWD;
1051 	}
1052 }
1053 
1054 static struct flow_cache_object *
1055 xfrm_policy_lookup(struct net *net, const struct flowi *fl, u16 family,
1056 		   u8 dir, struct flow_cache_object *old_obj, void *ctx)
1057 {
1058 	struct xfrm_policy *pol;
1059 
1060 	if (old_obj)
1061 		xfrm_pol_put(container_of(old_obj, struct xfrm_policy, flo));
1062 
1063 	pol = __xfrm_policy_lookup(net, fl, family, flow_to_policy_dir(dir));
1064 	if (IS_ERR_OR_NULL(pol))
1065 		return ERR_CAST(pol);
1066 
1067 	/* Resolver returns two references:
1068 	 * one for the cache and one for the caller of flow_cache_lookup() */
1069 	xfrm_pol_hold(pol);
1070 
1071 	return &pol->flo;
1072 }
1073 
1074 static inline int policy_to_flow_dir(int dir)
1075 {
1076 	if (XFRM_POLICY_IN == FLOW_DIR_IN &&
1077 	    XFRM_POLICY_OUT == FLOW_DIR_OUT &&
1078 	    XFRM_POLICY_FWD == FLOW_DIR_FWD)
1079 		return dir;
1080 	switch (dir) {
1081 	default:
1082 	case XFRM_POLICY_IN:
1083 		return FLOW_DIR_IN;
1084 	case XFRM_POLICY_OUT:
1085 		return FLOW_DIR_OUT;
1086 	case XFRM_POLICY_FWD:
1087 		return FLOW_DIR_FWD;
1088 	}
1089 }
1090 
1091 static struct xfrm_policy *xfrm_sk_policy_lookup(struct sock *sk, int dir,
1092 						 const struct flowi *fl)
1093 {
1094 	struct xfrm_policy *pol;
1095 	struct net *net = sock_net(sk);
1096 
1097 	read_lock_bh(&net->xfrm.xfrm_policy_lock);
1098 	if ((pol = sk->sk_policy[dir]) != NULL) {
1099 		bool match = xfrm_selector_match(&pol->selector, fl,
1100 						 sk->sk_family);
1101 		int err = 0;
1102 
1103 		if (match) {
1104 			if ((sk->sk_mark & pol->mark.m) != pol->mark.v) {
1105 				pol = NULL;
1106 				goto out;
1107 			}
1108 			err = security_xfrm_policy_lookup(pol->security,
1109 						      fl->flowi_secid,
1110 						      policy_to_flow_dir(dir));
1111 			if (!err)
1112 				xfrm_pol_hold(pol);
1113 			else if (err == -ESRCH)
1114 				pol = NULL;
1115 			else
1116 				pol = ERR_PTR(err);
1117 		} else
1118 			pol = NULL;
1119 	}
1120 out:
1121 	read_unlock_bh(&net->xfrm.xfrm_policy_lock);
1122 	return pol;
1123 }
1124 
1125 static void __xfrm_policy_link(struct xfrm_policy *pol, int dir)
1126 {
1127 	struct net *net = xp_net(pol);
1128 	struct hlist_head *chain = policy_hash_bysel(net, &pol->selector,
1129 						     pol->family, dir);
1130 
1131 	list_add(&pol->walk.all, &net->xfrm.policy_all);
1132 	hlist_add_head(&pol->bydst, chain);
1133 	hlist_add_head(&pol->byidx, net->xfrm.policy_byidx+idx_hash(net, pol->index));
1134 	net->xfrm.policy_count[dir]++;
1135 	xfrm_pol_hold(pol);
1136 
1137 	if (xfrm_bydst_should_resize(net, dir, NULL))
1138 		schedule_work(&net->xfrm.policy_hash_work);
1139 }
1140 
1141 static struct xfrm_policy *__xfrm_policy_unlink(struct xfrm_policy *pol,
1142 						int dir)
1143 {
1144 	struct net *net = xp_net(pol);
1145 
1146 	if (hlist_unhashed(&pol->bydst))
1147 		return NULL;
1148 
1149 	hlist_del_init(&pol->bydst);
1150 	hlist_del(&pol->byidx);
1151 	list_del(&pol->walk.all);
1152 	net->xfrm.policy_count[dir]--;
1153 
1154 	return pol;
1155 }
1156 
1157 int xfrm_policy_delete(struct xfrm_policy *pol, int dir)
1158 {
1159 	struct net *net = xp_net(pol);
1160 
1161 	write_lock_bh(&net->xfrm.xfrm_policy_lock);
1162 	pol = __xfrm_policy_unlink(pol, dir);
1163 	write_unlock_bh(&net->xfrm.xfrm_policy_lock);
1164 	if (pol) {
1165 		xfrm_policy_kill(pol);
1166 		return 0;
1167 	}
1168 	return -ENOENT;
1169 }
1170 EXPORT_SYMBOL(xfrm_policy_delete);
1171 
1172 int xfrm_sk_policy_insert(struct sock *sk, int dir, struct xfrm_policy *pol)
1173 {
1174 	struct net *net = xp_net(pol);
1175 	struct xfrm_policy *old_pol;
1176 
1177 #ifdef CONFIG_XFRM_SUB_POLICY
1178 	if (pol && pol->type != XFRM_POLICY_TYPE_MAIN)
1179 		return -EINVAL;
1180 #endif
1181 
1182 	write_lock_bh(&net->xfrm.xfrm_policy_lock);
1183 	old_pol = sk->sk_policy[dir];
1184 	sk->sk_policy[dir] = pol;
1185 	if (pol) {
1186 		pol->curlft.add_time = get_seconds();
1187 		pol->index = xfrm_gen_index(net, XFRM_POLICY_MAX+dir, 0);
1188 		__xfrm_policy_link(pol, XFRM_POLICY_MAX+dir);
1189 	}
1190 	if (old_pol) {
1191 		if (pol)
1192 			xfrm_policy_requeue(old_pol, pol);
1193 
1194 		/* Unlinking always succeeds. This is the only function
1195 		 * allowed to delete or replace a socket policy.
1196 		 */
1197 		__xfrm_policy_unlink(old_pol, XFRM_POLICY_MAX+dir);
1198 	}
1199 	write_unlock_bh(&net->xfrm.xfrm_policy_lock);
1200 
1201 	if (old_pol) {
1202 		xfrm_policy_kill(old_pol);
1203 	}
1204 	return 0;
1205 }
1206 
1207 static struct xfrm_policy *clone_policy(const struct xfrm_policy *old, int dir)
1208 {
1209 	struct xfrm_policy *newp = xfrm_policy_alloc(xp_net(old), GFP_ATOMIC);
1210 	struct net *net = xp_net(old);
1211 
1212 	if (newp) {
1213 		newp->selector = old->selector;
1214 		if (security_xfrm_policy_clone(old->security,
1215 					       &newp->security)) {
1216 			kfree(newp);
1217 			return NULL;  /* ENOMEM */
1218 		}
1219 		newp->lft = old->lft;
1220 		newp->curlft = old->curlft;
1221 		newp->mark = old->mark;
1222 		newp->action = old->action;
1223 		newp->flags = old->flags;
1224 		newp->xfrm_nr = old->xfrm_nr;
1225 		newp->index = old->index;
1226 		newp->type = old->type;
1227 		memcpy(newp->xfrm_vec, old->xfrm_vec,
1228 		       newp->xfrm_nr*sizeof(struct xfrm_tmpl));
1229 		write_lock_bh(&net->xfrm.xfrm_policy_lock);
1230 		__xfrm_policy_link(newp, XFRM_POLICY_MAX+dir);
1231 		write_unlock_bh(&net->xfrm.xfrm_policy_lock);
1232 		xfrm_pol_put(newp);
1233 	}
1234 	return newp;
1235 }
1236 
1237 int __xfrm_sk_clone_policy(struct sock *sk)
1238 {
1239 	struct xfrm_policy *p0 = sk->sk_policy[0],
1240 			   *p1 = sk->sk_policy[1];
1241 
1242 	sk->sk_policy[0] = sk->sk_policy[1] = NULL;
1243 	if (p0 && (sk->sk_policy[0] = clone_policy(p0, 0)) == NULL)
1244 		return -ENOMEM;
1245 	if (p1 && (sk->sk_policy[1] = clone_policy(p1, 1)) == NULL)
1246 		return -ENOMEM;
1247 	return 0;
1248 }
1249 
1250 static int
1251 xfrm_get_saddr(struct net *net, xfrm_address_t *local, xfrm_address_t *remote,
1252 	       unsigned short family)
1253 {
1254 	int err;
1255 	struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
1256 
1257 	if (unlikely(afinfo == NULL))
1258 		return -EINVAL;
1259 	err = afinfo->get_saddr(net, local, remote);
1260 	xfrm_policy_put_afinfo(afinfo);
1261 	return err;
1262 }
1263 
1264 /* Resolve the list of templates for the flow, given the policy. */
1265 
1266 static int
1267 xfrm_tmpl_resolve_one(struct xfrm_policy *policy, const struct flowi *fl,
1268 		      struct xfrm_state **xfrm, unsigned short family)
1269 {
1270 	struct net *net = xp_net(policy);
1271 	int nx;
1272 	int i, error;
1273 	xfrm_address_t *daddr = xfrm_flowi_daddr(fl, family);
1274 	xfrm_address_t *saddr = xfrm_flowi_saddr(fl, family);
1275 	xfrm_address_t tmp;
1276 
1277 	for (nx = 0, i = 0; i < policy->xfrm_nr; i++) {
1278 		struct xfrm_state *x;
1279 		xfrm_address_t *remote = daddr;
1280 		xfrm_address_t *local  = saddr;
1281 		struct xfrm_tmpl *tmpl = &policy->xfrm_vec[i];
1282 
1283 		if (tmpl->mode == XFRM_MODE_TUNNEL ||
1284 		    tmpl->mode == XFRM_MODE_BEET) {
1285 			remote = &tmpl->id.daddr;
1286 			local = &tmpl->saddr;
1287 			if (xfrm_addr_any(local, tmpl->encap_family)) {
1288 				error = xfrm_get_saddr(net, &tmp, remote, tmpl->encap_family);
1289 				if (error)
1290 					goto fail;
1291 				local = &tmp;
1292 			}
1293 		}
1294 
1295 		x = xfrm_state_find(remote, local, fl, tmpl, policy, &error, family);
1296 
1297 		if (x && x->km.state == XFRM_STATE_VALID) {
1298 			xfrm[nx++] = x;
1299 			daddr = remote;
1300 			saddr = local;
1301 			continue;
1302 		}
1303 		if (x) {
1304 			error = (x->km.state == XFRM_STATE_ERROR ?
1305 				 -EINVAL : -EAGAIN);
1306 			xfrm_state_put(x);
1307 		} else if (error == -ESRCH) {
1308 			error = -EAGAIN;
1309 		}
1310 
1311 		if (!tmpl->optional)
1312 			goto fail;
1313 	}
1314 	return nx;
1315 
1316 fail:
1317 	for (nx--; nx >= 0; nx--)
1318 		xfrm_state_put(xfrm[nx]);
1319 	return error;
1320 }
1321 
1322 static int
1323 xfrm_tmpl_resolve(struct xfrm_policy **pols, int npols, const struct flowi *fl,
1324 		  struct xfrm_state **xfrm, unsigned short family)
1325 {
1326 	struct xfrm_state *tp[XFRM_MAX_DEPTH];
1327 	struct xfrm_state **tpp = (npols > 1) ? tp : xfrm;
1328 	int cnx = 0;
1329 	int error;
1330 	int ret;
1331 	int i;
1332 
1333 	for (i = 0; i < npols; i++) {
1334 		if (cnx + pols[i]->xfrm_nr >= XFRM_MAX_DEPTH) {
1335 			error = -ENOBUFS;
1336 			goto fail;
1337 		}
1338 
1339 		ret = xfrm_tmpl_resolve_one(pols[i], fl, &tpp[cnx], family);
1340 		if (ret < 0) {
1341 			error = ret;
1342 			goto fail;
1343 		} else
1344 			cnx += ret;
1345 	}
1346 
1347 	/* found states are sorted for outbound processing */
1348 	if (npols > 1)
1349 		xfrm_state_sort(xfrm, tpp, cnx, family);
1350 
1351 	return cnx;
1352 
1353  fail:
1354 	for (cnx--; cnx >= 0; cnx--)
1355 		xfrm_state_put(tpp[cnx]);
1356 	return error;
1357 
1358 }
1359 
1360 /* Check that the bundle accepts the flow and its components are
1361  * still valid.
1362  */
1363 
1364 static inline int xfrm_get_tos(const struct flowi *fl, int family)
1365 {
1366 	struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
1367 	int tos;
1368 
1369 	if (!afinfo)
1370 		return -EINVAL;
1371 
1372 	tos = afinfo->get_tos(fl);
1373 
1374 	xfrm_policy_put_afinfo(afinfo);
1375 
1376 	return tos;
1377 }
1378 
1379 static struct flow_cache_object *xfrm_bundle_flo_get(struct flow_cache_object *flo)
1380 {
1381 	struct xfrm_dst *xdst = container_of(flo, struct xfrm_dst, flo);
1382 	struct dst_entry *dst = &xdst->u.dst;
1383 
1384 	if (xdst->route == NULL) {
1385 		/* Dummy bundle - if it has xfrms, we were not
1386 		 * able to build the bundle because template resolution
1387 		 * failed. That means we need to retry resolving. */
1388 		if (xdst->num_xfrms > 0)
1389 			return NULL;
1390 	} else if (dst->flags & DST_XFRM_QUEUE) {
1391 		return NULL;
1392 	} else {
1393 		/* Real bundle */
1394 		if (stale_bundle(dst))
1395 			return NULL;
1396 	}
1397 
1398 	dst_hold(dst);
1399 	return flo;
1400 }
1401 
1402 static int xfrm_bundle_flo_check(struct flow_cache_object *flo)
1403 {
1404 	struct xfrm_dst *xdst = container_of(flo, struct xfrm_dst, flo);
1405 	struct dst_entry *dst = &xdst->u.dst;
1406 
1407 	if (!xdst->route)
1408 		return 0;
1409 	if (stale_bundle(dst))
1410 		return 0;
1411 
1412 	return 1;
1413 }
1414 
1415 static void xfrm_bundle_flo_delete(struct flow_cache_object *flo)
1416 {
1417 	struct xfrm_dst *xdst = container_of(flo, struct xfrm_dst, flo);
1418 	struct dst_entry *dst = &xdst->u.dst;
1419 
1420 	dst_free(dst);
1421 }
1422 
1423 static const struct flow_cache_ops xfrm_bundle_fc_ops = {
1424 	.get = xfrm_bundle_flo_get,
1425 	.check = xfrm_bundle_flo_check,
1426 	.delete = xfrm_bundle_flo_delete,
1427 };
1428 
1429 static inline struct xfrm_dst *xfrm_alloc_dst(struct net *net, int family)
1430 {
1431 	struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
1432 	struct dst_ops *dst_ops;
1433 	struct xfrm_dst *xdst;
1434 
1435 	if (!afinfo)
1436 		return ERR_PTR(-EINVAL);
1437 
1438 	switch (family) {
1439 	case AF_INET:
1440 		dst_ops = &net->xfrm.xfrm4_dst_ops;
1441 		break;
1442 #if IS_ENABLED(CONFIG_IPV6)
1443 	case AF_INET6:
1444 		dst_ops = &net->xfrm.xfrm6_dst_ops;
1445 		break;
1446 #endif
1447 	default:
1448 		BUG();
1449 	}
1450 	xdst = dst_alloc(dst_ops, NULL, 0, DST_OBSOLETE_NONE, 0);
1451 
1452 	if (likely(xdst)) {
1453 		struct dst_entry *dst = &xdst->u.dst;
1454 
1455 		memset(dst + 1, 0, sizeof(*xdst) - sizeof(*dst));
1456 		xdst->flo.ops = &xfrm_bundle_fc_ops;
1457 		if (afinfo->init_dst)
1458 			afinfo->init_dst(net, xdst);
1459 	} else
1460 		xdst = ERR_PTR(-ENOBUFS);
1461 
1462 	xfrm_policy_put_afinfo(afinfo);
1463 
1464 	return xdst;
1465 }
1466 
1467 static inline int xfrm_init_path(struct xfrm_dst *path, struct dst_entry *dst,
1468 				 int nfheader_len)
1469 {
1470 	struct xfrm_policy_afinfo *afinfo =
1471 		xfrm_policy_get_afinfo(dst->ops->family);
1472 	int err;
1473 
1474 	if (!afinfo)
1475 		return -EINVAL;
1476 
1477 	err = afinfo->init_path(path, dst, nfheader_len);
1478 
1479 	xfrm_policy_put_afinfo(afinfo);
1480 
1481 	return err;
1482 }
1483 
1484 static inline int xfrm_fill_dst(struct xfrm_dst *xdst, struct net_device *dev,
1485 				const struct flowi *fl)
1486 {
1487 	struct xfrm_policy_afinfo *afinfo =
1488 		xfrm_policy_get_afinfo(xdst->u.dst.ops->family);
1489 	int err;
1490 
1491 	if (!afinfo)
1492 		return -EINVAL;
1493 
1494 	err = afinfo->fill_dst(xdst, dev, fl);
1495 
1496 	xfrm_policy_put_afinfo(afinfo);
1497 
1498 	return err;
1499 }
1500 
1501 
1502 /* Allocate a chain of dst_entry's, attach known xfrm's, calculate
1503  * all the metrics... In short, bundle a bundle.
1504  */
1505 
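/* The bundle built below is a chain of xfrm_dst's: dst0 is the entry
 * packets hit first, each ->child points at the next transform, and the
 * last child is the original route.  Header and trailer lengths are summed
 * over all transforms and then peeled off level by level, so every dst in
 * the chain reserves room for its own encapsulation and everything inside
 * it.
 */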
1506 static struct dst_entry *xfrm_bundle_create(struct xfrm_policy *policy,
1507 					    struct xfrm_state **xfrm, int nx,
1508 					    const struct flowi *fl,
1509 					    struct dst_entry *dst)
1510 {
1511 	struct net *net = xp_net(policy);
1512 	unsigned long now = jiffies;
1513 	struct net_device *dev;
1514 	struct xfrm_mode *inner_mode;
1515 	struct dst_entry *dst_prev = NULL;
1516 	struct dst_entry *dst0 = NULL;
1517 	int i = 0;
1518 	int err;
1519 	int header_len = 0;
1520 	int nfheader_len = 0;
1521 	int trailer_len = 0;
1522 	int tos;
1523 	int family = policy->selector.family;
1524 	xfrm_address_t saddr, daddr;
1525 
1526 	xfrm_flowi_addr_get(fl, &saddr, &daddr, family);
1527 
1528 	tos = xfrm_get_tos(fl, family);
1529 	err = tos;
1530 	if (tos < 0)
1531 		goto put_states;
1532 
1533 	dst_hold(dst);
1534 
1535 	for (; i < nx; i++) {
1536 		struct xfrm_dst *xdst = xfrm_alloc_dst(net, family);
1537 		struct dst_entry *dst1 = &xdst->u.dst;
1538 
1539 		err = PTR_ERR(xdst);
1540 		if (IS_ERR(xdst)) {
1541 			dst_release(dst);
1542 			goto put_states;
1543 		}
1544 
1545 		if (xfrm[i]->sel.family == AF_UNSPEC) {
1546 			inner_mode = xfrm_ip2inner_mode(xfrm[i],
1547 							xfrm_af2proto(family));
1548 			if (!inner_mode) {
1549 				err = -EAFNOSUPPORT;
1550 				dst_release(dst);
1551 				goto put_states;
1552 			}
1553 		} else
1554 			inner_mode = xfrm[i]->inner_mode;
1555 
1556 		if (!dst_prev)
1557 			dst0 = dst1;
1558 		else {
1559 			dst_prev->child = dst_clone(dst1);
1560 			dst1->flags |= DST_NOHASH;
1561 		}
1562 
1563 		xdst->route = dst;
1564 		dst_copy_metrics(dst1, dst);
1565 
1566 		if (xfrm[i]->props.mode != XFRM_MODE_TRANSPORT) {
1567 			family = xfrm[i]->props.family;
1568 			dst = xfrm_dst_lookup(xfrm[i], tos, &saddr, &daddr,
1569 					      family);
1570 			err = PTR_ERR(dst);
1571 			if (IS_ERR(dst))
1572 				goto put_states;
1573 		} else
1574 			dst_hold(dst);
1575 
1576 		dst1->xfrm = xfrm[i];
1577 		xdst->xfrm_genid = xfrm[i]->genid;
1578 
1579 		dst1->obsolete = DST_OBSOLETE_FORCE_CHK;
1580 		dst1->flags |= DST_HOST;
1581 		dst1->lastuse = now;
1582 
1583 		dst1->input = dst_discard;
1584 		dst1->output = inner_mode->afinfo->output;
1585 
1586 		dst1->next = dst_prev;
1587 		dst_prev = dst1;
1588 
1589 		header_len += xfrm[i]->props.header_len;
1590 		if (xfrm[i]->type->flags & XFRM_TYPE_NON_FRAGMENT)
1591 			nfheader_len += xfrm[i]->props.header_len;
1592 		trailer_len += xfrm[i]->props.trailer_len;
1593 	}
1594 
1595 	dst_prev->child = dst;
1596 	dst0->path = dst;
1597 
1598 	err = -ENODEV;
1599 	dev = dst->dev;
1600 	if (!dev)
1601 		goto free_dst;
1602 
1603 	xfrm_init_path((struct xfrm_dst *)dst0, dst, nfheader_len);
1604 	xfrm_init_pmtu(dst_prev);
1605 
1606 	for (dst_prev = dst0; dst_prev != dst; dst_prev = dst_prev->child) {
1607 		struct xfrm_dst *xdst = (struct xfrm_dst *)dst_prev;
1608 
1609 		err = xfrm_fill_dst(xdst, dev, fl);
1610 		if (err)
1611 			goto free_dst;
1612 
1613 		dst_prev->header_len = header_len;
1614 		dst_prev->trailer_len = trailer_len;
1615 		header_len -= xdst->u.dst.xfrm->props.header_len;
1616 		trailer_len -= xdst->u.dst.xfrm->props.trailer_len;
1617 	}
1618 
1619 out:
1620 	return dst0;
1621 
1622 put_states:
1623 	for (; i < nx; i++)
1624 		xfrm_state_put(xfrm[i]);
1625 free_dst:
1626 	if (dst0)
1627 		dst_free(dst0);
1628 	dst0 = ERR_PTR(err);
1629 	goto out;
1630 }
1631 
1632 #ifdef CONFIG_XFRM_SUB_POLICY
1633 static int xfrm_dst_alloc_copy(void **target, const void *src, int size)
1634 {
1635 	if (!*target) {
1636 		*target = kmalloc(size, GFP_ATOMIC);
1637 		if (!*target)
1638 			return -ENOMEM;
1639 	}
1640 
1641 	memcpy(*target, src, size);
1642 	return 0;
1643 }
1644 #endif
1645 
1646 static int xfrm_dst_update_parent(struct dst_entry *dst,
1647 				  const struct xfrm_selector *sel)
1648 {
1649 #ifdef CONFIG_XFRM_SUB_POLICY
1650 	struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
1651 	return xfrm_dst_alloc_copy((void **)&(xdst->partner),
1652 				   sel, sizeof(*sel));
1653 #else
1654 	return 0;
1655 #endif
1656 }
1657 
1658 static int xfrm_dst_update_origin(struct dst_entry *dst,
1659 				  const struct flowi *fl)
1660 {
1661 #ifdef CONFIG_XFRM_SUB_POLICY
1662 	struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
1663 	return xfrm_dst_alloc_copy((void **)&(xdst->origin), fl, sizeof(*fl));
1664 #else
1665 	return 0;
1666 #endif
1667 }
1668 
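/* Expand the policy array: when the first policy is of a sub type, the
 * backing main policy is looked up and appended, and the template counts
 * are totalled.  *num_xfrms is set to -1 when any policy's action is not
 * ALLOW, which callers treat as a blocking policy.
 */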
1669 static int xfrm_expand_policies(const struct flowi *fl, u16 family,
1670 				struct xfrm_policy **pols,
1671 				int *num_pols, int *num_xfrms)
1672 {
1673 	int i;
1674 
1675 	if (*num_pols == 0 || !pols[0]) {
1676 		*num_pols = 0;
1677 		*num_xfrms = 0;
1678 		return 0;
1679 	}
1680 	if (IS_ERR(pols[0]))
1681 		return PTR_ERR(pols[0]);
1682 
1683 	*num_xfrms = pols[0]->xfrm_nr;
1684 
1685 #ifdef CONFIG_XFRM_SUB_POLICY
1686 	if (pols[0] && pols[0]->action == XFRM_POLICY_ALLOW &&
1687 	    pols[0]->type != XFRM_POLICY_TYPE_MAIN) {
1688 		pols[1] = xfrm_policy_lookup_bytype(xp_net(pols[0]),
1689 						    XFRM_POLICY_TYPE_MAIN,
1690 						    fl, family,
1691 						    XFRM_POLICY_OUT);
1692 		if (pols[1]) {
1693 			if (IS_ERR(pols[1])) {
1694 				xfrm_pols_put(pols, *num_pols);
1695 				return PTR_ERR(pols[1]);
1696 			}
1697 			(*num_pols)++;
1698 			(*num_xfrms) += pols[1]->xfrm_nr;
1699 		}
1700 	}
1701 #endif
1702 	for (i = 0; i < *num_pols; i++) {
1703 		if (pols[i]->action != XFRM_POLICY_ALLOW) {
1704 			*num_xfrms = -1;
1705 			break;
1706 		}
1707 	}
1708 
1709 	return 0;
1710 
1711 }
1712 
1713 static struct xfrm_dst *
1714 xfrm_resolve_and_create_bundle(struct xfrm_policy **pols, int num_pols,
1715 			       const struct flowi *fl, u16 family,
1716 			       struct dst_entry *dst_orig)
1717 {
1718 	struct net *net = xp_net(pols[0]);
1719 	struct xfrm_state *xfrm[XFRM_MAX_DEPTH];
1720 	struct dst_entry *dst;
1721 	struct xfrm_dst *xdst;
1722 	int err;
1723 
1724 	/* Try to instantiate a bundle */
1725 	err = xfrm_tmpl_resolve(pols, num_pols, fl, xfrm, family);
1726 	if (err <= 0) {
1727 		if (err != 0 && err != -EAGAIN)
1728 			XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTPOLERROR);
1729 		return ERR_PTR(err);
1730 	}
1731 
1732 	dst = xfrm_bundle_create(pols[0], xfrm, err, fl, dst_orig);
1733 	if (IS_ERR(dst)) {
1734 		XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTBUNDLEGENERROR);
1735 		return ERR_CAST(dst);
1736 	}
1737 
1738 	xdst = (struct xfrm_dst *)dst;
1739 	xdst->num_xfrms = err;
1740 	if (num_pols > 1)
1741 		err = xfrm_dst_update_parent(dst, &pols[1]->selector);
1742 	else
1743 		err = xfrm_dst_update_origin(dst, fl);
1744 	if (unlikely(err)) {
1745 		dst_free(dst);
1746 		XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTBUNDLECHECKERROR);
1747 		return ERR_PTR(err);
1748 	}
1749 
1750 	xdst->num_pols = num_pols;
1751 	memcpy(xdst->pols, pols, sizeof(struct xfrm_policy *) * num_pols);
1752 	xdst->policy_genid = atomic_read(&pols[0]->genid);
1753 
1754 	return xdst;
1755 }
1756 
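/* Hold-queue timer callback.  Re-resolve the flow of the first queued
 * packet: if the lookup still returns a queueing dst (DST_XFRM_QUEUE), the
 * timeout is doubled and the timer rearmed, and the queue is purged once
 * XFRM_QUEUE_TMO_MAX is reached; otherwise the SAs are in place, so every
 * queued packet is re-routed and transmitted.
 */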
1757 static void xfrm_policy_queue_process(unsigned long arg)
1758 {
1759 	int err = 0;
1760 	struct sk_buff *skb;
1761 	struct sock *sk;
1762 	struct dst_entry *dst;
1763 	struct xfrm_policy *pol = (struct xfrm_policy *)arg;
1764 	struct xfrm_policy_queue *pq = &pol->polq;
1765 	struct flowi fl;
1766 	struct sk_buff_head list;
1767 
1768 	spin_lock(&pq->hold_queue.lock);
1769 	skb = skb_peek(&pq->hold_queue);
1770 	if (!skb) {
1771 		spin_unlock(&pq->hold_queue.lock);
1772 		goto out;
1773 	}
1774 	dst = skb_dst(skb);
1775 	sk = skb->sk;
1776 	xfrm_decode_session(skb, &fl, dst->ops->family);
1777 	spin_unlock(&pq->hold_queue.lock);
1778 
1779 	dst_hold(dst->path);
1780 	dst = xfrm_lookup(xp_net(pol), dst->path, &fl,
1781 			  sk, 0);
1782 	if (IS_ERR(dst))
1783 		goto purge_queue;
1784 
1785 	if (dst->flags & DST_XFRM_QUEUE) {
1786 		dst_release(dst);
1787 
1788 		if (pq->timeout >= XFRM_QUEUE_TMO_MAX)
1789 			goto purge_queue;
1790 
1791 		pq->timeout = pq->timeout << 1;
1792 		if (!mod_timer(&pq->hold_timer, jiffies + pq->timeout))
1793 			xfrm_pol_hold(pol);
1794 		goto out;
1795 	}
1796 
1797 	dst_release(dst);
1798 
1799 	__skb_queue_head_init(&list);
1800 
1801 	spin_lock(&pq->hold_queue.lock);
1802 	pq->timeout = 0;
1803 	skb_queue_splice_init(&pq->hold_queue, &list);
1804 	spin_unlock(&pq->hold_queue.lock);
1805 
1806 	while (!skb_queue_empty(&list)) {
1807 		skb = __skb_dequeue(&list);
1808 
1809 		xfrm_decode_session(skb, &fl, skb_dst(skb)->ops->family);
1810 		dst_hold(skb_dst(skb)->path);
1811 		dst = xfrm_lookup(xp_net(pol), skb_dst(skb)->path,
1812 				  &fl, skb->sk, 0);
1813 		if (IS_ERR(dst)) {
1814 			kfree_skb(skb);
1815 			continue;
1816 		}
1817 
1818 		nf_reset(skb);
1819 		skb_dst_drop(skb);
1820 		skb_dst_set(skb, dst);
1821 
1822 		err = dst_output(skb);
1823 	}
1824 
1825 out:
1826 	xfrm_pol_put(pol);
1827 	return;
1828 
1829 purge_queue:
1830 	pq->timeout = 0;
1831 	xfrm_queue_purge(&pq->hold_queue);
1832 	xfrm_pol_put(pol);
1833 }
1834 
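/* Output handler of a queueing bundle: park the skb on the policy's hold
 * queue until SA negotiation finishes.  Fast-cloned skbs whose companion
 * clone is still in use are dropped rather than queued, the queue length is
 * capped at XFRM_MAX_QUEUE_LEN, and a pending hold timer keeps whichever
 * deadline is earlier.
 */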
1835 static int xdst_queue_output(struct sock *sk, struct sk_buff *skb)
1836 {
1837 	unsigned long sched_next;
1838 	struct dst_entry *dst = skb_dst(skb);
1839 	struct xfrm_dst *xdst = (struct xfrm_dst *) dst;
1840 	struct xfrm_policy *pol = xdst->pols[0];
1841 	struct xfrm_policy_queue *pq = &pol->polq;
1842 	const struct sk_buff *fclone = skb + 1;
1843 
1844 	if (unlikely(skb->fclone == SKB_FCLONE_ORIG &&
1845 		     fclone->fclone == SKB_FCLONE_CLONE)) {
1846 		kfree_skb(skb);
1847 		return 0;
1848 	}
1849 
1850 	if (pq->hold_queue.qlen > XFRM_MAX_QUEUE_LEN) {
1851 		kfree_skb(skb);
1852 		return -EAGAIN;
1853 	}
1854 
1855 	skb_dst_force(skb);
1856 
1857 	spin_lock_bh(&pq->hold_queue.lock);
1858 
1859 	if (!pq->timeout)
1860 		pq->timeout = XFRM_QUEUE_TMO_MIN;
1861 
1862 	sched_next = jiffies + pq->timeout;
1863 
1864 	if (del_timer(&pq->hold_timer)) {
1865 		if (time_before(pq->hold_timer.expires, sched_next))
1866 			sched_next = pq->hold_timer.expires;
1867 		xfrm_pol_put(pol);
1868 	}
1869 
1870 	__skb_queue_tail(&pq->hold_queue, skb);
1871 	if (!mod_timer(&pq->hold_timer, sched_next))
1872 		xfrm_pol_hold(pol);
1873 
1874 	spin_unlock_bh(&pq->hold_queue.lock);
1875 
1876 	return 0;
1877 }
1878 
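/* Build a dummy bundle: a dst flagged DST_XFRM_QUEUE whose output function
 * queues packets while the needed SAs are negotiated.  When
 * sysctl_larval_drop is set, or there is nothing to negotiate, the bare
 * xdst is returned without the queueing setup.
 */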
1879 static struct xfrm_dst *xfrm_create_dummy_bundle(struct net *net,
1880 						 struct dst_entry *dst,
1881 						 const struct flowi *fl,
1882 						 int num_xfrms,
1883 						 u16 family)
1884 {
1885 	int err;
1886 	struct net_device *dev;
1887 	struct dst_entry *dst1;
1888 	struct xfrm_dst *xdst;
1889 
1890 	xdst = xfrm_alloc_dst(net, family);
1891 	if (IS_ERR(xdst))
1892 		return xdst;
1893 
1894 	if (net->xfrm.sysctl_larval_drop || num_xfrms <= 0)
1895 		return xdst;
1896 
1897 	dst1 = &xdst->u.dst;
1898 	dst_hold(dst);
1899 	xdst->route = dst;
1900 
1901 	dst_copy_metrics(dst1, dst);
1902 
1903 	dst1->obsolete = DST_OBSOLETE_FORCE_CHK;
1904 	dst1->flags |= DST_HOST | DST_XFRM_QUEUE;
1905 	dst1->lastuse = jiffies;
1906 
1907 	dst1->input = dst_discard;
1908 	dst1->output = xdst_queue_output;
1909 
1910 	dst_hold(dst);
1911 	dst1->child = dst;
1912 	dst1->path = dst;
1913 
1914 	xfrm_init_path((struct xfrm_dst *)dst1, dst, 0);
1915 
1916 	err = -ENODEV;
1917 	dev = dst->dev;
1918 	if (!dev)
1919 		goto free_dst;
1920 
1921 	err = xfrm_fill_dst(xdst, dev, fl);
1922 	if (err)
1923 		goto free_dst;
1924 
1925 out:
1926 	return xdst;
1927 
1928 free_dst:
1929 	dst_release(dst1);
1930 	xdst = ERR_PTR(err);
1931 	goto out;
1932 }
1933 
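/* Flow cache resolver for bundles.  Reuse the old bundle's policies if they
 * are all still alive, otherwise perform a fresh policy lookup; when
 * template resolution fails with -EAGAIN, fall back to the old bundle or to
 * a dummy (queueing) bundle so traffic can wait for SA negotiation.
 */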
1934 static struct flow_cache_object *
1935 xfrm_bundle_lookup(struct net *net, const struct flowi *fl, u16 family, u8 dir,
1936 		   struct flow_cache_object *oldflo, void *ctx)
1937 {
1938 	struct dst_entry *dst_orig = (struct dst_entry *)ctx;
1939 	struct xfrm_policy *pols[XFRM_POLICY_TYPE_MAX];
1940 	struct xfrm_dst *xdst, *new_xdst;
1941 	int num_pols = 0, num_xfrms = 0, i, err, pol_dead;
1942 
1943 	/* Check if the policies from the old bundle are usable */
1944 	xdst = NULL;
1945 	if (oldflo) {
1946 		xdst = container_of(oldflo, struct xfrm_dst, flo);
1947 		num_pols = xdst->num_pols;
1948 		num_xfrms = xdst->num_xfrms;
1949 		pol_dead = 0;
1950 		for (i = 0; i < num_pols; i++) {
1951 			pols[i] = xdst->pols[i];
1952 			pol_dead |= pols[i]->walk.dead;
1953 		}
1954 		if (pol_dead) {
1955 			dst_free(&xdst->u.dst);
1956 			xdst = NULL;
1957 			num_pols = 0;
1958 			num_xfrms = 0;
1959 			oldflo = NULL;
1960 		}
1961 	}
1962 
1963 	/* Resolve the policies to use if we couldn't get them from
1964 	 * the previous cache entry */
1965 	if (xdst == NULL) {
1966 		num_pols = 1;
1967 		pols[0] = __xfrm_policy_lookup(net, fl, family,
1968 					       flow_to_policy_dir(dir));
1969 		err = xfrm_expand_policies(fl, family, pols,
1970 					   &num_pols, &num_xfrms);
1971 		if (err < 0)
1972 			goto inc_error;
1973 		if (num_pols == 0)
1974 			return NULL;
1975 		if (num_xfrms <= 0)
1976 			goto make_dummy_bundle;
1977 	}
1978 
1979 	new_xdst = xfrm_resolve_and_create_bundle(pols, num_pols, fl, family, dst_orig);
1980 	if (IS_ERR(new_xdst)) {
1981 		err = PTR_ERR(new_xdst);
1982 		if (err != -EAGAIN)
1983 			goto error;
1984 		if (oldflo == NULL)
1985 			goto make_dummy_bundle;
1986 		dst_hold(&xdst->u.dst);
1987 		return oldflo;
1988 	} else if (new_xdst == NULL) {
1989 		num_xfrms = 0;
1990 		if (oldflo == NULL)
1991 			goto make_dummy_bundle;
1992 		xdst->num_xfrms = 0;
1993 		dst_hold(&xdst->u.dst);
1994 		return oldflo;
1995 	}
1996 
1997 	/* Kill the previous bundle */
1998 	if (xdst) {
1999 		/* The policies were stolen for the newly generated bundle */
2000 		xdst->num_pols = 0;
2001 		dst_free(&xdst->u.dst);
2002 	}
2003 
2004 	/* The flow cache does not hold a reference, it dst_free()'s,
2005 	 * but we do need to return one reference for the original caller */
2006 	dst_hold(&new_xdst->u.dst);
2007 	return &new_xdst->flo;
2008 
2009 make_dummy_bundle:
2010 	/* We found policies, but there are no bundles to instantiate:
2011 	 * either because the policy blocks, has no transformations, or
2012 	 * we could not build a template (no xfrm_states). */
2013 	xdst = xfrm_create_dummy_bundle(net, dst_orig, fl, num_xfrms, family);
2014 	if (IS_ERR(xdst)) {
2015 		xfrm_pols_put(pols, num_pols);
2016 		return ERR_CAST(xdst);
2017 	}
2018 	xdst->num_pols = num_pols;
2019 	xdst->num_xfrms = num_xfrms;
2020 	memcpy(xdst->pols, pols, sizeof(struct xfrm_policy *) * num_pols);
2021 
2022 	dst_hold(&xdst->u.dst);
2023 	return &xdst->flo;
2024 
2025 inc_error:
2026 	XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTPOLERROR);
2027 error:
2028 	if (xdst != NULL)
2029 		dst_free(&xdst->u.dst);
2030 	else
2031 		xfrm_pols_put(pols, num_pols);
2032 	return ERR_PTR(err);
2033 }
2034 
2035 static struct dst_entry *make_blackhole(struct net *net, u16 family,
2036 					struct dst_entry *dst_orig)
2037 {
2038 	struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
2039 	struct dst_entry *ret;
2040 
2041 	if (!afinfo) {
2042 		dst_release(dst_orig);
2043 		return ERR_PTR(-EINVAL);
2044 	} else {
2045 		ret = afinfo->blackhole_route(net, dst_orig);
2046 	}
2047 	xfrm_policy_put_afinfo(afinfo);
2048 
2049 	return ret;
2050 }
2051 
2052 /* Main function: finds/creates a bundle for given flow.
2053  *
2054  * At the moment we eat a raw IP route, mostly to speed up lookups
2055  * on interfaces with IPsec disabled.
2056  */
2057 struct dst_entry *xfrm_lookup(struct net *net, struct dst_entry *dst_orig,
2058 			      const struct flowi *fl,
2059 			      struct sock *sk, int flags)
2060 {
2061 	struct xfrm_policy *pols[XFRM_POLICY_TYPE_MAX];
2062 	struct flow_cache_object *flo;
2063 	struct xfrm_dst *xdst;
2064 	struct dst_entry *dst, *route;
2065 	u16 family = dst_orig->ops->family;
2066 	u8 dir = policy_to_flow_dir(XFRM_POLICY_OUT);
2067 	int i, err, num_pols, num_xfrms = 0, drop_pols = 0;
2068 
2069 	dst = NULL;
2070 	xdst = NULL;
2071 	route = NULL;
2072 
2073 	if (sk && sk->sk_policy[XFRM_POLICY_OUT]) {
2074 		num_pols = 1;
2075 		pols[0] = xfrm_sk_policy_lookup(sk, XFRM_POLICY_OUT, fl);
2076 		err = xfrm_expand_policies(fl, family, pols,
2077 					   &num_pols, &num_xfrms);
2078 		if (err < 0)
2079 			goto dropdst;
2080 
2081 		if (num_pols) {
2082 			if (num_xfrms <= 0) {
2083 				drop_pols = num_pols;
2084 				goto no_transform;
2085 			}
2086 
2087 			xdst = xfrm_resolve_and_create_bundle(
2088 					pols, num_pols, fl,
2089 					family, dst_orig);
2090 			if (IS_ERR(xdst)) {
2091 				xfrm_pols_put(pols, num_pols);
2092 				err = PTR_ERR(xdst);
2093 				goto dropdst;
2094 			} else if (xdst == NULL) {
2095 				num_xfrms = 0;
2096 				drop_pols = num_pols;
2097 				goto no_transform;
2098 			}
2099 
2100 			route = xdst->route;
2101 		}
2102 	}
2103 
2104 	if (xdst == NULL) {
2105 		/* To accelerate a bit...  */
2106 		if ((dst_orig->flags & DST_NOXFRM) ||
2107 		    !net->xfrm.policy_count[XFRM_POLICY_OUT])
2108 			goto nopol;
2109 
2110 		flo = flow_cache_lookup(net, fl, family, dir,
2111 					xfrm_bundle_lookup, dst_orig);
2112 		if (flo == NULL)
2113 			goto nopol;
2114 		if (IS_ERR(flo)) {
2115 			err = PTR_ERR(flo);
2116 			goto dropdst;
2117 		}
2118 		xdst = container_of(flo, struct xfrm_dst, flo);
2119 
2120 		num_pols = xdst->num_pols;
2121 		num_xfrms = xdst->num_xfrms;
2122 		memcpy(pols, xdst->pols, sizeof(struct xfrm_policy *) * num_pols);
2123 		route = xdst->route;
2124 	}
2125 
2126 	dst = &xdst->u.dst;
2127 	if (route == NULL && num_xfrms > 0) {
2128 		/* The only case when xfrm_bundle_lookup() returns a
2129 		 * bundle with a null route is when the template could
2130 		 * not be resolved. It means the policies are there, but
2131 		 * the bundle could not be created, since we don't yet
2132 		 * have the xfrm_states. We need to wait for the KM to
2133 		 * negotiate new SAs, or bail out with an error. */
2134 		if (net->xfrm.sysctl_larval_drop) {
2135 			dst_release(dst);
2136 			xfrm_pols_put(pols, drop_pols);
2137 			XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTNOSTATES);
2138 
2139 			return make_blackhole(net, family, dst_orig);
2140 		}
2141 
2142 		err = -EAGAIN;
2143 
2144 		XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTNOSTATES);
2145 		goto error;
2146 	}
2147 
2148 no_transform:
2149 	if (num_pols == 0)
2150 		goto nopol;
2151 
2152 	if ((flags & XFRM_LOOKUP_ICMP) &&
2153 	    !(pols[0]->flags & XFRM_POLICY_ICMP)) {
2154 		err = -ENOENT;
2155 		goto error;
2156 	}
2157 
2158 	for (i = 0; i < num_pols; i++)
2159 		pols[i]->curlft.use_time = get_seconds();
2160 
2161 	if (num_xfrms < 0) {
2162 		/* Prohibit the flow */
2163 		XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTPOLBLOCK);
2164 		err = -EPERM;
2165 		goto error;
2166 	} else if (num_xfrms > 0) {
2167 		/* Flow transformed */
2168 		dst_release(dst_orig);
2169 	} else {
2170 		/* Flow passes untransformed */
2171 		dst_release(dst);
2172 		dst = dst_orig;
2173 	}
2174 ok:
2175 	xfrm_pols_put(pols, drop_pols);
2176 	if (dst && dst->xfrm &&
2177 	    dst->xfrm->props.mode == XFRM_MODE_TUNNEL)
2178 		dst->flags |= DST_XFRM_TUNNEL;
2179 	return dst;
2180 
2181 nopol:
2182 	if (!(flags & XFRM_LOOKUP_ICMP)) {
2183 		dst = dst_orig;
2184 		goto ok;
2185 	}
2186 	err = -ENOENT;
2187 error:
2188 	dst_release(dst);
2189 dropdst:
2190 	dst_release(dst_orig);
2191 	xfrm_pols_put(pols, drop_pols);
2192 	return ERR_PTR(err);
2193 }
2194 EXPORT_SYMBOL(xfrm_lookup);
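/* Example (illustrative sketch, not part of this file): an output-path
 * caller hands xfrm_lookup() a routed dst and gets back either the same
 * dst (no policy applied), a transformed bundle, or an ERR_PTR. The
 * local names below are assumptions:
 *
 *	dst = xfrm_lookup(net, &rt->dst, &fl, sk, 0);
 *	if (IS_ERR(dst))
 *		return PTR_ERR(dst);	(e.g. -EPERM from a blocking policy)
 *	skb_dst_set(skb, dst);		(may or may not be &rt->dst)
 */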
2195 
2196 static inline int
2197 xfrm_secpath_reject(int idx, struct sk_buff *skb, const struct flowi *fl)
2198 {
2199 	struct xfrm_state *x;
2200 
2201 	if (!skb->sp || idx < 0 || idx >= skb->sp->len)
2202 		return 0;
2203 	x = skb->sp->xvec[idx];
2204 	if (!x->type->reject)
2205 		return 0;
2206 	return x->type->reject(x, skb, fl);
2207 }
2208 
2209 /* When the skb is transformed back to its "native" form, we have to
2210  * check policy restrictions. At the moment we do this in a maximally
2211  * stupid way. Shame on me. :-) Of course, connected sockets must
2212  * have their policy cached at them.
2213  */
2214 
2215 static inline int
2216 xfrm_state_ok(const struct xfrm_tmpl *tmpl, const struct xfrm_state *x,
2217 	      unsigned short family)
2218 {
2219 	if (xfrm_state_kern(x))
2220 		return tmpl->optional && !xfrm_state_addr_cmp(tmpl, x, tmpl->encap_family);
2221 	return	x->id.proto == tmpl->id.proto &&
2222 		(x->id.spi == tmpl->id.spi || !tmpl->id.spi) &&
2223 		(x->props.reqid == tmpl->reqid || !tmpl->reqid) &&
2224 		x->props.mode == tmpl->mode &&
2225 		(tmpl->allalgs || (tmpl->aalgos & (1<<x->props.aalgo)) ||
2226 		 !(xfrm_id_proto_match(tmpl->id.proto, IPSEC_PROTO_ANY))) &&
2227 		!(x->props.mode != XFRM_MODE_TRANSPORT &&
2228 		  xfrm_state_addr_cmp(tmpl, x, family));
2229 }
2230 
2231 /*
2232  * A value >= 0 is returned when validation succeeds: either the bypass
2233  * index (optional transport mode) or the index following the secpath
2234  * state that matched the template.
2235  * -1 is returned when no matching template is found.
2236  * Otherwise "-2 - errored_index" is returned.
2237  */
2238 static inline int
2239 xfrm_policy_ok(const struct xfrm_tmpl *tmpl, const struct sec_path *sp, int start,
2240 	       unsigned short family)
2241 {
2242 	int idx = start;
2243 
2244 	if (tmpl->optional) {
2245 		if (tmpl->mode == XFRM_MODE_TRANSPORT)
2246 			return start;
2247 	} else
2248 		start = -1;
2249 	for (; idx < sp->len; idx++) {
2250 		if (xfrm_state_ok(tmpl, sp->xvec[idx], family))
2251 			return ++idx;
2252 		if (sp->xvec[idx]->props.mode != XFRM_MODE_TRANSPORT) {
2253 			if (start == -1)
2254 				start = -2-idx;
2255 			break;
2256 		}
2257 	}
2258 	return start;
2259 }
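/* Example (sketch) of decoding the convention above, mirroring the
 * caller in __xfrm_policy_check() further down:
 *
 *	k = xfrm_policy_ok(tpp[i], sp, k, family);
 *	if (k >= 0)
 *		continue;		(matched; k is the next index to try)
 *	if (k == -1)
 *		goto reject;		(no matching template)
 *	xerr_idx = -(2 + k);		("-2 - errored_index" unpacked)
 *	goto reject;
 */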
2260 
2261 int __xfrm_decode_session(struct sk_buff *skb, struct flowi *fl,
2262 			  unsigned int family, int reverse)
2263 {
2264 	struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
2265 	int err;
2266 
2267 	if (unlikely(afinfo == NULL))
2268 		return -EAFNOSUPPORT;
2269 
2270 	afinfo->decode_session(skb, fl, reverse);
2271 	err = security_xfrm_decode_session(skb, &fl->flowi_secid);
2272 	xfrm_policy_put_afinfo(afinfo);
2273 	return err;
2274 }
2275 EXPORT_SYMBOL(__xfrm_decode_session);
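/* Example (sketch): callers normally use the inline wrappers from
 * <net/xfrm.h>, which fix the "reverse" flag; __xfrm_route_forward()
 * below uses the forward variant:
 *
 *	xfrm_decode_session(skb, &fl, family);		(reverse == 0)
 *	xfrm_decode_session_reverse(skb, &fl, family);	(reverse == 1)
 */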
2276 
2277 static inline int secpath_has_nontransport(const struct sec_path *sp, int k, int *idxp)
2278 {
2279 	for (; k < sp->len; k++) {
2280 		if (sp->xvec[k]->props.mode != XFRM_MODE_TRANSPORT) {
2281 			*idxp = k;
2282 			return 1;
2283 		}
2284 	}
2285 
2286 	return 0;
2287 }
2288 
2289 int __xfrm_policy_check(struct sock *sk, int dir, struct sk_buff *skb,
2290 			unsigned short family)
2291 {
2292 	struct net *net = dev_net(skb->dev);
2293 	struct xfrm_policy *pol;
2294 	struct xfrm_policy *pols[XFRM_POLICY_TYPE_MAX];
2295 	int npols = 0;
2296 	int xfrm_nr;
2297 	int pi;
2298 	int reverse;
2299 	struct flowi fl;
2300 	u8 fl_dir;
2301 	int xerr_idx = -1;
2302 
2303 	reverse = dir & ~XFRM_POLICY_MASK;
2304 	dir &= XFRM_POLICY_MASK;
2305 	fl_dir = policy_to_flow_dir(dir);
2306 
2307 	if (__xfrm_decode_session(skb, &fl, family, reverse) < 0) {
2308 		XFRM_INC_STATS(net, LINUX_MIB_XFRMINHDRERROR);
2309 		return 0;
2310 	}
2311 
2312 	nf_nat_decode_session(skb, &fl, family);
2313 
2314 	/* First, check the used SAs against their selectors. */
2315 	if (skb->sp) {
2316 		int i;
2317 
2318 		for (i = skb->sp->len-1; i >= 0; i--) {
2319 			struct xfrm_state *x = skb->sp->xvec[i];
2320 			if (!xfrm_selector_match(&x->sel, &fl, family)) {
2321 				XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATEMISMATCH);
2322 				return 0;
2323 			}
2324 		}
2325 	}
2326 
2327 	pol = NULL;
2328 	if (sk && sk->sk_policy[dir]) {
2329 		pol = xfrm_sk_policy_lookup(sk, dir, &fl);
2330 		if (IS_ERR(pol)) {
2331 			XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLERROR);
2332 			return 0;
2333 		}
2334 	}
2335 
2336 	if (!pol) {
2337 		struct flow_cache_object *flo;
2338 
2339 		flo = flow_cache_lookup(net, &fl, family, fl_dir,
2340 					xfrm_policy_lookup, NULL);
2341 		if (IS_ERR_OR_NULL(flo))
2342 			pol = ERR_CAST(flo);
2343 		else
2344 			pol = container_of(flo, struct xfrm_policy, flo);
2345 	}
2346 
2347 	if (IS_ERR(pol)) {
2348 		XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLERROR);
2349 		return 0;
2350 	}
2351 
2352 	if (!pol) {
2353 		if (skb->sp && secpath_has_nontransport(skb->sp, 0, &xerr_idx)) {
2354 			xfrm_secpath_reject(xerr_idx, skb, &fl);
2355 			XFRM_INC_STATS(net, LINUX_MIB_XFRMINNOPOLS);
2356 			return 0;
2357 		}
2358 		return 1;
2359 	}
2360 
2361 	pol->curlft.use_time = get_seconds();
2362 
2363 	pols[0] = pol;
2364 	npols++;
2365 #ifdef CONFIG_XFRM_SUB_POLICY
2366 	if (pols[0]->type != XFRM_POLICY_TYPE_MAIN) {
2367 		pols[1] = xfrm_policy_lookup_bytype(net, XFRM_POLICY_TYPE_MAIN,
2368 						    &fl, family,
2369 						    XFRM_POLICY_IN);
2370 		if (pols[1]) {
2371 			if (IS_ERR(pols[1])) {
2372 				XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLERROR);
2373 				return 0;
2374 			}
2375 			pols[1]->curlft.use_time = get_seconds();
2376 			npols++;
2377 		}
2378 	}
2379 #endif
2380 
2381 	if (pol->action == XFRM_POLICY_ALLOW) {
2382 		struct sec_path *sp;
2383 		static struct sec_path dummy;
2384 		struct xfrm_tmpl *tp[XFRM_MAX_DEPTH];
2385 		struct xfrm_tmpl *stp[XFRM_MAX_DEPTH];
2386 		struct xfrm_tmpl **tpp = tp;
2387 		int ti = 0;
2388 		int i, k;
2389 
2390 		if ((sp = skb->sp) == NULL)
2391 			sp = &dummy;
2392 
2393 		for (pi = 0; pi < npols; pi++) {
2394 			if (pols[pi] != pol &&
2395 			    pols[pi]->action != XFRM_POLICY_ALLOW) {
2396 				XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLBLOCK);
2397 				goto reject;
2398 			}
2399 			if (ti + pols[pi]->xfrm_nr >= XFRM_MAX_DEPTH) {
2400 				XFRM_INC_STATS(net, LINUX_MIB_XFRMINBUFFERERROR);
2401 				goto reject_error;
2402 			}
2403 			for (i = 0; i < pols[pi]->xfrm_nr; i++)
2404 				tpp[ti++] = &pols[pi]->xfrm_vec[i];
2405 		}
2406 		xfrm_nr = ti;
2407 		if (npols > 1) {
2408 			xfrm_tmpl_sort(stp, tpp, xfrm_nr, family, net);
2409 			tpp = stp;
2410 		}
2411 
2412 		/* For each tunnel xfrm, find the first matching tmpl.
2413 		 * For each tmpl before that, find the corresponding xfrm.
2414 		 * Order is _important_. Later we will implement
2415 		 * some barriers, but at the moment barriers
2416 		 * are implied between every two transformations.
2417 		 */
2418 		for (i = xfrm_nr-1, k = 0; i >= 0; i--) {
2419 			k = xfrm_policy_ok(tpp[i], sp, k, family);
2420 			if (k < 0) {
2421 				if (k < -1)
2422 					/* "-2 - errored_index" returned */
2423 					xerr_idx = -(2+k);
2424 				XFRM_INC_STATS(net, LINUX_MIB_XFRMINTMPLMISMATCH);
2425 				goto reject;
2426 			}
2427 		}
2428 
2429 		if (secpath_has_nontransport(sp, k, &xerr_idx)) {
2430 			XFRM_INC_STATS(net, LINUX_MIB_XFRMINTMPLMISMATCH);
2431 			goto reject;
2432 		}
2433 
2434 		xfrm_pols_put(pols, npols);
2435 		return 1;
2436 	}
2437 	XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLBLOCK);
2438 
2439 reject:
2440 	xfrm_secpath_reject(xerr_idx, skb, &fl);
2441 reject_error:
2442 	xfrm_pols_put(pols, npols);
2443 	return 0;
2444 }
2445 EXPORT_SYMBOL(__xfrm_policy_check);
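/* Example (illustrative sketch): input-path code normally reaches this
 * function through the xfrm_policy_check() inline from <net/xfrm.h>,
 * which short-circuits when there is nothing to check; usage is roughly:
 *
 *	if (!xfrm_policy_check(sk, XFRM_POLICY_IN, skb, AF_INET))
 *		goto drop;	(failed policy; counters already bumped)
 */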
2446 
2447 int __xfrm_route_forward(struct sk_buff *skb, unsigned short family)
2448 {
2449 	struct net *net = dev_net(skb->dev);
2450 	struct flowi fl;
2451 	struct dst_entry *dst;
2452 	int res = 1;
2453 
2454 	if (xfrm_decode_session(skb, &fl, family) < 0) {
2455 		XFRM_INC_STATS(net, LINUX_MIB_XFRMFWDHDRERROR);
2456 		return 0;
2457 	}
2458 
2459 	skb_dst_force(skb);
2460 
2461 	dst = xfrm_lookup(net, skb_dst(skb), &fl, NULL, 0);
2462 	if (IS_ERR(dst)) {
2463 		res = 0;
2464 		dst = NULL;
2465 	}
2466 	skb_dst_set(skb, dst);
2467 	return res;
2468 }
2469 EXPORT_SYMBOL(__xfrm_route_forward);
2470 
2471 /* Optimize later using cookies and generation ids. */
2472 
2473 static struct dst_entry *xfrm_dst_check(struct dst_entry *dst, u32 cookie)
2474 {
2475 	/* Bundle-creating code (such as xfrm_create_dummy_bundle() above)
2476 	 * sets dst->obsolete to DST_OBSOLETE_FORCE_CHK to force all XFRM
2477 	 * destinations to get validated by dst_ops->check on every use.  We do this
2478 	 * because when a normal route referenced by an XFRM dst is
2479 	 * obsoleted we do not go looking around for all parent
2480 	 * referencing XFRM dsts so that we can invalidate them.  It
2481 	 * is just too much work.  Instead we make the checks here on
2482 	 * every use.  For example:
2483 	 *
2484 	 *	XFRM dst A --> IPv4 dst X
2485 	 *
2486 	 * X is the "xdst->route" of A (X is also the "dst->path" of A
2487 	 * in this example).  If X is marked obsolete, "A" will not
2488 	 * notice.  That's what we are validating here via the
2489 	 * stale_bundle() check.
2490 	 *
2491 	 * When a policy's bundle is pruned, we dst_free() the XFRM
2492 	 * dst, which causes its ->obsolete field to be set to
2493 	 * DST_OBSOLETE_DEAD.  If an XFRM dst has been pruned like
2494 	 * this, we want to force a new route lookup.
2495 	 */
2496 	if (dst->obsolete < 0 && !stale_bundle(dst))
2497 		return dst;
2498 
2499 	return NULL;
2500 }
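/* Example (sketch of the interplay described above): dst_check() from
 * <net/dst.h> consults ->check() only when ->obsolete is non-zero, so
 * DST_OBSOLETE_FORCE_CHK funnels every use of an XFRM dst through
 * xfrm_dst_check():
 *
 *	dst = dst_check(dst, cookie);
 *	if (!dst)
 *		...			(bundle stale or pruned: look up again)
 */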
2501 
2502 static int stale_bundle(struct dst_entry *dst)
2503 {
2504 	return !xfrm_bundle_ok((struct xfrm_dst *)dst);
2505 }
2506 
2507 void xfrm_dst_ifdown(struct dst_entry *dst, struct net_device *dev)
2508 {
2509 	while ((dst = dst->child) && dst->xfrm && dst->dev == dev) {
2510 		dst->dev = dev_net(dev)->loopback_dev;
2511 		dev_hold(dst->dev);
2512 		dev_put(dev);
2513 	}
2514 }
2515 EXPORT_SYMBOL(xfrm_dst_ifdown);
2516 
2517 static void xfrm_link_failure(struct sk_buff *skb)
2518 {
2519 	/* Impossible. Such a dst must be popped before it reaches the point of failure. */
2520 }
2521 
2522 static struct dst_entry *xfrm_negative_advice(struct dst_entry *dst)
2523 {
2524 	if (dst) {
2525 		if (dst->obsolete) {
2526 			dst_release(dst);
2527 			dst = NULL;
2528 		}
2529 	}
2530 	return dst;
2531 }
2532 
2533 void xfrm_garbage_collect(struct net *net)
2534 {
2535 	flow_cache_flush(net);
2536 }
2537 EXPORT_SYMBOL(xfrm_garbage_collect);
2538 
2539 static void xfrm_garbage_collect_deferred(struct net *net)
2540 {
2541 	flow_cache_flush_deferred(net);
2542 }
2543 
2544 static void xfrm_init_pmtu(struct dst_entry *dst)
2545 {
2546 	do {
2547 		struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
2548 		u32 pmtu, route_mtu_cached;
2549 
2550 		pmtu = dst_mtu(dst->child);
2551 		xdst->child_mtu_cached = pmtu;
2552 
2553 		pmtu = xfrm_state_mtu(dst->xfrm, pmtu);
2554 
2555 		route_mtu_cached = dst_mtu(xdst->route);
2556 		xdst->route_mtu_cached = route_mtu_cached;
2557 
2558 		if (pmtu > route_mtu_cached)
2559 			pmtu = route_mtu_cached;
2560 
2561 		dst_metric_set(dst, RTAX_MTU, pmtu);
2562 	} while ((dst = dst->next));
2563 }
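/* Worked example (illustrative numbers, all assumed): with a child MTU
 * of 1500 and an ESP state whose overhead makes xfrm_state_mtu() return
 * 1438, the loop above stores min(1438, route MTU 1500) = 1438 as
 * RTAX_MTU at this level, then repeats one level down for each nested
 * transformation.
 */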
2564 
2565 /* Check that the bundle accepts the flow and its components are
2566  * still valid.
2567  */
2568 
2569 static int xfrm_bundle_ok(struct xfrm_dst *first)
2570 {
2571 	struct dst_entry *dst = &first->u.dst;
2572 	struct xfrm_dst *last;
2573 	u32 mtu;
2574 
2575 	if (!dst_check(dst->path, ((struct xfrm_dst *)dst)->path_cookie) ||
2576 	    (dst->dev && !netif_running(dst->dev)))
2577 		return 0;
2578 
2579 	if (dst->flags & DST_XFRM_QUEUE)
2580 		return 1;
2581 
2582 	last = NULL;
2583 
2584 	do {
2585 		struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
2586 
2587 		if (dst->xfrm->km.state != XFRM_STATE_VALID)
2588 			return 0;
2589 		if (xdst->xfrm_genid != dst->xfrm->genid)
2590 			return 0;
2591 		if (xdst->num_pols > 0 &&
2592 		    xdst->policy_genid != atomic_read(&xdst->pols[0]->genid))
2593 			return 0;
2594 
2595 		mtu = dst_mtu(dst->child);
2596 		if (xdst->child_mtu_cached != mtu) {
2597 			last = xdst;
2598 			xdst->child_mtu_cached = mtu;
2599 		}
2600 
2601 		if (!dst_check(xdst->route, xdst->route_cookie))
2602 			return 0;
2603 		mtu = dst_mtu(xdst->route);
2604 		if (xdst->route_mtu_cached != mtu) {
2605 			last = xdst;
2606 			xdst->route_mtu_cached = mtu;
2607 		}
2608 
2609 		dst = dst->child;
2610 	} while (dst->xfrm);
2611 
2612 	if (likely(!last))
2613 		return 1;
2614 
2615 	mtu = last->child_mtu_cached;
2616 	for (;;) {
2617 		dst = &last->u.dst;
2618 
2619 		mtu = xfrm_state_mtu(dst->xfrm, mtu);
2620 		if (mtu > last->route_mtu_cached)
2621 			mtu = last->route_mtu_cached;
2622 		dst_metric_set(dst, RTAX_MTU, mtu);
2623 
2624 		if (last == first)
2625 			break;
2626 
2627 		last = (struct xfrm_dst *)last->u.dst.next;
2628 		last->child_mtu_cached = mtu;
2629 	}
2630 
2631 	return 1;
2632 }
2633 
2634 static unsigned int xfrm_default_advmss(const struct dst_entry *dst)
2635 {
2636 	return dst_metric_advmss(dst->path);
2637 }
2638 
2639 static unsigned int xfrm_mtu(const struct dst_entry *dst)
2640 {
2641 	unsigned int mtu = dst_metric_raw(dst, RTAX_MTU);
2642 
2643 	return mtu ? : dst_mtu(dst->path);
2644 }
2645 
2646 static struct neighbour *xfrm_neigh_lookup(const struct dst_entry *dst,
2647 					   struct sk_buff *skb,
2648 					   const void *daddr)
2649 {
2650 	return dst->path->ops->neigh_lookup(dst, skb, daddr);
2651 }
2652 
2653 int xfrm_policy_register_afinfo(struct xfrm_policy_afinfo *afinfo)
2654 {
2655 	struct net *net;
2656 	int err = 0;
2657 	if (unlikely(afinfo == NULL))
2658 		return -EINVAL;
2659 	if (unlikely(afinfo->family >= NPROTO))
2660 		return -EAFNOSUPPORT;
2661 	spin_lock(&xfrm_policy_afinfo_lock);
2662 	if (unlikely(xfrm_policy_afinfo[afinfo->family] != NULL))
2663 		err = -ENOBUFS;
2664 	else {
2665 		struct dst_ops *dst_ops = afinfo->dst_ops;
2666 		if (likely(dst_ops->kmem_cachep == NULL))
2667 			dst_ops->kmem_cachep = xfrm_dst_cache;
2668 		if (likely(dst_ops->check == NULL))
2669 			dst_ops->check = xfrm_dst_check;
2670 		if (likely(dst_ops->default_advmss == NULL))
2671 			dst_ops->default_advmss = xfrm_default_advmss;
2672 		if (likely(dst_ops->mtu == NULL))
2673 			dst_ops->mtu = xfrm_mtu;
2674 		if (likely(dst_ops->negative_advice == NULL))
2675 			dst_ops->negative_advice = xfrm_negative_advice;
2676 		if (likely(dst_ops->link_failure == NULL))
2677 			dst_ops->link_failure = xfrm_link_failure;
2678 		if (likely(dst_ops->neigh_lookup == NULL))
2679 			dst_ops->neigh_lookup = xfrm_neigh_lookup;
2680 		if (likely(afinfo->garbage_collect == NULL))
2681 			afinfo->garbage_collect = xfrm_garbage_collect_deferred;
2682 		rcu_assign_pointer(xfrm_policy_afinfo[afinfo->family], afinfo);
2683 	}
2684 	spin_unlock(&xfrm_policy_afinfo_lock);
2685 
2686 	rtnl_lock();
2687 	for_each_net(net) {
2688 		struct dst_ops *xfrm_dst_ops;
2689 
2690 		switch (afinfo->family) {
2691 		case AF_INET:
2692 			xfrm_dst_ops = &net->xfrm.xfrm4_dst_ops;
2693 			break;
2694 #if IS_ENABLED(CONFIG_IPV6)
2695 		case AF_INET6:
2696 			xfrm_dst_ops = &net->xfrm.xfrm6_dst_ops;
2697 			break;
2698 #endif
2699 		default:
2700 			BUG();
2701 		}
2702 		*xfrm_dst_ops = *afinfo->dst_ops;
2703 	}
2704 	rtnl_unlock();
2705 
2706 	return err;
2707 }
2708 EXPORT_SYMBOL(xfrm_policy_register_afinfo);
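/* Example (sketch, assuming the shape of the IPv4 glue in
 * net/ipv4/xfrm4_policy.c): a family module fills in its afinfo once at
 * init time and registers it; handlers left NULL are patched above to
 * the generic xfrm defaults:
 *
 *	static struct xfrm_policy_afinfo xfrm4_policy_afinfo = {
 *		.family		= AF_INET,
 *		.dst_ops	= &xfrm4_dst_ops,
 *		.dst_lookup	= xfrm4_dst_lookup,
 *		...
 *	};
 *
 *	xfrm_policy_register_afinfo(&xfrm4_policy_afinfo);
 */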
2709 
2710 int xfrm_policy_unregister_afinfo(struct xfrm_policy_afinfo *afinfo)
2711 {
2712 	int err = 0;
2713 	if (unlikely(afinfo == NULL))
2714 		return -EINVAL;
2715 	if (unlikely(afinfo->family >= NPROTO))
2716 		return -EAFNOSUPPORT;
2717 	spin_lock(&xfrm_policy_afinfo_lock);
2718 	if (likely(xfrm_policy_afinfo[afinfo->family] != NULL)) {
2719 		if (unlikely(xfrm_policy_afinfo[afinfo->family] != afinfo))
2720 			err = -EINVAL;
2721 		else
2722 			RCU_INIT_POINTER(xfrm_policy_afinfo[afinfo->family],
2723 					 NULL);
2724 	}
2725 	spin_unlock(&xfrm_policy_afinfo_lock);
2726 	if (!err) {
2727 		struct dst_ops *dst_ops = afinfo->dst_ops;
2728 
2729 		synchronize_rcu();
2730 
2731 		dst_ops->kmem_cachep = NULL;
2732 		dst_ops->check = NULL;
2733 		dst_ops->negative_advice = NULL;
2734 		dst_ops->link_failure = NULL;
2735 		afinfo->garbage_collect = NULL;
2736 	}
2737 	return err;
2738 }
2739 EXPORT_SYMBOL(xfrm_policy_unregister_afinfo);
2740 
2741 static void __net_init xfrm_dst_ops_init(struct net *net)
2742 {
2743 	struct xfrm_policy_afinfo *afinfo;
2744 
2745 	rcu_read_lock();
2746 	afinfo = rcu_dereference(xfrm_policy_afinfo[AF_INET]);
2747 	if (afinfo)
2748 		net->xfrm.xfrm4_dst_ops = *afinfo->dst_ops;
2749 #if IS_ENABLED(CONFIG_IPV6)
2750 	afinfo = rcu_dereference(xfrm_policy_afinfo[AF_INET6]);
2751 	if (afinfo)
2752 		net->xfrm.xfrm6_dst_ops = *afinfo->dst_ops;
2753 #endif
2754 	rcu_read_unlock();
2755 }
2756 
2757 static int xfrm_dev_event(struct notifier_block *this, unsigned long event, void *ptr)
2758 {
2759 	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
2760 
2761 	switch (event) {
2762 	case NETDEV_DOWN:
2763 		xfrm_garbage_collect(dev_net(dev));
2764 	}
2765 	return NOTIFY_DONE;
2766 }
2767 
2768 static struct notifier_block xfrm_dev_notifier = {
2769 	.notifier_call	= xfrm_dev_event,
2770 };
2771 
2772 #ifdef CONFIG_XFRM_STATISTICS
2773 static int __net_init xfrm_statistics_init(struct net *net)
2774 {
2775 	int rv;
2776 	net->mib.xfrm_statistics = alloc_percpu(struct linux_xfrm_mib);
2777 	if (!net->mib.xfrm_statistics)
2778 		return -ENOMEM;
2779 	rv = xfrm_proc_init(net);
2780 	if (rv < 0)
2781 		free_percpu(net->mib.xfrm_statistics);
2782 	return rv;
2783 }
2784 
2785 static void xfrm_statistics_fini(struct net *net)
2786 {
2787 	xfrm_proc_fini(net);
2788 	free_percpu(net->mib.xfrm_statistics);
2789 }
2790 #else
2791 static int __net_init xfrm_statistics_init(struct net *net)
2792 {
2793 	return 0;
2794 }
2795 
2796 static void xfrm_statistics_fini(struct net *net)
2797 {
2798 }
2799 #endif
2800 
2801 static int __net_init xfrm_policy_init(struct net *net)
2802 {
2803 	unsigned int hmask, sz;
2804 	int dir;
2805 
2806 	if (net_eq(net, &init_net))
2807 		xfrm_dst_cache = kmem_cache_create("xfrm_dst_cache",
2808 					   sizeof(struct xfrm_dst),
2809 					   0, SLAB_HWCACHE_ALIGN|SLAB_PANIC,
2810 					   NULL);
2811 
2812 	hmask = 8 - 1;
2813 	sz = (hmask+1) * sizeof(struct hlist_head);
2814 
2815 	net->xfrm.policy_byidx = xfrm_hash_alloc(sz);
2816 	if (!net->xfrm.policy_byidx)
2817 		goto out_byidx;
2818 	net->xfrm.policy_idx_hmask = hmask;
2819 
2820 	for (dir = 0; dir < XFRM_POLICY_MAX * 2; dir++) {
2821 		struct xfrm_policy_hash *htab;
2822 
2823 		net->xfrm.policy_count[dir] = 0;
2824 		INIT_HLIST_HEAD(&net->xfrm.policy_inexact[dir]);
2825 
2826 		htab = &net->xfrm.policy_bydst[dir];
2827 		htab->table = xfrm_hash_alloc(sz);
2828 		if (!htab->table)
2829 			goto out_bydst;
2830 		htab->hmask = hmask;
2831 	}
2832 
2833 	INIT_LIST_HEAD(&net->xfrm.policy_all);
2834 	INIT_WORK(&net->xfrm.policy_hash_work, xfrm_hash_resize);
2835 	if (net_eq(net, &init_net))
2836 		register_netdevice_notifier(&xfrm_dev_notifier);
2837 	return 0;
2838 
2839 out_bydst:
2840 	for (dir--; dir >= 0; dir--) {
2841 		struct xfrm_policy_hash *htab;
2842 
2843 		htab = &net->xfrm.policy_bydst[dir];
2844 		xfrm_hash_free(htab->table, sz);
2845 	}
2846 	xfrm_hash_free(net->xfrm.policy_byidx, sz);
2847 out_byidx:
2848 	return -ENOMEM;
2849 }
2850 
2851 static void xfrm_policy_fini(struct net *net)
2852 {
2853 	unsigned int sz;
2854 	int dir;
2855 
2856 	flush_work(&net->xfrm.policy_hash_work);
2857 #ifdef CONFIG_XFRM_SUB_POLICY
2858 	xfrm_policy_flush(net, XFRM_POLICY_TYPE_SUB, false);
2859 #endif
2860 	xfrm_policy_flush(net, XFRM_POLICY_TYPE_MAIN, false);
2861 
2862 	WARN_ON(!list_empty(&net->xfrm.policy_all));
2863 
2864 	for (dir = 0; dir < XFRM_POLICY_MAX * 2; dir++) {
2865 		struct xfrm_policy_hash *htab;
2866 
2867 		WARN_ON(!hlist_empty(&net->xfrm.policy_inexact[dir]));
2868 
2869 		htab = &net->xfrm.policy_bydst[dir];
2870 		sz = (htab->hmask + 1) * sizeof(struct hlist_head);
2871 		WARN_ON(!hlist_empty(htab->table));
2872 		xfrm_hash_free(htab->table, sz);
2873 	}
2874 
2875 	sz = (net->xfrm.policy_idx_hmask + 1) * sizeof(struct hlist_head);
2876 	WARN_ON(!hlist_empty(net->xfrm.policy_byidx));
2877 	xfrm_hash_free(net->xfrm.policy_byidx, sz);
2878 }
2879 
2880 static int __net_init xfrm_net_init(struct net *net)
2881 {
2882 	int rv;
2883 
2884 	rv = xfrm_statistics_init(net);
2885 	if (rv < 0)
2886 		goto out_statistics;
2887 	rv = xfrm_state_init(net);
2888 	if (rv < 0)
2889 		goto out_state;
2890 	rv = xfrm_policy_init(net);
2891 	if (rv < 0)
2892 		goto out_policy;
2893 	xfrm_dst_ops_init(net);
2894 	rv = xfrm_sysctl_init(net);
2895 	if (rv < 0)
2896 		goto out_sysctl;
2897 	rv = flow_cache_init(net);
2898 	if (rv < 0)
2899 		goto out;
2900 
2901 	/* Initialize the per-net locks here */
2902 	spin_lock_init(&net->xfrm.xfrm_state_lock);
2903 	rwlock_init(&net->xfrm.xfrm_policy_lock);
2904 	mutex_init(&net->xfrm.xfrm_cfg_mutex);
2905 
2906 	return 0;
2907 
2908 out:
2909 	xfrm_sysctl_fini(net);
2910 out_sysctl:
2911 	xfrm_policy_fini(net);
2912 out_policy:
2913 	xfrm_state_fini(net);
2914 out_state:
2915 	xfrm_statistics_fini(net);
2916 out_statistics:
2917 	return rv;
2918 }
2919 
2920 static void __net_exit xfrm_net_exit(struct net *net)
2921 {
2922 	flow_cache_fini(net);
2923 	xfrm_sysctl_fini(net);
2924 	xfrm_policy_fini(net);
2925 	xfrm_state_fini(net);
2926 	xfrm_statistics_fini(net);
2927 }
2928 
2929 static struct pernet_operations __net_initdata xfrm_net_ops = {
2930 	.init = xfrm_net_init,
2931 	.exit = xfrm_net_exit,
2932 };
2933 
2934 void __init xfrm_init(void)
2935 {
2936 	register_pernet_subsys(&xfrm_net_ops);
2937 	xfrm_input_init();
2938 }
2939 
2940 #ifdef CONFIG_AUDITSYSCALL
2941 static void xfrm_audit_common_policyinfo(struct xfrm_policy *xp,
2942 					 struct audit_buffer *audit_buf)
2943 {
2944 	struct xfrm_sec_ctx *ctx = xp->security;
2945 	struct xfrm_selector *sel = &xp->selector;
2946 
2947 	if (ctx)
2948 		audit_log_format(audit_buf, " sec_alg=%u sec_doi=%u sec_obj=%s",
2949 				 ctx->ctx_alg, ctx->ctx_doi, ctx->ctx_str);
2950 
2951 	switch (sel->family) {
2952 	case AF_INET:
2953 		audit_log_format(audit_buf, " src=%pI4", &sel->saddr.a4);
2954 		if (sel->prefixlen_s != 32)
2955 			audit_log_format(audit_buf, " src_prefixlen=%d",
2956 					 sel->prefixlen_s);
2957 		audit_log_format(audit_buf, " dst=%pI4", &sel->daddr.a4);
2958 		if (sel->prefixlen_d != 32)
2959 			audit_log_format(audit_buf, " dst_prefixlen=%d",
2960 					 sel->prefixlen_d);
2961 		break;
2962 	case AF_INET6:
2963 		audit_log_format(audit_buf, " src=%pI6", sel->saddr.a6);
2964 		if (sel->prefixlen_s != 128)
2965 			audit_log_format(audit_buf, " src_prefixlen=%d",
2966 					 sel->prefixlen_s);
2967 		audit_log_format(audit_buf, " dst=%pI6", sel->daddr.a6);
2968 		if (sel->prefixlen_d != 128)
2969 			audit_log_format(audit_buf, " dst_prefixlen=%d",
2970 					 sel->prefixlen_d);
2971 		break;
2972 	}
2973 }
2974 
2975 void xfrm_audit_policy_add(struct xfrm_policy *xp, int result, bool task_valid)
2976 {
2977 	struct audit_buffer *audit_buf;
2978 
2979 	audit_buf = xfrm_audit_start("SPD-add");
2980 	if (audit_buf == NULL)
2981 		return;
2982 	xfrm_audit_helper_usrinfo(task_valid, audit_buf);
2983 	audit_log_format(audit_buf, " res=%u", result);
2984 	xfrm_audit_common_policyinfo(xp, audit_buf);
2985 	audit_log_end(audit_buf);
2986 }
2987 EXPORT_SYMBOL_GPL(xfrm_audit_policy_add);
2988 
2989 void xfrm_audit_policy_delete(struct xfrm_policy *xp, int result,
2990 			      bool task_valid)
2991 {
2992 	struct audit_buffer *audit_buf;
2993 
2994 	audit_buf = xfrm_audit_start("SPD-delete");
2995 	if (audit_buf == NULL)
2996 		return;
2997 	xfrm_audit_helper_usrinfo(task_valid, audit_buf);
2998 	audit_log_format(audit_buf, " res=%u", result);
2999 	xfrm_audit_common_policyinfo(xp, audit_buf);
3000 	audit_log_end(audit_buf);
3001 }
3002 EXPORT_SYMBOL_GPL(xfrm_audit_policy_delete);
3003 #endif
3004 
3005 #ifdef CONFIG_XFRM_MIGRATE
3006 static bool xfrm_migrate_selector_match(const struct xfrm_selector *sel_cmp,
3007 					const struct xfrm_selector *sel_tgt)
3008 {
3009 	if (sel_cmp->proto == IPSEC_ULPROTO_ANY) {
3010 		if (sel_tgt->family == sel_cmp->family &&
3011 		    xfrm_addr_equal(&sel_tgt->daddr, &sel_cmp->daddr,
3012 				    sel_cmp->family) &&
3013 		    xfrm_addr_equal(&sel_tgt->saddr, &sel_cmp->saddr,
3014 				    sel_cmp->family) &&
3015 		    sel_tgt->prefixlen_d == sel_cmp->prefixlen_d &&
3016 		    sel_tgt->prefixlen_s == sel_cmp->prefixlen_s) {
3017 			return true;
3018 		}
3019 	} else {
3020 		if (memcmp(sel_tgt, sel_cmp, sizeof(*sel_tgt)) == 0) {
3021 			return true;
3022 		}
3023 	}
3024 	return false;
3025 }
3026 
3027 static struct xfrm_policy *xfrm_migrate_policy_find(const struct xfrm_selector *sel,
3028 						    u8 dir, u8 type, struct net *net)
3029 {
3030 	struct xfrm_policy *pol, *ret = NULL;
3031 	struct hlist_head *chain;
3032 	u32 priority = ~0U;
3033 
3034 	read_lock_bh(&net->xfrm.xfrm_policy_lock); /*FIXME*/
3035 	chain = policy_hash_direct(net, &sel->daddr, &sel->saddr, sel->family, dir);
3036 	hlist_for_each_entry(pol, chain, bydst) {
3037 		if (xfrm_migrate_selector_match(sel, &pol->selector) &&
3038 		    pol->type == type) {
3039 			ret = pol;
3040 			priority = ret->priority;
3041 			break;
3042 		}
3043 	}
3044 	chain = &net->xfrm.policy_inexact[dir];
3045 	hlist_for_each_entry(pol, chain, bydst) {
3046 		if (xfrm_migrate_selector_match(sel, &pol->selector) &&
3047 		    pol->type == type &&
3048 		    pol->priority < priority) {
3049 			ret = pol;
3050 			break;
3051 		}
3052 	}
3053 
3054 	if (ret)
3055 		xfrm_pol_hold(ret);
3056 
3057 	read_unlock_bh(&net->xfrm.xfrm_policy_lock);
3058 
3059 	return ret;
3060 }
3061 
3062 static int migrate_tmpl_match(const struct xfrm_migrate *m, const struct xfrm_tmpl *t)
3063 {
3064 	int match = 0;
3065 
3066 	if (t->mode == m->mode && t->id.proto == m->proto &&
3067 	    (m->reqid == 0 || t->reqid == m->reqid)) {
3068 		switch (t->mode) {
3069 		case XFRM_MODE_TUNNEL:
3070 		case XFRM_MODE_BEET:
3071 			if (xfrm_addr_equal(&t->id.daddr, &m->old_daddr,
3072 					    m->old_family) &&
3073 			    xfrm_addr_equal(&t->saddr, &m->old_saddr,
3074 					    m->old_family)) {
3075 				match = 1;
3076 			}
3077 			break;
3078 		case XFRM_MODE_TRANSPORT:
3079 			/* In transport mode the template does not store
3080 			   any IP addresses, hence we just compare mode and
3081 			   protocol. */
3082 			match = 1;
3083 			break;
3084 		default:
3085 			break;
3086 		}
3087 	}
3088 	return match;
3089 }
3090 
3091 /* update endpoint address(es) of template(s) */
3092 static int xfrm_policy_migrate(struct xfrm_policy *pol,
3093 			       struct xfrm_migrate *m, int num_migrate)
3094 {
3095 	struct xfrm_migrate *mp;
3096 	int i, j, n = 0;
3097 
3098 	write_lock_bh(&pol->lock);
3099 	if (unlikely(pol->walk.dead)) {
3100 		/* target policy has been deleted */
3101 		write_unlock_bh(&pol->lock);
3102 		return -ENOENT;
3103 	}
3104 
3105 	for (i = 0; i < pol->xfrm_nr; i++) {
3106 		for (j = 0, mp = m; j < num_migrate; j++, mp++) {
3107 			if (!migrate_tmpl_match(mp, &pol->xfrm_vec[i]))
3108 				continue;
3109 			n++;
3110 			if (pol->xfrm_vec[i].mode != XFRM_MODE_TUNNEL &&
3111 			    pol->xfrm_vec[i].mode != XFRM_MODE_BEET)
3112 				continue;
3113 			/* update endpoints */
3114 			memcpy(&pol->xfrm_vec[i].id.daddr, &mp->new_daddr,
3115 			       sizeof(pol->xfrm_vec[i].id.daddr));
3116 			memcpy(&pol->xfrm_vec[i].saddr, &mp->new_saddr,
3117 			       sizeof(pol->xfrm_vec[i].saddr));
3118 			pol->xfrm_vec[i].encap_family = mp->new_family;
3119 			/* flush bundles */
3120 			atomic_inc(&pol->genid);
3121 		}
3122 	}
3123 
3124 	write_unlock_bh(&pol->lock);
3125 
3126 	if (!n)
3127 		return -ENODATA;
3128 
3129 	return 0;
3130 }
3131 
3132 static int xfrm_migrate_check(const struct xfrm_migrate *m, int num_migrate)
3133 {
3134 	int i, j;
3135 
3136 	if (num_migrate < 1 || num_migrate > XFRM_MAX_DEPTH)
3137 		return -EINVAL;
3138 
3139 	for (i = 0; i < num_migrate; i++) {
3140 		if (xfrm_addr_equal(&m[i].old_daddr, &m[i].new_daddr,
3141 				    m[i].old_family) &&
3142 		    xfrm_addr_equal(&m[i].old_saddr, &m[i].new_saddr,
3143 				    m[i].old_family))
3144 			return -EINVAL;
3145 		if (xfrm_addr_any(&m[i].new_daddr, m[i].new_family) ||
3146 		    xfrm_addr_any(&m[i].new_saddr, m[i].new_family))
3147 			return -EINVAL;
3148 
3149 		/* check if there are any duplicated entries */
3150 		for (j = i + 1; j < num_migrate; j++) {
3151 			if (!memcmp(&m[i].old_daddr, &m[j].old_daddr,
3152 				    sizeof(m[i].old_daddr)) &&
3153 			    !memcmp(&m[i].old_saddr, &m[j].old_saddr,
3154 				    sizeof(m[i].old_saddr)) &&
3155 			    m[i].proto == m[j].proto &&
3156 			    m[i].mode == m[j].mode &&
3157 			    m[i].reqid == m[j].reqid &&
3158 			    m[i].old_family == m[j].old_family)
3159 				return -EINVAL;
3160 		}
3161 	}
3162 
3163 	return 0;
3164 }
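/* Illustrative shape of a valid migrate entry per the checks above (a
 * sketch; the concrete endpoint values are elided assumptions):
 *
 *	struct xfrm_migrate m = {
 *		.proto		= IPPROTO_ESP,
 *		.mode		= XFRM_MODE_TUNNEL,
 *		.old_family	= AF_INET,
 *		.new_family	= AF_INET,
 *		.old_daddr	= ...,	(current tunnel endpoint)
 *		.new_daddr	= ...,	(must differ and be non-any)
 *	};
 */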
3165 
3166 int xfrm_migrate(const struct xfrm_selector *sel, u8 dir, u8 type,
3167 		 struct xfrm_migrate *m, int num_migrate,
3168 		 struct xfrm_kmaddress *k, struct net *net)
3169 {
3170 	int i, err, nx_cur = 0, nx_new = 0;
3171 	struct xfrm_policy *pol = NULL;
3172 	struct xfrm_state *x, *xc;
3173 	struct xfrm_state *x_cur[XFRM_MAX_DEPTH];
3174 	struct xfrm_state *x_new[XFRM_MAX_DEPTH];
3175 	struct xfrm_migrate *mp;
3176 
3177 	if ((err = xfrm_migrate_check(m, num_migrate)) < 0)
3178 		goto out;
3179 
3180 	/* Stage 1 - find policy */
3181 	if ((pol = xfrm_migrate_policy_find(sel, dir, type, net)) == NULL) {
3182 		err = -ENOENT;
3183 		goto out;
3184 	}
3185 
3186 	/* Stage 2 - find and update state(s) */
3187 	for (i = 0, mp = m; i < num_migrate; i++, mp++) {
3188 		if ((x = xfrm_migrate_state_find(mp, net))) {
3189 			x_cur[nx_cur] = x;
3190 			nx_cur++;
3191 			if ((xc = xfrm_state_migrate(x, mp))) {
3192 				x_new[nx_new] = xc;
3193 				nx_new++;
3194 			} else {
3195 				err = -ENODATA;
3196 				goto restore_state;
3197 			}
3198 		}
3199 	}
3200 
3201 	/* Stage 3 - update policy */
3202 	if ((err = xfrm_policy_migrate(pol, m, num_migrate)) < 0)
3203 		goto restore_state;
3204 
3205 	/* Stage 4 - delete old state(s) */
3206 	if (nx_cur) {
3207 		xfrm_states_put(x_cur, nx_cur);
3208 		xfrm_states_delete(x_cur, nx_cur);
3209 	}
3210 
3211 	/* Stage 5 - announce */
3212 	km_migrate(sel, dir, type, m, num_migrate, k);
3213 
3214 	xfrm_pol_put(pol);
3215 
3216 	return 0;
3217 out:
3218 	return err;
3219 
3220 restore_state:
3221 	if (pol)
3222 		xfrm_pol_put(pol);
3223 	if (nx_cur)
3224 		xfrm_states_put(x_cur, nx_cur);
3225 	if (nx_new)
3226 		xfrm_states_delete(x_new, nx_new);
3227 
3228 	return err;
3229 }
3230 EXPORT_SYMBOL(xfrm_migrate);
3231 #endif
3232