xref: /openbmc/linux/net/xfrm/xfrm_policy.c (revision aa0dc6a7)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * xfrm_policy.c
4  *
5  * Changes:
6  *	Mitsuru KANDA @USAGI
7  * 	Kazunori MIYAZAWA @USAGI
8  * 	Kunihiro Ishiguro <kunihiro@ipinfusion.com>
9  * 		IPv6 support
10  * 	Kazunori MIYAZAWA @USAGI
11  * 	YOSHIFUJI Hideaki
12  * 		Split up af-specific portion
13  *	Derek Atkins <derek@ihtfp.com>		Add the post_input processor
14  *
15  */
16 
17 #include <linux/err.h>
18 #include <linux/slab.h>
19 #include <linux/kmod.h>
20 #include <linux/list.h>
21 #include <linux/spinlock.h>
22 #include <linux/workqueue.h>
23 #include <linux/notifier.h>
24 #include <linux/netdevice.h>
25 #include <linux/netfilter.h>
26 #include <linux/module.h>
27 #include <linux/cache.h>
28 #include <linux/cpu.h>
29 #include <linux/audit.h>
30 #include <linux/rhashtable.h>
31 #include <linux/if_tunnel.h>
32 #include <net/dst.h>
33 #include <net/flow.h>
34 #include <net/xfrm.h>
35 #include <net/ip.h>
36 #if IS_ENABLED(CONFIG_IPV6_MIP6)
37 #include <net/mip6.h>
38 #endif
39 #ifdef CONFIG_XFRM_STATISTICS
40 #include <net/snmp.h>
41 #endif
42 #ifdef CONFIG_XFRM_ESPINTCP
43 #include <net/espintcp.h>
44 #endif
45 
46 #include "xfrm_hash.h"
47 
48 #define XFRM_QUEUE_TMO_MIN ((unsigned)(HZ/10))
49 #define XFRM_QUEUE_TMO_MAX ((unsigned)(60*HZ))
50 #define XFRM_MAX_QUEUE_LEN	100
51 
52 struct xfrm_flo {
53 	struct dst_entry *dst_orig;
54 	u8 flags;
55 };
56 
57 /* prefixes smaller than this are stored in lists, not trees. */
58 #define INEXACT_PREFIXLEN_IPV4	16
59 #define INEXACT_PREFIXLEN_IPV6	48
60 
61 struct xfrm_pol_inexact_node {
62 	struct rb_node node;
63 	union {
64 		xfrm_address_t addr;
65 		struct rcu_head rcu;
66 	};
67 	u8 prefixlen;
68 
69 	struct rb_root root;
70 
71 	/* the policies matching this node, can be empty list */
72 	struct hlist_head hhead;
73 };
74 
75 /* xfrm inexact policy search tree:
76  * xfrm_pol_inexact_bin = hash(dir,type,family,if_id);
77  *  |
78  * +---- root_d: sorted by daddr:prefix
79  * |                 |
80  * |        xfrm_pol_inexact_node
81  * |                 |
82  * |                 +- root: sorted by saddr/prefix
83  * |                 |              |
84  * |                 |         xfrm_pol_inexact_node
85  * |                 |              |
86  * |                 |              + root: unused
87  * |                 |              |
88  * |                 |              + hhead: saddr:daddr policies
89  * |                 |
90  * |                 +- coarse policies and all any:daddr policies
91  * |
92  * +---- root_s: sorted by saddr:prefix
93  * |                 |
94  * |        xfrm_pol_inexact_node
95  * |                 |
96  * |                 + root: unused
97  * |                 |
98  * |                 + hhead: saddr:any policies
99  * |
100  * +---- coarse policies and all any:any policies
101  *
102  * Lookups return four candidate lists:
103  * 1. any:any list from top-level xfrm_pol_inexact_bin
104  * 2. any:daddr list from daddr tree
105  * 3. saddr:daddr list from 2nd level daddr tree
106  * 4. saddr:any list from saddr tree
107  *
108  * This result set then needs to be searched for the policy with
109  * the lowest priority.  If two results have same prio, youngest one wins.
110  */
111 
112 struct xfrm_pol_inexact_key {
113 	possible_net_t net;
114 	u32 if_id;
115 	u16 family;
116 	u8 dir, type;
117 };
118 
119 struct xfrm_pol_inexact_bin {
120 	struct xfrm_pol_inexact_key k;
121 	struct rhash_head head;
122 	/* list containing '*:*' policies */
123 	struct hlist_head hhead;
124 
125 	seqcount_spinlock_t count;
126 	/* tree sorted by daddr/prefix */
127 	struct rb_root root_d;
128 
129 	/* tree sorted by saddr/prefix */
130 	struct rb_root root_s;
131 
132 	/* slow path below */
133 	struct list_head inexact_bins;
134 	struct rcu_head rcu;
135 };
136 
137 enum xfrm_pol_inexact_candidate_type {
138 	XFRM_POL_CAND_BOTH,
139 	XFRM_POL_CAND_SADDR,
140 	XFRM_POL_CAND_DADDR,
141 	XFRM_POL_CAND_ANY,
142 
143 	XFRM_POL_CAND_MAX,
144 };
145 
146 struct xfrm_pol_inexact_candidates {
147 	struct hlist_head *res[XFRM_POL_CAND_MAX];
148 };
149 
150 static DEFINE_SPINLOCK(xfrm_if_cb_lock);
151 static struct xfrm_if_cb const __rcu *xfrm_if_cb __read_mostly;
152 
153 static DEFINE_SPINLOCK(xfrm_policy_afinfo_lock);
154 static struct xfrm_policy_afinfo const __rcu *xfrm_policy_afinfo[AF_INET6 + 1]
155 						__read_mostly;
156 
157 static struct kmem_cache *xfrm_dst_cache __ro_after_init;
158 static __read_mostly seqcount_mutex_t xfrm_policy_hash_generation;
159 
160 static struct rhashtable xfrm_policy_inexact_table;
161 static const struct rhashtable_params xfrm_pol_inexact_params;
162 
163 static void xfrm_init_pmtu(struct xfrm_dst **bundle, int nr);
164 static int stale_bundle(struct dst_entry *dst);
165 static int xfrm_bundle_ok(struct xfrm_dst *xdst);
166 static void xfrm_policy_queue_process(struct timer_list *t);
167 
168 static void __xfrm_policy_link(struct xfrm_policy *pol, int dir);
169 static struct xfrm_policy *__xfrm_policy_unlink(struct xfrm_policy *pol,
170 						int dir);
171 
172 static struct xfrm_pol_inexact_bin *
173 xfrm_policy_inexact_lookup(struct net *net, u8 type, u16 family, u8 dir,
174 			   u32 if_id);
175 
176 static struct xfrm_pol_inexact_bin *
177 xfrm_policy_inexact_lookup_rcu(struct net *net,
178 			       u8 type, u16 family, u8 dir, u32 if_id);
179 static struct xfrm_policy *
180 xfrm_policy_insert_list(struct hlist_head *chain, struct xfrm_policy *policy,
181 			bool excl);
182 static void xfrm_policy_insert_inexact_list(struct hlist_head *chain,
183 					    struct xfrm_policy *policy);
184 
185 static bool
186 xfrm_policy_find_inexact_candidates(struct xfrm_pol_inexact_candidates *cand,
187 				    struct xfrm_pol_inexact_bin *b,
188 				    const xfrm_address_t *saddr,
189 				    const xfrm_address_t *daddr);
190 
191 static inline bool xfrm_pol_hold_rcu(struct xfrm_policy *policy)
192 {
193 	return refcount_inc_not_zero(&policy->refcnt);
194 }
195 
196 static inline bool
197 __xfrm4_selector_match(const struct xfrm_selector *sel, const struct flowi *fl)
198 {
199 	const struct flowi4 *fl4 = &fl->u.ip4;
200 
201 	return  addr4_match(fl4->daddr, sel->daddr.a4, sel->prefixlen_d) &&
202 		addr4_match(fl4->saddr, sel->saddr.a4, sel->prefixlen_s) &&
203 		!((xfrm_flowi_dport(fl, &fl4->uli) ^ sel->dport) & sel->dport_mask) &&
204 		!((xfrm_flowi_sport(fl, &fl4->uli) ^ sel->sport) & sel->sport_mask) &&
205 		(fl4->flowi4_proto == sel->proto || !sel->proto) &&
206 		(fl4->flowi4_oif == sel->ifindex || !sel->ifindex);
207 }
208 
209 static inline bool
210 __xfrm6_selector_match(const struct xfrm_selector *sel, const struct flowi *fl)
211 {
212 	const struct flowi6 *fl6 = &fl->u.ip6;
213 
214 	return  addr_match(&fl6->daddr, &sel->daddr, sel->prefixlen_d) &&
215 		addr_match(&fl6->saddr, &sel->saddr, sel->prefixlen_s) &&
216 		!((xfrm_flowi_dport(fl, &fl6->uli) ^ sel->dport) & sel->dport_mask) &&
217 		!((xfrm_flowi_sport(fl, &fl6->uli) ^ sel->sport) & sel->sport_mask) &&
218 		(fl6->flowi6_proto == sel->proto || !sel->proto) &&
219 		(fl6->flowi6_oif == sel->ifindex || !sel->ifindex);
220 }
221 
222 bool xfrm_selector_match(const struct xfrm_selector *sel, const struct flowi *fl,
223 			 unsigned short family)
224 {
225 	switch (family) {
226 	case AF_INET:
227 		return __xfrm4_selector_match(sel, fl);
228 	case AF_INET6:
229 		return __xfrm6_selector_match(sel, fl);
230 	}
231 	return false;
232 }
233 
234 static const struct xfrm_policy_afinfo *xfrm_policy_get_afinfo(unsigned short family)
235 {
236 	const struct xfrm_policy_afinfo *afinfo;
237 
238 	if (unlikely(family >= ARRAY_SIZE(xfrm_policy_afinfo)))
239 		return NULL;
240 	rcu_read_lock();
241 	afinfo = rcu_dereference(xfrm_policy_afinfo[family]);
242 	if (unlikely(!afinfo))
243 		rcu_read_unlock();
244 	return afinfo;
245 }
246 
247 /* Called with rcu_read_lock(). */
248 static const struct xfrm_if_cb *xfrm_if_get_cb(void)
249 {
250 	return rcu_dereference(xfrm_if_cb);
251 }
252 
253 struct dst_entry *__xfrm_dst_lookup(struct net *net, int tos, int oif,
254 				    const xfrm_address_t *saddr,
255 				    const xfrm_address_t *daddr,
256 				    int family, u32 mark)
257 {
258 	const struct xfrm_policy_afinfo *afinfo;
259 	struct dst_entry *dst;
260 
261 	afinfo = xfrm_policy_get_afinfo(family);
262 	if (unlikely(afinfo == NULL))
263 		return ERR_PTR(-EAFNOSUPPORT);
264 
265 	dst = afinfo->dst_lookup(net, tos, oif, saddr, daddr, mark);
266 
267 	rcu_read_unlock();
268 
269 	return dst;
270 }
271 EXPORT_SYMBOL(__xfrm_dst_lookup);
272 
273 static inline struct dst_entry *xfrm_dst_lookup(struct xfrm_state *x,
274 						int tos, int oif,
275 						xfrm_address_t *prev_saddr,
276 						xfrm_address_t *prev_daddr,
277 						int family, u32 mark)
278 {
279 	struct net *net = xs_net(x);
280 	xfrm_address_t *saddr = &x->props.saddr;
281 	xfrm_address_t *daddr = &x->id.daddr;
282 	struct dst_entry *dst;
283 
284 	if (x->type->flags & XFRM_TYPE_LOCAL_COADDR) {
285 		saddr = x->coaddr;
286 		daddr = prev_daddr;
287 	}
288 	if (x->type->flags & XFRM_TYPE_REMOTE_COADDR) {
289 		saddr = prev_saddr;
290 		daddr = x->coaddr;
291 	}
292 
293 	dst = __xfrm_dst_lookup(net, tos, oif, saddr, daddr, family, mark);
294 
295 	if (!IS_ERR(dst)) {
296 		if (prev_saddr != saddr)
297 			memcpy(prev_saddr, saddr,  sizeof(*prev_saddr));
298 		if (prev_daddr != daddr)
299 			memcpy(prev_daddr, daddr,  sizeof(*prev_daddr));
300 	}
301 
302 	return dst;
303 }
304 
305 static inline unsigned long make_jiffies(long secs)
306 {
307 	if (secs >= (MAX_SCHEDULE_TIMEOUT-1)/HZ)
308 		return MAX_SCHEDULE_TIMEOUT-1;
309 	else
310 		return secs*HZ;
311 }
312 
313 static void xfrm_policy_timer(struct timer_list *t)
314 {
315 	struct xfrm_policy *xp = from_timer(xp, t, timer);
316 	time64_t now = ktime_get_real_seconds();
317 	time64_t next = TIME64_MAX;
318 	int warn = 0;
319 	int dir;
320 
321 	read_lock(&xp->lock);
322 
323 	if (unlikely(xp->walk.dead))
324 		goto out;
325 
326 	dir = xfrm_policy_id2dir(xp->index);
327 
328 	if (xp->lft.hard_add_expires_seconds) {
329 		time64_t tmo = xp->lft.hard_add_expires_seconds +
330 			xp->curlft.add_time - now;
331 		if (tmo <= 0)
332 			goto expired;
333 		if (tmo < next)
334 			next = tmo;
335 	}
336 	if (xp->lft.hard_use_expires_seconds) {
337 		time64_t tmo = xp->lft.hard_use_expires_seconds +
338 			(xp->curlft.use_time ? : xp->curlft.add_time) - now;
339 		if (tmo <= 0)
340 			goto expired;
341 		if (tmo < next)
342 			next = tmo;
343 	}
344 	if (xp->lft.soft_add_expires_seconds) {
345 		time64_t tmo = xp->lft.soft_add_expires_seconds +
346 			xp->curlft.add_time - now;
347 		if (tmo <= 0) {
348 			warn = 1;
349 			tmo = XFRM_KM_TIMEOUT;
350 		}
351 		if (tmo < next)
352 			next = tmo;
353 	}
354 	if (xp->lft.soft_use_expires_seconds) {
355 		time64_t tmo = xp->lft.soft_use_expires_seconds +
356 			(xp->curlft.use_time ? : xp->curlft.add_time) - now;
357 		if (tmo <= 0) {
358 			warn = 1;
359 			tmo = XFRM_KM_TIMEOUT;
360 		}
361 		if (tmo < next)
362 			next = tmo;
363 	}
364 
365 	if (warn)
366 		km_policy_expired(xp, dir, 0, 0);
367 	if (next != TIME64_MAX &&
368 	    !mod_timer(&xp->timer, jiffies + make_jiffies(next)))
369 		xfrm_pol_hold(xp);
370 
371 out:
372 	read_unlock(&xp->lock);
373 	xfrm_pol_put(xp);
374 	return;
375 
376 expired:
377 	read_unlock(&xp->lock);
378 	if (!xfrm_policy_delete(xp, dir))
379 		km_policy_expired(xp, dir, 1, 0);
380 	xfrm_pol_put(xp);
381 }
382 
383 /* Allocate xfrm_policy. Not used here, it is supposed to be used by pfkeyv2
384  * SPD calls.
385  */
386 
387 struct xfrm_policy *xfrm_policy_alloc(struct net *net, gfp_t gfp)
388 {
389 	struct xfrm_policy *policy;
390 
391 	policy = kzalloc(sizeof(struct xfrm_policy), gfp);
392 
393 	if (policy) {
394 		write_pnet(&policy->xp_net, net);
395 		INIT_LIST_HEAD(&policy->walk.all);
396 		INIT_HLIST_NODE(&policy->bydst_inexact_list);
397 		INIT_HLIST_NODE(&policy->bydst);
398 		INIT_HLIST_NODE(&policy->byidx);
399 		rwlock_init(&policy->lock);
400 		refcount_set(&policy->refcnt, 1);
401 		skb_queue_head_init(&policy->polq.hold_queue);
402 		timer_setup(&policy->timer, xfrm_policy_timer, 0);
403 		timer_setup(&policy->polq.hold_timer,
404 			    xfrm_policy_queue_process, 0);
405 	}
406 	return policy;
407 }
408 EXPORT_SYMBOL(xfrm_policy_alloc);
409 
410 static void xfrm_policy_destroy_rcu(struct rcu_head *head)
411 {
412 	struct xfrm_policy *policy = container_of(head, struct xfrm_policy, rcu);
413 
414 	security_xfrm_policy_free(policy->security);
415 	kfree(policy);
416 }
417 
418 /* Destroy xfrm_policy: descendant resources must be released to this moment. */
419 
420 void xfrm_policy_destroy(struct xfrm_policy *policy)
421 {
422 	BUG_ON(!policy->walk.dead);
423 
424 	if (del_timer(&policy->timer) || del_timer(&policy->polq.hold_timer))
425 		BUG();
426 
427 	call_rcu(&policy->rcu, xfrm_policy_destroy_rcu);
428 }
429 EXPORT_SYMBOL(xfrm_policy_destroy);
430 
431 /* Rule must be locked. Release descendant resources, announce
432  * entry dead. The rule must be unlinked from lists to the moment.
433  */
434 
435 static void xfrm_policy_kill(struct xfrm_policy *policy)
436 {
437 	write_lock_bh(&policy->lock);
438 	policy->walk.dead = 1;
439 	write_unlock_bh(&policy->lock);
440 
441 	atomic_inc(&policy->genid);
442 
443 	if (del_timer(&policy->polq.hold_timer))
444 		xfrm_pol_put(policy);
445 	skb_queue_purge(&policy->polq.hold_queue);
446 
447 	if (del_timer(&policy->timer))
448 		xfrm_pol_put(policy);
449 
450 	xfrm_pol_put(policy);
451 }
452 
453 static unsigned int xfrm_policy_hashmax __read_mostly = 1 * 1024 * 1024;
454 
455 static inline unsigned int idx_hash(struct net *net, u32 index)
456 {
457 	return __idx_hash(index, net->xfrm.policy_idx_hmask);
458 }
459 
460 /* calculate policy hash thresholds */
461 static void __get_hash_thresh(struct net *net,
462 			      unsigned short family, int dir,
463 			      u8 *dbits, u8 *sbits)
464 {
465 	switch (family) {
466 	case AF_INET:
467 		*dbits = net->xfrm.policy_bydst[dir].dbits4;
468 		*sbits = net->xfrm.policy_bydst[dir].sbits4;
469 		break;
470 
471 	case AF_INET6:
472 		*dbits = net->xfrm.policy_bydst[dir].dbits6;
473 		*sbits = net->xfrm.policy_bydst[dir].sbits6;
474 		break;
475 
476 	default:
477 		*dbits = 0;
478 		*sbits = 0;
479 	}
480 }
481 
482 static struct hlist_head *policy_hash_bysel(struct net *net,
483 					    const struct xfrm_selector *sel,
484 					    unsigned short family, int dir)
485 {
486 	unsigned int hmask = net->xfrm.policy_bydst[dir].hmask;
487 	unsigned int hash;
488 	u8 dbits;
489 	u8 sbits;
490 
491 	__get_hash_thresh(net, family, dir, &dbits, &sbits);
492 	hash = __sel_hash(sel, family, hmask, dbits, sbits);
493 
494 	if (hash == hmask + 1)
495 		return NULL;
496 
497 	return rcu_dereference_check(net->xfrm.policy_bydst[dir].table,
498 		     lockdep_is_held(&net->xfrm.xfrm_policy_lock)) + hash;
499 }
500 
501 static struct hlist_head *policy_hash_direct(struct net *net,
502 					     const xfrm_address_t *daddr,
503 					     const xfrm_address_t *saddr,
504 					     unsigned short family, int dir)
505 {
506 	unsigned int hmask = net->xfrm.policy_bydst[dir].hmask;
507 	unsigned int hash;
508 	u8 dbits;
509 	u8 sbits;
510 
511 	__get_hash_thresh(net, family, dir, &dbits, &sbits);
512 	hash = __addr_hash(daddr, saddr, family, hmask, dbits, sbits);
513 
514 	return rcu_dereference_check(net->xfrm.policy_bydst[dir].table,
515 		     lockdep_is_held(&net->xfrm.xfrm_policy_lock)) + hash;
516 }
517 
518 static void xfrm_dst_hash_transfer(struct net *net,
519 				   struct hlist_head *list,
520 				   struct hlist_head *ndsttable,
521 				   unsigned int nhashmask,
522 				   int dir)
523 {
524 	struct hlist_node *tmp, *entry0 = NULL;
525 	struct xfrm_policy *pol;
526 	unsigned int h0 = 0;
527 	u8 dbits;
528 	u8 sbits;
529 
530 redo:
531 	hlist_for_each_entry_safe(pol, tmp, list, bydst) {
532 		unsigned int h;
533 
534 		__get_hash_thresh(net, pol->family, dir, &dbits, &sbits);
535 		h = __addr_hash(&pol->selector.daddr, &pol->selector.saddr,
536 				pol->family, nhashmask, dbits, sbits);
537 		if (!entry0) {
538 			hlist_del_rcu(&pol->bydst);
539 			hlist_add_head_rcu(&pol->bydst, ndsttable + h);
540 			h0 = h;
541 		} else {
542 			if (h != h0)
543 				continue;
544 			hlist_del_rcu(&pol->bydst);
545 			hlist_add_behind_rcu(&pol->bydst, entry0);
546 		}
547 		entry0 = &pol->bydst;
548 	}
549 	if (!hlist_empty(list)) {
550 		entry0 = NULL;
551 		goto redo;
552 	}
553 }
554 
555 static void xfrm_idx_hash_transfer(struct hlist_head *list,
556 				   struct hlist_head *nidxtable,
557 				   unsigned int nhashmask)
558 {
559 	struct hlist_node *tmp;
560 	struct xfrm_policy *pol;
561 
562 	hlist_for_each_entry_safe(pol, tmp, list, byidx) {
563 		unsigned int h;
564 
565 		h = __idx_hash(pol->index, nhashmask);
566 		hlist_add_head(&pol->byidx, nidxtable+h);
567 	}
568 }
569 
570 static unsigned long xfrm_new_hash_mask(unsigned int old_hmask)
571 {
572 	return ((old_hmask + 1) << 1) - 1;
573 }
574 
575 static void xfrm_bydst_resize(struct net *net, int dir)
576 {
577 	unsigned int hmask = net->xfrm.policy_bydst[dir].hmask;
578 	unsigned int nhashmask = xfrm_new_hash_mask(hmask);
579 	unsigned int nsize = (nhashmask + 1) * sizeof(struct hlist_head);
580 	struct hlist_head *ndst = xfrm_hash_alloc(nsize);
581 	struct hlist_head *odst;
582 	int i;
583 
584 	if (!ndst)
585 		return;
586 
587 	spin_lock_bh(&net->xfrm.xfrm_policy_lock);
588 	write_seqcount_begin(&xfrm_policy_hash_generation);
589 
590 	odst = rcu_dereference_protected(net->xfrm.policy_bydst[dir].table,
591 				lockdep_is_held(&net->xfrm.xfrm_policy_lock));
592 
593 	for (i = hmask; i >= 0; i--)
594 		xfrm_dst_hash_transfer(net, odst + i, ndst, nhashmask, dir);
595 
596 	rcu_assign_pointer(net->xfrm.policy_bydst[dir].table, ndst);
597 	net->xfrm.policy_bydst[dir].hmask = nhashmask;
598 
599 	write_seqcount_end(&xfrm_policy_hash_generation);
600 	spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
601 
602 	synchronize_rcu();
603 
604 	xfrm_hash_free(odst, (hmask + 1) * sizeof(struct hlist_head));
605 }
606 
607 static void xfrm_byidx_resize(struct net *net, int total)
608 {
609 	unsigned int hmask = net->xfrm.policy_idx_hmask;
610 	unsigned int nhashmask = xfrm_new_hash_mask(hmask);
611 	unsigned int nsize = (nhashmask + 1) * sizeof(struct hlist_head);
612 	struct hlist_head *oidx = net->xfrm.policy_byidx;
613 	struct hlist_head *nidx = xfrm_hash_alloc(nsize);
614 	int i;
615 
616 	if (!nidx)
617 		return;
618 
619 	spin_lock_bh(&net->xfrm.xfrm_policy_lock);
620 
621 	for (i = hmask; i >= 0; i--)
622 		xfrm_idx_hash_transfer(oidx + i, nidx, nhashmask);
623 
624 	net->xfrm.policy_byidx = nidx;
625 	net->xfrm.policy_idx_hmask = nhashmask;
626 
627 	spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
628 
629 	xfrm_hash_free(oidx, (hmask + 1) * sizeof(struct hlist_head));
630 }
631 
632 static inline int xfrm_bydst_should_resize(struct net *net, int dir, int *total)
633 {
634 	unsigned int cnt = net->xfrm.policy_count[dir];
635 	unsigned int hmask = net->xfrm.policy_bydst[dir].hmask;
636 
637 	if (total)
638 		*total += cnt;
639 
640 	if ((hmask + 1) < xfrm_policy_hashmax &&
641 	    cnt > hmask)
642 		return 1;
643 
644 	return 0;
645 }
646 
647 static inline int xfrm_byidx_should_resize(struct net *net, int total)
648 {
649 	unsigned int hmask = net->xfrm.policy_idx_hmask;
650 
651 	if ((hmask + 1) < xfrm_policy_hashmax &&
652 	    total > hmask)
653 		return 1;
654 
655 	return 0;
656 }
657 
658 void xfrm_spd_getinfo(struct net *net, struct xfrmk_spdinfo *si)
659 {
660 	si->incnt = net->xfrm.policy_count[XFRM_POLICY_IN];
661 	si->outcnt = net->xfrm.policy_count[XFRM_POLICY_OUT];
662 	si->fwdcnt = net->xfrm.policy_count[XFRM_POLICY_FWD];
663 	si->inscnt = net->xfrm.policy_count[XFRM_POLICY_IN+XFRM_POLICY_MAX];
664 	si->outscnt = net->xfrm.policy_count[XFRM_POLICY_OUT+XFRM_POLICY_MAX];
665 	si->fwdscnt = net->xfrm.policy_count[XFRM_POLICY_FWD+XFRM_POLICY_MAX];
666 	si->spdhcnt = net->xfrm.policy_idx_hmask;
667 	si->spdhmcnt = xfrm_policy_hashmax;
668 }
669 EXPORT_SYMBOL(xfrm_spd_getinfo);
670 
671 static DEFINE_MUTEX(hash_resize_mutex);
672 static void xfrm_hash_resize(struct work_struct *work)
673 {
674 	struct net *net = container_of(work, struct net, xfrm.policy_hash_work);
675 	int dir, total;
676 
677 	mutex_lock(&hash_resize_mutex);
678 
679 	total = 0;
680 	for (dir = 0; dir < XFRM_POLICY_MAX; dir++) {
681 		if (xfrm_bydst_should_resize(net, dir, &total))
682 			xfrm_bydst_resize(net, dir);
683 	}
684 	if (xfrm_byidx_should_resize(net, total))
685 		xfrm_byidx_resize(net, total);
686 
687 	mutex_unlock(&hash_resize_mutex);
688 }
689 
690 /* Make sure *pol can be inserted into fastbin.
691  * Useful to check that later insert requests will be successful
692  * (provided xfrm_policy_lock is held throughout).
693  */
694 static struct xfrm_pol_inexact_bin *
695 xfrm_policy_inexact_alloc_bin(const struct xfrm_policy *pol, u8 dir)
696 {
697 	struct xfrm_pol_inexact_bin *bin, *prev;
698 	struct xfrm_pol_inexact_key k = {
699 		.family = pol->family,
700 		.type = pol->type,
701 		.dir = dir,
702 		.if_id = pol->if_id,
703 	};
704 	struct net *net = xp_net(pol);
705 
706 	lockdep_assert_held(&net->xfrm.xfrm_policy_lock);
707 
708 	write_pnet(&k.net, net);
709 	bin = rhashtable_lookup_fast(&xfrm_policy_inexact_table, &k,
710 				     xfrm_pol_inexact_params);
711 	if (bin)
712 		return bin;
713 
714 	bin = kzalloc(sizeof(*bin), GFP_ATOMIC);
715 	if (!bin)
716 		return NULL;
717 
718 	bin->k = k;
719 	INIT_HLIST_HEAD(&bin->hhead);
720 	bin->root_d = RB_ROOT;
721 	bin->root_s = RB_ROOT;
722 	seqcount_spinlock_init(&bin->count, &net->xfrm.xfrm_policy_lock);
723 
724 	prev = rhashtable_lookup_get_insert_key(&xfrm_policy_inexact_table,
725 						&bin->k, &bin->head,
726 						xfrm_pol_inexact_params);
727 	if (!prev) {
728 		list_add(&bin->inexact_bins, &net->xfrm.inexact_bins);
729 		return bin;
730 	}
731 
732 	kfree(bin);
733 
734 	return IS_ERR(prev) ? NULL : prev;
735 }
736 
737 static bool xfrm_pol_inexact_addr_use_any_list(const xfrm_address_t *addr,
738 					       int family, u8 prefixlen)
739 {
740 	if (xfrm_addr_any(addr, family))
741 		return true;
742 
743 	if (family == AF_INET6 && prefixlen < INEXACT_PREFIXLEN_IPV6)
744 		return true;
745 
746 	if (family == AF_INET && prefixlen < INEXACT_PREFIXLEN_IPV4)
747 		return true;
748 
749 	return false;
750 }
751 
752 static bool
753 xfrm_policy_inexact_insert_use_any_list(const struct xfrm_policy *policy)
754 {
755 	const xfrm_address_t *addr;
756 	bool saddr_any, daddr_any;
757 	u8 prefixlen;
758 
759 	addr = &policy->selector.saddr;
760 	prefixlen = policy->selector.prefixlen_s;
761 
762 	saddr_any = xfrm_pol_inexact_addr_use_any_list(addr,
763 						       policy->family,
764 						       prefixlen);
765 	addr = &policy->selector.daddr;
766 	prefixlen = policy->selector.prefixlen_d;
767 	daddr_any = xfrm_pol_inexact_addr_use_any_list(addr,
768 						       policy->family,
769 						       prefixlen);
770 	return saddr_any && daddr_any;
771 }
772 
773 static void xfrm_pol_inexact_node_init(struct xfrm_pol_inexact_node *node,
774 				       const xfrm_address_t *addr, u8 prefixlen)
775 {
776 	node->addr = *addr;
777 	node->prefixlen = prefixlen;
778 }
779 
780 static struct xfrm_pol_inexact_node *
781 xfrm_pol_inexact_node_alloc(const xfrm_address_t *addr, u8 prefixlen)
782 {
783 	struct xfrm_pol_inexact_node *node;
784 
785 	node = kzalloc(sizeof(*node), GFP_ATOMIC);
786 	if (node)
787 		xfrm_pol_inexact_node_init(node, addr, prefixlen);
788 
789 	return node;
790 }
791 
792 static int xfrm_policy_addr_delta(const xfrm_address_t *a,
793 				  const xfrm_address_t *b,
794 				  u8 prefixlen, u16 family)
795 {
796 	u32 ma, mb, mask;
797 	unsigned int pdw, pbi;
798 	int delta = 0;
799 
800 	switch (family) {
801 	case AF_INET:
802 		if (prefixlen == 0)
803 			return 0;
804 		mask = ~0U << (32 - prefixlen);
805 		ma = ntohl(a->a4) & mask;
806 		mb = ntohl(b->a4) & mask;
807 		if (ma < mb)
808 			delta = -1;
809 		else if (ma > mb)
810 			delta = 1;
811 		break;
812 	case AF_INET6:
813 		pdw = prefixlen >> 5;
814 		pbi = prefixlen & 0x1f;
815 
816 		if (pdw) {
817 			delta = memcmp(a->a6, b->a6, pdw << 2);
818 			if (delta)
819 				return delta;
820 		}
821 		if (pbi) {
822 			mask = ~0U << (32 - pbi);
823 			ma = ntohl(a->a6[pdw]) & mask;
824 			mb = ntohl(b->a6[pdw]) & mask;
825 			if (ma < mb)
826 				delta = -1;
827 			else if (ma > mb)
828 				delta = 1;
829 		}
830 		break;
831 	default:
832 		break;
833 	}
834 
835 	return delta;
836 }
837 
838 static void xfrm_policy_inexact_list_reinsert(struct net *net,
839 					      struct xfrm_pol_inexact_node *n,
840 					      u16 family)
841 {
842 	unsigned int matched_s, matched_d;
843 	struct xfrm_policy *policy, *p;
844 
845 	matched_s = 0;
846 	matched_d = 0;
847 
848 	list_for_each_entry_reverse(policy, &net->xfrm.policy_all, walk.all) {
849 		struct hlist_node *newpos = NULL;
850 		bool matches_s, matches_d;
851 
852 		if (!policy->bydst_reinsert)
853 			continue;
854 
855 		WARN_ON_ONCE(policy->family != family);
856 
857 		policy->bydst_reinsert = false;
858 		hlist_for_each_entry(p, &n->hhead, bydst) {
859 			if (policy->priority > p->priority)
860 				newpos = &p->bydst;
861 			else if (policy->priority == p->priority &&
862 				 policy->pos > p->pos)
863 				newpos = &p->bydst;
864 			else
865 				break;
866 		}
867 
868 		if (newpos)
869 			hlist_add_behind_rcu(&policy->bydst, newpos);
870 		else
871 			hlist_add_head_rcu(&policy->bydst, &n->hhead);
872 
873 		/* paranoia checks follow.
874 		 * Check that the reinserted policy matches at least
875 		 * saddr or daddr for current node prefix.
876 		 *
877 		 * Matching both is fine, matching saddr in one policy
878 		 * (but not daddr) and then matching only daddr in another
879 		 * is a bug.
880 		 */
881 		matches_s = xfrm_policy_addr_delta(&policy->selector.saddr,
882 						   &n->addr,
883 						   n->prefixlen,
884 						   family) == 0;
885 		matches_d = xfrm_policy_addr_delta(&policy->selector.daddr,
886 						   &n->addr,
887 						   n->prefixlen,
888 						   family) == 0;
889 		if (matches_s && matches_d)
890 			continue;
891 
892 		WARN_ON_ONCE(!matches_s && !matches_d);
893 		if (matches_s)
894 			matched_s++;
895 		if (matches_d)
896 			matched_d++;
897 		WARN_ON_ONCE(matched_s && matched_d);
898 	}
899 }
900 
901 static void xfrm_policy_inexact_node_reinsert(struct net *net,
902 					      struct xfrm_pol_inexact_node *n,
903 					      struct rb_root *new,
904 					      u16 family)
905 {
906 	struct xfrm_pol_inexact_node *node;
907 	struct rb_node **p, *parent;
908 
909 	/* we should not have another subtree here */
910 	WARN_ON_ONCE(!RB_EMPTY_ROOT(&n->root));
911 restart:
912 	parent = NULL;
913 	p = &new->rb_node;
914 	while (*p) {
915 		u8 prefixlen;
916 		int delta;
917 
918 		parent = *p;
919 		node = rb_entry(*p, struct xfrm_pol_inexact_node, node);
920 
921 		prefixlen = min(node->prefixlen, n->prefixlen);
922 
923 		delta = xfrm_policy_addr_delta(&n->addr, &node->addr,
924 					       prefixlen, family);
925 		if (delta < 0) {
926 			p = &parent->rb_left;
927 		} else if (delta > 0) {
928 			p = &parent->rb_right;
929 		} else {
930 			bool same_prefixlen = node->prefixlen == n->prefixlen;
931 			struct xfrm_policy *tmp;
932 
933 			hlist_for_each_entry(tmp, &n->hhead, bydst) {
934 				tmp->bydst_reinsert = true;
935 				hlist_del_rcu(&tmp->bydst);
936 			}
937 
938 			node->prefixlen = prefixlen;
939 
940 			xfrm_policy_inexact_list_reinsert(net, node, family);
941 
942 			if (same_prefixlen) {
943 				kfree_rcu(n, rcu);
944 				return;
945 			}
946 
947 			rb_erase(*p, new);
948 			kfree_rcu(n, rcu);
949 			n = node;
950 			goto restart;
951 		}
952 	}
953 
954 	rb_link_node_rcu(&n->node, parent, p);
955 	rb_insert_color(&n->node, new);
956 }
957 
958 /* merge nodes v and n */
959 static void xfrm_policy_inexact_node_merge(struct net *net,
960 					   struct xfrm_pol_inexact_node *v,
961 					   struct xfrm_pol_inexact_node *n,
962 					   u16 family)
963 {
964 	struct xfrm_pol_inexact_node *node;
965 	struct xfrm_policy *tmp;
966 	struct rb_node *rnode;
967 
968 	/* To-be-merged node v has a subtree.
969 	 *
970 	 * Dismantle it and insert its nodes to n->root.
971 	 */
972 	while ((rnode = rb_first(&v->root)) != NULL) {
973 		node = rb_entry(rnode, struct xfrm_pol_inexact_node, node);
974 		rb_erase(&node->node, &v->root);
975 		xfrm_policy_inexact_node_reinsert(net, node, &n->root,
976 						  family);
977 	}
978 
979 	hlist_for_each_entry(tmp, &v->hhead, bydst) {
980 		tmp->bydst_reinsert = true;
981 		hlist_del_rcu(&tmp->bydst);
982 	}
983 
984 	xfrm_policy_inexact_list_reinsert(net, n, family);
985 }
986 
987 static struct xfrm_pol_inexact_node *
988 xfrm_policy_inexact_insert_node(struct net *net,
989 				struct rb_root *root,
990 				xfrm_address_t *addr,
991 				u16 family, u8 prefixlen, u8 dir)
992 {
993 	struct xfrm_pol_inexact_node *cached = NULL;
994 	struct rb_node **p, *parent = NULL;
995 	struct xfrm_pol_inexact_node *node;
996 
997 	p = &root->rb_node;
998 	while (*p) {
999 		int delta;
1000 
1001 		parent = *p;
1002 		node = rb_entry(*p, struct xfrm_pol_inexact_node, node);
1003 
1004 		delta = xfrm_policy_addr_delta(addr, &node->addr,
1005 					       node->prefixlen,
1006 					       family);
1007 		if (delta == 0 && prefixlen >= node->prefixlen) {
1008 			WARN_ON_ONCE(cached); /* ipsec policies got lost */
1009 			return node;
1010 		}
1011 
1012 		if (delta < 0)
1013 			p = &parent->rb_left;
1014 		else
1015 			p = &parent->rb_right;
1016 
1017 		if (prefixlen < node->prefixlen) {
1018 			delta = xfrm_policy_addr_delta(addr, &node->addr,
1019 						       prefixlen,
1020 						       family);
1021 			if (delta)
1022 				continue;
1023 
1024 			/* This node is a subnet of the new prefix. It needs
1025 			 * to be removed and re-inserted with the smaller
1026 			 * prefix and all nodes that are now also covered
1027 			 * by the reduced prefixlen.
1028 			 */
1029 			rb_erase(&node->node, root);
1030 
1031 			if (!cached) {
1032 				xfrm_pol_inexact_node_init(node, addr,
1033 							   prefixlen);
1034 				cached = node;
1035 			} else {
1036 				/* This node also falls within the new
1037 				 * prefixlen. Merge the to-be-reinserted
1038 				 * node and this one.
1039 				 */
1040 				xfrm_policy_inexact_node_merge(net, node,
1041 							       cached, family);
1042 				kfree_rcu(node, rcu);
1043 			}
1044 
1045 			/* restart */
1046 			p = &root->rb_node;
1047 			parent = NULL;
1048 		}
1049 	}
1050 
1051 	node = cached;
1052 	if (!node) {
1053 		node = xfrm_pol_inexact_node_alloc(addr, prefixlen);
1054 		if (!node)
1055 			return NULL;
1056 	}
1057 
1058 	rb_link_node_rcu(&node->node, parent, p);
1059 	rb_insert_color(&node->node, root);
1060 
1061 	return node;
1062 }
1063 
1064 static void xfrm_policy_inexact_gc_tree(struct rb_root *r, bool rm)
1065 {
1066 	struct xfrm_pol_inexact_node *node;
1067 	struct rb_node *rn = rb_first(r);
1068 
1069 	while (rn) {
1070 		node = rb_entry(rn, struct xfrm_pol_inexact_node, node);
1071 
1072 		xfrm_policy_inexact_gc_tree(&node->root, rm);
1073 		rn = rb_next(rn);
1074 
1075 		if (!hlist_empty(&node->hhead) || !RB_EMPTY_ROOT(&node->root)) {
1076 			WARN_ON_ONCE(rm);
1077 			continue;
1078 		}
1079 
1080 		rb_erase(&node->node, r);
1081 		kfree_rcu(node, rcu);
1082 	}
1083 }
1084 
1085 static void __xfrm_policy_inexact_prune_bin(struct xfrm_pol_inexact_bin *b, bool net_exit)
1086 {
1087 	write_seqcount_begin(&b->count);
1088 	xfrm_policy_inexact_gc_tree(&b->root_d, net_exit);
1089 	xfrm_policy_inexact_gc_tree(&b->root_s, net_exit);
1090 	write_seqcount_end(&b->count);
1091 
1092 	if (!RB_EMPTY_ROOT(&b->root_d) || !RB_EMPTY_ROOT(&b->root_s) ||
1093 	    !hlist_empty(&b->hhead)) {
1094 		WARN_ON_ONCE(net_exit);
1095 		return;
1096 	}
1097 
1098 	if (rhashtable_remove_fast(&xfrm_policy_inexact_table, &b->head,
1099 				   xfrm_pol_inexact_params) == 0) {
1100 		list_del(&b->inexact_bins);
1101 		kfree_rcu(b, rcu);
1102 	}
1103 }
1104 
1105 static void xfrm_policy_inexact_prune_bin(struct xfrm_pol_inexact_bin *b)
1106 {
1107 	struct net *net = read_pnet(&b->k.net);
1108 
1109 	spin_lock_bh(&net->xfrm.xfrm_policy_lock);
1110 	__xfrm_policy_inexact_prune_bin(b, false);
1111 	spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
1112 }
1113 
1114 static void __xfrm_policy_inexact_flush(struct net *net)
1115 {
1116 	struct xfrm_pol_inexact_bin *bin, *t;
1117 
1118 	lockdep_assert_held(&net->xfrm.xfrm_policy_lock);
1119 
1120 	list_for_each_entry_safe(bin, t, &net->xfrm.inexact_bins, inexact_bins)
1121 		__xfrm_policy_inexact_prune_bin(bin, false);
1122 }
1123 
1124 static struct hlist_head *
1125 xfrm_policy_inexact_alloc_chain(struct xfrm_pol_inexact_bin *bin,
1126 				struct xfrm_policy *policy, u8 dir)
1127 {
1128 	struct xfrm_pol_inexact_node *n;
1129 	struct net *net;
1130 
1131 	net = xp_net(policy);
1132 	lockdep_assert_held(&net->xfrm.xfrm_policy_lock);
1133 
1134 	if (xfrm_policy_inexact_insert_use_any_list(policy))
1135 		return &bin->hhead;
1136 
1137 	if (xfrm_pol_inexact_addr_use_any_list(&policy->selector.daddr,
1138 					       policy->family,
1139 					       policy->selector.prefixlen_d)) {
1140 		write_seqcount_begin(&bin->count);
1141 		n = xfrm_policy_inexact_insert_node(net,
1142 						    &bin->root_s,
1143 						    &policy->selector.saddr,
1144 						    policy->family,
1145 						    policy->selector.prefixlen_s,
1146 						    dir);
1147 		write_seqcount_end(&bin->count);
1148 		if (!n)
1149 			return NULL;
1150 
1151 		return &n->hhead;
1152 	}
1153 
1154 	/* daddr is fixed */
1155 	write_seqcount_begin(&bin->count);
1156 	n = xfrm_policy_inexact_insert_node(net,
1157 					    &bin->root_d,
1158 					    &policy->selector.daddr,
1159 					    policy->family,
1160 					    policy->selector.prefixlen_d, dir);
1161 	write_seqcount_end(&bin->count);
1162 	if (!n)
1163 		return NULL;
1164 
1165 	/* saddr is wildcard */
1166 	if (xfrm_pol_inexact_addr_use_any_list(&policy->selector.saddr,
1167 					       policy->family,
1168 					       policy->selector.prefixlen_s))
1169 		return &n->hhead;
1170 
1171 	write_seqcount_begin(&bin->count);
1172 	n = xfrm_policy_inexact_insert_node(net,
1173 					    &n->root,
1174 					    &policy->selector.saddr,
1175 					    policy->family,
1176 					    policy->selector.prefixlen_s, dir);
1177 	write_seqcount_end(&bin->count);
1178 	if (!n)
1179 		return NULL;
1180 
1181 	return &n->hhead;
1182 }
1183 
1184 static struct xfrm_policy *
1185 xfrm_policy_inexact_insert(struct xfrm_policy *policy, u8 dir, int excl)
1186 {
1187 	struct xfrm_pol_inexact_bin *bin;
1188 	struct xfrm_policy *delpol;
1189 	struct hlist_head *chain;
1190 	struct net *net;
1191 
1192 	bin = xfrm_policy_inexact_alloc_bin(policy, dir);
1193 	if (!bin)
1194 		return ERR_PTR(-ENOMEM);
1195 
1196 	net = xp_net(policy);
1197 	lockdep_assert_held(&net->xfrm.xfrm_policy_lock);
1198 
1199 	chain = xfrm_policy_inexact_alloc_chain(bin, policy, dir);
1200 	if (!chain) {
1201 		__xfrm_policy_inexact_prune_bin(bin, false);
1202 		return ERR_PTR(-ENOMEM);
1203 	}
1204 
1205 	delpol = xfrm_policy_insert_list(chain, policy, excl);
1206 	if (delpol && excl) {
1207 		__xfrm_policy_inexact_prune_bin(bin, false);
1208 		return ERR_PTR(-EEXIST);
1209 	}
1210 
1211 	chain = &net->xfrm.policy_inexact[dir];
1212 	xfrm_policy_insert_inexact_list(chain, policy);
1213 
1214 	if (delpol)
1215 		__xfrm_policy_inexact_prune_bin(bin, false);
1216 
1217 	return delpol;
1218 }
1219 
1220 static void xfrm_hash_rebuild(struct work_struct *work)
1221 {
1222 	struct net *net = container_of(work, struct net,
1223 				       xfrm.policy_hthresh.work);
1224 	unsigned int hmask;
1225 	struct xfrm_policy *pol;
1226 	struct xfrm_policy *policy;
1227 	struct hlist_head *chain;
1228 	struct hlist_head *odst;
1229 	struct hlist_node *newpos;
1230 	int i;
1231 	int dir;
1232 	unsigned seq;
1233 	u8 lbits4, rbits4, lbits6, rbits6;
1234 
1235 	mutex_lock(&hash_resize_mutex);
1236 
1237 	/* read selector prefixlen thresholds */
1238 	do {
1239 		seq = read_seqbegin(&net->xfrm.policy_hthresh.lock);
1240 
1241 		lbits4 = net->xfrm.policy_hthresh.lbits4;
1242 		rbits4 = net->xfrm.policy_hthresh.rbits4;
1243 		lbits6 = net->xfrm.policy_hthresh.lbits6;
1244 		rbits6 = net->xfrm.policy_hthresh.rbits6;
1245 	} while (read_seqretry(&net->xfrm.policy_hthresh.lock, seq));
1246 
1247 	spin_lock_bh(&net->xfrm.xfrm_policy_lock);
1248 	write_seqcount_begin(&xfrm_policy_hash_generation);
1249 
1250 	/* make sure that we can insert the indirect policies again before
1251 	 * we start with destructive action.
1252 	 */
1253 	list_for_each_entry(policy, &net->xfrm.policy_all, walk.all) {
1254 		struct xfrm_pol_inexact_bin *bin;
1255 		u8 dbits, sbits;
1256 
1257 		dir = xfrm_policy_id2dir(policy->index);
1258 		if (policy->walk.dead || dir >= XFRM_POLICY_MAX)
1259 			continue;
1260 
1261 		if ((dir & XFRM_POLICY_MASK) == XFRM_POLICY_OUT) {
1262 			if (policy->family == AF_INET) {
1263 				dbits = rbits4;
1264 				sbits = lbits4;
1265 			} else {
1266 				dbits = rbits6;
1267 				sbits = lbits6;
1268 			}
1269 		} else {
1270 			if (policy->family == AF_INET) {
1271 				dbits = lbits4;
1272 				sbits = rbits4;
1273 			} else {
1274 				dbits = lbits6;
1275 				sbits = rbits6;
1276 			}
1277 		}
1278 
1279 		if (policy->selector.prefixlen_d < dbits ||
1280 		    policy->selector.prefixlen_s < sbits)
1281 			continue;
1282 
1283 		bin = xfrm_policy_inexact_alloc_bin(policy, dir);
1284 		if (!bin)
1285 			goto out_unlock;
1286 
1287 		if (!xfrm_policy_inexact_alloc_chain(bin, policy, dir))
1288 			goto out_unlock;
1289 	}
1290 
1291 	/* reset the bydst and inexact table in all directions */
1292 	for (dir = 0; dir < XFRM_POLICY_MAX; dir++) {
1293 		struct hlist_node *n;
1294 
1295 		hlist_for_each_entry_safe(policy, n,
1296 					  &net->xfrm.policy_inexact[dir],
1297 					  bydst_inexact_list) {
1298 			hlist_del_rcu(&policy->bydst);
1299 			hlist_del_init(&policy->bydst_inexact_list);
1300 		}
1301 
1302 		hmask = net->xfrm.policy_bydst[dir].hmask;
1303 		odst = net->xfrm.policy_bydst[dir].table;
1304 		for (i = hmask; i >= 0; i--) {
1305 			hlist_for_each_entry_safe(policy, n, odst + i, bydst)
1306 				hlist_del_rcu(&policy->bydst);
1307 		}
1308 		if ((dir & XFRM_POLICY_MASK) == XFRM_POLICY_OUT) {
1309 			/* dir out => dst = remote, src = local */
1310 			net->xfrm.policy_bydst[dir].dbits4 = rbits4;
1311 			net->xfrm.policy_bydst[dir].sbits4 = lbits4;
1312 			net->xfrm.policy_bydst[dir].dbits6 = rbits6;
1313 			net->xfrm.policy_bydst[dir].sbits6 = lbits6;
1314 		} else {
1315 			/* dir in/fwd => dst = local, src = remote */
1316 			net->xfrm.policy_bydst[dir].dbits4 = lbits4;
1317 			net->xfrm.policy_bydst[dir].sbits4 = rbits4;
1318 			net->xfrm.policy_bydst[dir].dbits6 = lbits6;
1319 			net->xfrm.policy_bydst[dir].sbits6 = rbits6;
1320 		}
1321 	}
1322 
1323 	/* re-insert all policies by order of creation */
1324 	list_for_each_entry_reverse(policy, &net->xfrm.policy_all, walk.all) {
1325 		if (policy->walk.dead)
1326 			continue;
1327 		dir = xfrm_policy_id2dir(policy->index);
1328 		if (dir >= XFRM_POLICY_MAX) {
1329 			/* skip socket policies */
1330 			continue;
1331 		}
1332 		newpos = NULL;
1333 		chain = policy_hash_bysel(net, &policy->selector,
1334 					  policy->family, dir);
1335 
1336 		if (!chain) {
1337 			void *p = xfrm_policy_inexact_insert(policy, dir, 0);
1338 
1339 			WARN_ONCE(IS_ERR(p), "reinsert: %ld\n", PTR_ERR(p));
1340 			continue;
1341 		}
1342 
1343 		hlist_for_each_entry(pol, chain, bydst) {
1344 			if (policy->priority >= pol->priority)
1345 				newpos = &pol->bydst;
1346 			else
1347 				break;
1348 		}
1349 		if (newpos)
1350 			hlist_add_behind_rcu(&policy->bydst, newpos);
1351 		else
1352 			hlist_add_head_rcu(&policy->bydst, chain);
1353 	}
1354 
1355 out_unlock:
1356 	__xfrm_policy_inexact_flush(net);
1357 	write_seqcount_end(&xfrm_policy_hash_generation);
1358 	spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
1359 
1360 	mutex_unlock(&hash_resize_mutex);
1361 }
1362 
1363 void xfrm_policy_hash_rebuild(struct net *net)
1364 {
1365 	schedule_work(&net->xfrm.policy_hthresh.work);
1366 }
1367 EXPORT_SYMBOL(xfrm_policy_hash_rebuild);
1368 
1369 /* Generate new index... KAME seems to generate them ordered by cost
1370  * of an absolute inpredictability of ordering of rules. This will not pass. */
1371 static u32 xfrm_gen_index(struct net *net, int dir, u32 index)
1372 {
1373 	static u32 idx_generator;
1374 
1375 	for (;;) {
1376 		struct hlist_head *list;
1377 		struct xfrm_policy *p;
1378 		u32 idx;
1379 		int found;
1380 
1381 		if (!index) {
1382 			idx = (idx_generator | dir);
1383 			idx_generator += 8;
1384 		} else {
1385 			idx = index;
1386 			index = 0;
1387 		}
1388 
1389 		if (idx == 0)
1390 			idx = 8;
1391 		list = net->xfrm.policy_byidx + idx_hash(net, idx);
1392 		found = 0;
1393 		hlist_for_each_entry(p, list, byidx) {
1394 			if (p->index == idx) {
1395 				found = 1;
1396 				break;
1397 			}
1398 		}
1399 		if (!found)
1400 			return idx;
1401 	}
1402 }
1403 
1404 static inline int selector_cmp(struct xfrm_selector *s1, struct xfrm_selector *s2)
1405 {
1406 	u32 *p1 = (u32 *) s1;
1407 	u32 *p2 = (u32 *) s2;
1408 	int len = sizeof(struct xfrm_selector) / sizeof(u32);
1409 	int i;
1410 
1411 	for (i = 0; i < len; i++) {
1412 		if (p1[i] != p2[i])
1413 			return 1;
1414 	}
1415 
1416 	return 0;
1417 }
1418 
1419 static void xfrm_policy_requeue(struct xfrm_policy *old,
1420 				struct xfrm_policy *new)
1421 {
1422 	struct xfrm_policy_queue *pq = &old->polq;
1423 	struct sk_buff_head list;
1424 
1425 	if (skb_queue_empty(&pq->hold_queue))
1426 		return;
1427 
1428 	__skb_queue_head_init(&list);
1429 
1430 	spin_lock_bh(&pq->hold_queue.lock);
1431 	skb_queue_splice_init(&pq->hold_queue, &list);
1432 	if (del_timer(&pq->hold_timer))
1433 		xfrm_pol_put(old);
1434 	spin_unlock_bh(&pq->hold_queue.lock);
1435 
1436 	pq = &new->polq;
1437 
1438 	spin_lock_bh(&pq->hold_queue.lock);
1439 	skb_queue_splice(&list, &pq->hold_queue);
1440 	pq->timeout = XFRM_QUEUE_TMO_MIN;
1441 	if (!mod_timer(&pq->hold_timer, jiffies))
1442 		xfrm_pol_hold(new);
1443 	spin_unlock_bh(&pq->hold_queue.lock);
1444 }
1445 
1446 static inline bool xfrm_policy_mark_match(const struct xfrm_mark *mark,
1447 					  struct xfrm_policy *pol)
1448 {
1449 	return mark->v == pol->mark.v && mark->m == pol->mark.m;
1450 }
1451 
1452 static u32 xfrm_pol_bin_key(const void *data, u32 len, u32 seed)
1453 {
1454 	const struct xfrm_pol_inexact_key *k = data;
1455 	u32 a = k->type << 24 | k->dir << 16 | k->family;
1456 
1457 	return jhash_3words(a, k->if_id, net_hash_mix(read_pnet(&k->net)),
1458 			    seed);
1459 }
1460 
1461 static u32 xfrm_pol_bin_obj(const void *data, u32 len, u32 seed)
1462 {
1463 	const struct xfrm_pol_inexact_bin *b = data;
1464 
1465 	return xfrm_pol_bin_key(&b->k, 0, seed);
1466 }
1467 
1468 static int xfrm_pol_bin_cmp(struct rhashtable_compare_arg *arg,
1469 			    const void *ptr)
1470 {
1471 	const struct xfrm_pol_inexact_key *key = arg->key;
1472 	const struct xfrm_pol_inexact_bin *b = ptr;
1473 	int ret;
1474 
1475 	if (!net_eq(read_pnet(&b->k.net), read_pnet(&key->net)))
1476 		return -1;
1477 
1478 	ret = b->k.dir ^ key->dir;
1479 	if (ret)
1480 		return ret;
1481 
1482 	ret = b->k.type ^ key->type;
1483 	if (ret)
1484 		return ret;
1485 
1486 	ret = b->k.family ^ key->family;
1487 	if (ret)
1488 		return ret;
1489 
1490 	return b->k.if_id ^ key->if_id;
1491 }
1492 
1493 static const struct rhashtable_params xfrm_pol_inexact_params = {
1494 	.head_offset		= offsetof(struct xfrm_pol_inexact_bin, head),
1495 	.hashfn			= xfrm_pol_bin_key,
1496 	.obj_hashfn		= xfrm_pol_bin_obj,
1497 	.obj_cmpfn		= xfrm_pol_bin_cmp,
1498 	.automatic_shrinking	= true,
1499 };
1500 
1501 static void xfrm_policy_insert_inexact_list(struct hlist_head *chain,
1502 					    struct xfrm_policy *policy)
1503 {
1504 	struct xfrm_policy *pol, *delpol = NULL;
1505 	struct hlist_node *newpos = NULL;
1506 	int i = 0;
1507 
1508 	hlist_for_each_entry(pol, chain, bydst_inexact_list) {
1509 		if (pol->type == policy->type &&
1510 		    pol->if_id == policy->if_id &&
1511 		    !selector_cmp(&pol->selector, &policy->selector) &&
1512 		    xfrm_policy_mark_match(&policy->mark, pol) &&
1513 		    xfrm_sec_ctx_match(pol->security, policy->security) &&
1514 		    !WARN_ON(delpol)) {
1515 			delpol = pol;
1516 			if (policy->priority > pol->priority)
1517 				continue;
1518 		} else if (policy->priority >= pol->priority) {
1519 			newpos = &pol->bydst_inexact_list;
1520 			continue;
1521 		}
1522 		if (delpol)
1523 			break;
1524 	}
1525 
1526 	if (newpos)
1527 		hlist_add_behind_rcu(&policy->bydst_inexact_list, newpos);
1528 	else
1529 		hlist_add_head_rcu(&policy->bydst_inexact_list, chain);
1530 
1531 	hlist_for_each_entry(pol, chain, bydst_inexact_list) {
1532 		pol->pos = i;
1533 		i++;
1534 	}
1535 }
1536 
1537 static struct xfrm_policy *xfrm_policy_insert_list(struct hlist_head *chain,
1538 						   struct xfrm_policy *policy,
1539 						   bool excl)
1540 {
1541 	struct xfrm_policy *pol, *newpos = NULL, *delpol = NULL;
1542 
1543 	hlist_for_each_entry(pol, chain, bydst) {
1544 		if (pol->type == policy->type &&
1545 		    pol->if_id == policy->if_id &&
1546 		    !selector_cmp(&pol->selector, &policy->selector) &&
1547 		    xfrm_policy_mark_match(&policy->mark, pol) &&
1548 		    xfrm_sec_ctx_match(pol->security, policy->security) &&
1549 		    !WARN_ON(delpol)) {
1550 			if (excl)
1551 				return ERR_PTR(-EEXIST);
1552 			delpol = pol;
1553 			if (policy->priority > pol->priority)
1554 				continue;
1555 		} else if (policy->priority >= pol->priority) {
1556 			newpos = pol;
1557 			continue;
1558 		}
1559 		if (delpol)
1560 			break;
1561 	}
1562 
1563 	if (newpos)
1564 		hlist_add_behind_rcu(&policy->bydst, &newpos->bydst);
1565 	else
1566 		hlist_add_head_rcu(&policy->bydst, chain);
1567 
1568 	return delpol;
1569 }
1570 
1571 int xfrm_policy_insert(int dir, struct xfrm_policy *policy, int excl)
1572 {
1573 	struct net *net = xp_net(policy);
1574 	struct xfrm_policy *delpol;
1575 	struct hlist_head *chain;
1576 
1577 	spin_lock_bh(&net->xfrm.xfrm_policy_lock);
1578 	chain = policy_hash_bysel(net, &policy->selector, policy->family, dir);
1579 	if (chain)
1580 		delpol = xfrm_policy_insert_list(chain, policy, excl);
1581 	else
1582 		delpol = xfrm_policy_inexact_insert(policy, dir, excl);
1583 
1584 	if (IS_ERR(delpol)) {
1585 		spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
1586 		return PTR_ERR(delpol);
1587 	}
1588 
1589 	__xfrm_policy_link(policy, dir);
1590 
1591 	/* After previous checking, family can either be AF_INET or AF_INET6 */
1592 	if (policy->family == AF_INET)
1593 		rt_genid_bump_ipv4(net);
1594 	else
1595 		rt_genid_bump_ipv6(net);
1596 
1597 	if (delpol) {
1598 		xfrm_policy_requeue(delpol, policy);
1599 		__xfrm_policy_unlink(delpol, dir);
1600 	}
1601 	policy->index = delpol ? delpol->index : xfrm_gen_index(net, dir, policy->index);
1602 	hlist_add_head(&policy->byidx, net->xfrm.policy_byidx+idx_hash(net, policy->index));
1603 	policy->curlft.add_time = ktime_get_real_seconds();
1604 	policy->curlft.use_time = 0;
1605 	if (!mod_timer(&policy->timer, jiffies + HZ))
1606 		xfrm_pol_hold(policy);
1607 	spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
1608 
1609 	if (delpol)
1610 		xfrm_policy_kill(delpol);
1611 	else if (xfrm_bydst_should_resize(net, dir, NULL))
1612 		schedule_work(&net->xfrm.policy_hash_work);
1613 
1614 	return 0;
1615 }
1616 EXPORT_SYMBOL(xfrm_policy_insert);
1617 
1618 static struct xfrm_policy *
1619 __xfrm_policy_bysel_ctx(struct hlist_head *chain, const struct xfrm_mark *mark,
1620 			u32 if_id, u8 type, int dir, struct xfrm_selector *sel,
1621 			struct xfrm_sec_ctx *ctx)
1622 {
1623 	struct xfrm_policy *pol;
1624 
1625 	if (!chain)
1626 		return NULL;
1627 
1628 	hlist_for_each_entry(pol, chain, bydst) {
1629 		if (pol->type == type &&
1630 		    pol->if_id == if_id &&
1631 		    xfrm_policy_mark_match(mark, pol) &&
1632 		    !selector_cmp(sel, &pol->selector) &&
1633 		    xfrm_sec_ctx_match(ctx, pol->security))
1634 			return pol;
1635 	}
1636 
1637 	return NULL;
1638 }
1639 
1640 struct xfrm_policy *
1641 xfrm_policy_bysel_ctx(struct net *net, const struct xfrm_mark *mark, u32 if_id,
1642 		      u8 type, int dir, struct xfrm_selector *sel,
1643 		      struct xfrm_sec_ctx *ctx, int delete, int *err)
1644 {
1645 	struct xfrm_pol_inexact_bin *bin = NULL;
1646 	struct xfrm_policy *pol, *ret = NULL;
1647 	struct hlist_head *chain;
1648 
1649 	*err = 0;
1650 	spin_lock_bh(&net->xfrm.xfrm_policy_lock);
1651 	chain = policy_hash_bysel(net, sel, sel->family, dir);
1652 	if (!chain) {
1653 		struct xfrm_pol_inexact_candidates cand;
1654 		int i;
1655 
1656 		bin = xfrm_policy_inexact_lookup(net, type,
1657 						 sel->family, dir, if_id);
1658 		if (!bin) {
1659 			spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
1660 			return NULL;
1661 		}
1662 
1663 		if (!xfrm_policy_find_inexact_candidates(&cand, bin,
1664 							 &sel->saddr,
1665 							 &sel->daddr)) {
1666 			spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
1667 			return NULL;
1668 		}
1669 
1670 		pol = NULL;
1671 		for (i = 0; i < ARRAY_SIZE(cand.res); i++) {
1672 			struct xfrm_policy *tmp;
1673 
1674 			tmp = __xfrm_policy_bysel_ctx(cand.res[i], mark,
1675 						      if_id, type, dir,
1676 						      sel, ctx);
1677 			if (!tmp)
1678 				continue;
1679 
1680 			if (!pol || tmp->pos < pol->pos)
1681 				pol = tmp;
1682 		}
1683 	} else {
1684 		pol = __xfrm_policy_bysel_ctx(chain, mark, if_id, type, dir,
1685 					      sel, ctx);
1686 	}
1687 
1688 	if (pol) {
1689 		xfrm_pol_hold(pol);
1690 		if (delete) {
1691 			*err = security_xfrm_policy_delete(pol->security);
1692 			if (*err) {
1693 				spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
1694 				return pol;
1695 			}
1696 			__xfrm_policy_unlink(pol, dir);
1697 		}
1698 		ret = pol;
1699 	}
1700 	spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
1701 
1702 	if (ret && delete)
1703 		xfrm_policy_kill(ret);
1704 	if (bin && delete)
1705 		xfrm_policy_inexact_prune_bin(bin);
1706 	return ret;
1707 }
1708 EXPORT_SYMBOL(xfrm_policy_bysel_ctx);
1709 
1710 struct xfrm_policy *
1711 xfrm_policy_byid(struct net *net, const struct xfrm_mark *mark, u32 if_id,
1712 		 u8 type, int dir, u32 id, int delete, int *err)
1713 {
1714 	struct xfrm_policy *pol, *ret;
1715 	struct hlist_head *chain;
1716 
1717 	*err = -ENOENT;
1718 	if (xfrm_policy_id2dir(id) != dir)
1719 		return NULL;
1720 
1721 	*err = 0;
1722 	spin_lock_bh(&net->xfrm.xfrm_policy_lock);
1723 	chain = net->xfrm.policy_byidx + idx_hash(net, id);
1724 	ret = NULL;
1725 	hlist_for_each_entry(pol, chain, byidx) {
1726 		if (pol->type == type && pol->index == id &&
1727 		    pol->if_id == if_id && xfrm_policy_mark_match(mark, pol)) {
1728 			xfrm_pol_hold(pol);
1729 			if (delete) {
1730 				*err = security_xfrm_policy_delete(
1731 								pol->security);
1732 				if (*err) {
1733 					spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
1734 					return pol;
1735 				}
1736 				__xfrm_policy_unlink(pol, dir);
1737 			}
1738 			ret = pol;
1739 			break;
1740 		}
1741 	}
1742 	spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
1743 
1744 	if (ret && delete)
1745 		xfrm_policy_kill(ret);
1746 	return ret;
1747 }
1748 EXPORT_SYMBOL(xfrm_policy_byid);
1749 
1750 #ifdef CONFIG_SECURITY_NETWORK_XFRM
1751 static inline int
1752 xfrm_policy_flush_secctx_check(struct net *net, u8 type, bool task_valid)
1753 {
1754 	struct xfrm_policy *pol;
1755 	int err = 0;
1756 
1757 	list_for_each_entry(pol, &net->xfrm.policy_all, walk.all) {
1758 		if (pol->walk.dead ||
1759 		    xfrm_policy_id2dir(pol->index) >= XFRM_POLICY_MAX ||
1760 		    pol->type != type)
1761 			continue;
1762 
1763 		err = security_xfrm_policy_delete(pol->security);
1764 		if (err) {
1765 			xfrm_audit_policy_delete(pol, 0, task_valid);
1766 			return err;
1767 		}
1768 	}
1769 	return err;
1770 }
1771 #else
1772 static inline int
1773 xfrm_policy_flush_secctx_check(struct net *net, u8 type, bool task_valid)
1774 {
1775 	return 0;
1776 }
1777 #endif
1778 
1779 int xfrm_policy_flush(struct net *net, u8 type, bool task_valid)
1780 {
1781 	int dir, err = 0, cnt = 0;
1782 	struct xfrm_policy *pol;
1783 
1784 	spin_lock_bh(&net->xfrm.xfrm_policy_lock);
1785 
1786 	err = xfrm_policy_flush_secctx_check(net, type, task_valid);
1787 	if (err)
1788 		goto out;
1789 
1790 again:
1791 	list_for_each_entry(pol, &net->xfrm.policy_all, walk.all) {
1792 		dir = xfrm_policy_id2dir(pol->index);
1793 		if (pol->walk.dead ||
1794 		    dir >= XFRM_POLICY_MAX ||
1795 		    pol->type != type)
1796 			continue;
1797 
1798 		__xfrm_policy_unlink(pol, dir);
1799 		spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
1800 		cnt++;
1801 		xfrm_audit_policy_delete(pol, 1, task_valid);
1802 		xfrm_policy_kill(pol);
1803 		spin_lock_bh(&net->xfrm.xfrm_policy_lock);
1804 		goto again;
1805 	}
1806 	if (cnt)
1807 		__xfrm_policy_inexact_flush(net);
1808 	else
1809 		err = -ESRCH;
1810 out:
1811 	spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
1812 	return err;
1813 }
1814 EXPORT_SYMBOL(xfrm_policy_flush);
1815 
1816 int xfrm_policy_walk(struct net *net, struct xfrm_policy_walk *walk,
1817 		     int (*func)(struct xfrm_policy *, int, int, void*),
1818 		     void *data)
1819 {
1820 	struct xfrm_policy *pol;
1821 	struct xfrm_policy_walk_entry *x;
1822 	int error = 0;
1823 
1824 	if (walk->type >= XFRM_POLICY_TYPE_MAX &&
1825 	    walk->type != XFRM_POLICY_TYPE_ANY)
1826 		return -EINVAL;
1827 
1828 	if (list_empty(&walk->walk.all) && walk->seq != 0)
1829 		return 0;
1830 
1831 	spin_lock_bh(&net->xfrm.xfrm_policy_lock);
1832 	if (list_empty(&walk->walk.all))
1833 		x = list_first_entry(&net->xfrm.policy_all, struct xfrm_policy_walk_entry, all);
1834 	else
1835 		x = list_first_entry(&walk->walk.all,
1836 				     struct xfrm_policy_walk_entry, all);
1837 
1838 	list_for_each_entry_from(x, &net->xfrm.policy_all, all) {
1839 		if (x->dead)
1840 			continue;
1841 		pol = container_of(x, struct xfrm_policy, walk);
1842 		if (walk->type != XFRM_POLICY_TYPE_ANY &&
1843 		    walk->type != pol->type)
1844 			continue;
1845 		error = func(pol, xfrm_policy_id2dir(pol->index),
1846 			     walk->seq, data);
1847 		if (error) {
1848 			list_move_tail(&walk->walk.all, &x->all);
1849 			goto out;
1850 		}
1851 		walk->seq++;
1852 	}
1853 	if (walk->seq == 0) {
1854 		error = -ENOENT;
1855 		goto out;
1856 	}
1857 	list_del_init(&walk->walk.all);
1858 out:
1859 	spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
1860 	return error;
1861 }
1862 EXPORT_SYMBOL(xfrm_policy_walk);
1863 
1864 void xfrm_policy_walk_init(struct xfrm_policy_walk *walk, u8 type)
1865 {
1866 	INIT_LIST_HEAD(&walk->walk.all);
1867 	walk->walk.dead = 1;
1868 	walk->type = type;
1869 	walk->seq = 0;
1870 }
1871 EXPORT_SYMBOL(xfrm_policy_walk_init);
1872 
1873 void xfrm_policy_walk_done(struct xfrm_policy_walk *walk, struct net *net)
1874 {
1875 	if (list_empty(&walk->walk.all))
1876 		return;
1877 
1878 	spin_lock_bh(&net->xfrm.xfrm_policy_lock); /*FIXME where is net? */
1879 	list_del(&walk->walk.all);
1880 	spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
1881 }
1882 EXPORT_SYMBOL(xfrm_policy_walk_done);
1883 
1884 /*
1885  * Find policy to apply to this flow.
1886  *
1887  * Returns 0 if policy found, else an -errno.
1888  */
1889 static int xfrm_policy_match(const struct xfrm_policy *pol,
1890 			     const struct flowi *fl,
1891 			     u8 type, u16 family, int dir, u32 if_id)
1892 {
1893 	const struct xfrm_selector *sel = &pol->selector;
1894 	int ret = -ESRCH;
1895 	bool match;
1896 
1897 	if (pol->family != family ||
1898 	    pol->if_id != if_id ||
1899 	    (fl->flowi_mark & pol->mark.m) != pol->mark.v ||
1900 	    pol->type != type)
1901 		return ret;
1902 
1903 	match = xfrm_selector_match(sel, fl, family);
1904 	if (match)
1905 		ret = security_xfrm_policy_lookup(pol->security, fl->flowi_secid);
1906 	return ret;
1907 }
1908 
1909 static struct xfrm_pol_inexact_node *
1910 xfrm_policy_lookup_inexact_addr(const struct rb_root *r,
1911 				seqcount_spinlock_t *count,
1912 				const xfrm_address_t *addr, u16 family)
1913 {
1914 	const struct rb_node *parent;
1915 	int seq;
1916 
1917 again:
1918 	seq = read_seqcount_begin(count);
1919 
1920 	parent = rcu_dereference_raw(r->rb_node);
1921 	while (parent) {
1922 		struct xfrm_pol_inexact_node *node;
1923 		int delta;
1924 
1925 		node = rb_entry(parent, struct xfrm_pol_inexact_node, node);
1926 
1927 		delta = xfrm_policy_addr_delta(addr, &node->addr,
1928 					       node->prefixlen, family);
1929 		if (delta < 0) {
1930 			parent = rcu_dereference_raw(parent->rb_left);
1931 			continue;
1932 		} else if (delta > 0) {
1933 			parent = rcu_dereference_raw(parent->rb_right);
1934 			continue;
1935 		}
1936 
1937 		return node;
1938 	}
1939 
1940 	if (read_seqcount_retry(count, seq))
1941 		goto again;
1942 
1943 	return NULL;
1944 }
1945 
1946 static bool
1947 xfrm_policy_find_inexact_candidates(struct xfrm_pol_inexact_candidates *cand,
1948 				    struct xfrm_pol_inexact_bin *b,
1949 				    const xfrm_address_t *saddr,
1950 				    const xfrm_address_t *daddr)
1951 {
1952 	struct xfrm_pol_inexact_node *n;
1953 	u16 family;
1954 
1955 	if (!b)
1956 		return false;
1957 
1958 	family = b->k.family;
1959 	memset(cand, 0, sizeof(*cand));
1960 	cand->res[XFRM_POL_CAND_ANY] = &b->hhead;
1961 
1962 	n = xfrm_policy_lookup_inexact_addr(&b->root_d, &b->count, daddr,
1963 					    family);
1964 	if (n) {
1965 		cand->res[XFRM_POL_CAND_DADDR] = &n->hhead;
1966 		n = xfrm_policy_lookup_inexact_addr(&n->root, &b->count, saddr,
1967 						    family);
1968 		if (n)
1969 			cand->res[XFRM_POL_CAND_BOTH] = &n->hhead;
1970 	}
1971 
1972 	n = xfrm_policy_lookup_inexact_addr(&b->root_s, &b->count, saddr,
1973 					    family);
1974 	if (n)
1975 		cand->res[XFRM_POL_CAND_SADDR] = &n->hhead;
1976 
1977 	return true;
1978 }
1979 
1980 static struct xfrm_pol_inexact_bin *
1981 xfrm_policy_inexact_lookup_rcu(struct net *net, u8 type, u16 family,
1982 			       u8 dir, u32 if_id)
1983 {
1984 	struct xfrm_pol_inexact_key k = {
1985 		.family = family,
1986 		.type = type,
1987 		.dir = dir,
1988 		.if_id = if_id,
1989 	};
1990 
1991 	write_pnet(&k.net, net);
1992 
1993 	return rhashtable_lookup(&xfrm_policy_inexact_table, &k,
1994 				 xfrm_pol_inexact_params);
1995 }
1996 
1997 static struct xfrm_pol_inexact_bin *
1998 xfrm_policy_inexact_lookup(struct net *net, u8 type, u16 family,
1999 			   u8 dir, u32 if_id)
2000 {
2001 	struct xfrm_pol_inexact_bin *bin;
2002 
2003 	lockdep_assert_held(&net->xfrm.xfrm_policy_lock);
2004 
2005 	rcu_read_lock();
2006 	bin = xfrm_policy_inexact_lookup_rcu(net, type, family, dir, if_id);
2007 	rcu_read_unlock();
2008 
2009 	return bin;
2010 }
2011 
2012 static struct xfrm_policy *
2013 __xfrm_policy_eval_candidates(struct hlist_head *chain,
2014 			      struct xfrm_policy *prefer,
2015 			      const struct flowi *fl,
2016 			      u8 type, u16 family, int dir, u32 if_id)
2017 {
2018 	u32 priority = prefer ? prefer->priority : ~0u;
2019 	struct xfrm_policy *pol;
2020 
2021 	if (!chain)
2022 		return NULL;
2023 
2024 	hlist_for_each_entry_rcu(pol, chain, bydst) {
2025 		int err;
2026 
2027 		if (pol->priority > priority)
2028 			break;
2029 
2030 		err = xfrm_policy_match(pol, fl, type, family, dir, if_id);
2031 		if (err) {
2032 			if (err != -ESRCH)
2033 				return ERR_PTR(err);
2034 
2035 			continue;
2036 		}
2037 
2038 		if (prefer) {
2039 			/* matches.  Is it older than *prefer? */
2040 			if (pol->priority == priority &&
2041 			    prefer->pos < pol->pos)
2042 				return prefer;
2043 		}
2044 
2045 		return pol;
2046 	}
2047 
2048 	return NULL;
2049 }
2050 
2051 static struct xfrm_policy *
2052 xfrm_policy_eval_candidates(struct xfrm_pol_inexact_candidates *cand,
2053 			    struct xfrm_policy *prefer,
2054 			    const struct flowi *fl,
2055 			    u8 type, u16 family, int dir, u32 if_id)
2056 {
2057 	struct xfrm_policy *tmp;
2058 	int i;
2059 
2060 	for (i = 0; i < ARRAY_SIZE(cand->res); i++) {
2061 		tmp = __xfrm_policy_eval_candidates(cand->res[i],
2062 						    prefer,
2063 						    fl, type, family, dir,
2064 						    if_id);
2065 		if (!tmp)
2066 			continue;
2067 
2068 		if (IS_ERR(tmp))
2069 			return tmp;
2070 		prefer = tmp;
2071 	}
2072 
2073 	return prefer;
2074 }
2075 
2076 static struct xfrm_policy *xfrm_policy_lookup_bytype(struct net *net, u8 type,
2077 						     const struct flowi *fl,
2078 						     u16 family, u8 dir,
2079 						     u32 if_id)
2080 {
2081 	struct xfrm_pol_inexact_candidates cand;
2082 	const xfrm_address_t *daddr, *saddr;
2083 	struct xfrm_pol_inexact_bin *bin;
2084 	struct xfrm_policy *pol, *ret;
2085 	struct hlist_head *chain;
2086 	unsigned int sequence;
2087 	int err;
2088 
2089 	daddr = xfrm_flowi_daddr(fl, family);
2090 	saddr = xfrm_flowi_saddr(fl, family);
2091 	if (unlikely(!daddr || !saddr))
2092 		return NULL;
2093 
2094  retry:
2095 	sequence = read_seqcount_begin(&xfrm_policy_hash_generation);
2096 	rcu_read_lock();
2097 
2098 	chain = policy_hash_direct(net, daddr, saddr, family, dir);
2099 	if (read_seqcount_retry(&xfrm_policy_hash_generation, sequence)) {
2100 		rcu_read_unlock();
2101 		goto retry;
2102 	}
2103 
2104 	ret = NULL;
2105 	hlist_for_each_entry_rcu(pol, chain, bydst) {
2106 		err = xfrm_policy_match(pol, fl, type, family, dir, if_id);
2107 		if (err) {
2108 			if (err == -ESRCH)
2109 				continue;
2110 			else {
2111 				ret = ERR_PTR(err);
2112 				goto fail;
2113 			}
2114 		} else {
2115 			ret = pol;
2116 			break;
2117 		}
2118 	}
2119 	bin = xfrm_policy_inexact_lookup_rcu(net, type, family, dir, if_id);
2120 	if (!bin || !xfrm_policy_find_inexact_candidates(&cand, bin, saddr,
2121 							 daddr))
2122 		goto skip_inexact;
2123 
2124 	pol = xfrm_policy_eval_candidates(&cand, ret, fl, type,
2125 					  family, dir, if_id);
2126 	if (pol) {
2127 		ret = pol;
2128 		if (IS_ERR(pol))
2129 			goto fail;
2130 	}
2131 
2132 skip_inexact:
2133 	if (read_seqcount_retry(&xfrm_policy_hash_generation, sequence)) {
2134 		rcu_read_unlock();
2135 		goto retry;
2136 	}
2137 
2138 	if (ret && !xfrm_pol_hold_rcu(ret)) {
2139 		rcu_read_unlock();
2140 		goto retry;
2141 	}
2142 fail:
2143 	rcu_read_unlock();
2144 
2145 	return ret;
2146 }
2147 
2148 static struct xfrm_policy *xfrm_policy_lookup(struct net *net,
2149 					      const struct flowi *fl,
2150 					      u16 family, u8 dir, u32 if_id)
2151 {
2152 #ifdef CONFIG_XFRM_SUB_POLICY
2153 	struct xfrm_policy *pol;
2154 
2155 	pol = xfrm_policy_lookup_bytype(net, XFRM_POLICY_TYPE_SUB, fl, family,
2156 					dir, if_id);
2157 	if (pol != NULL)
2158 		return pol;
2159 #endif
2160 	return xfrm_policy_lookup_bytype(net, XFRM_POLICY_TYPE_MAIN, fl, family,
2161 					 dir, if_id);
2162 }
2163 
2164 static struct xfrm_policy *xfrm_sk_policy_lookup(const struct sock *sk, int dir,
2165 						 const struct flowi *fl,
2166 						 u16 family, u32 if_id)
2167 {
2168 	struct xfrm_policy *pol;
2169 
2170 	rcu_read_lock();
2171  again:
2172 	pol = rcu_dereference(sk->sk_policy[dir]);
2173 	if (pol != NULL) {
2174 		bool match;
2175 		int err = 0;
2176 
2177 		if (pol->family != family) {
2178 			pol = NULL;
2179 			goto out;
2180 		}
2181 
2182 		match = xfrm_selector_match(&pol->selector, fl, family);
2183 		if (match) {
2184 			if ((sk->sk_mark & pol->mark.m) != pol->mark.v ||
2185 			    pol->if_id != if_id) {
2186 				pol = NULL;
2187 				goto out;
2188 			}
2189 			err = security_xfrm_policy_lookup(pol->security,
2190 						      fl->flowi_secid);
2191 			if (!err) {
2192 				if (!xfrm_pol_hold_rcu(pol))
2193 					goto again;
2194 			} else if (err == -ESRCH) {
2195 				pol = NULL;
2196 			} else {
2197 				pol = ERR_PTR(err);
2198 			}
2199 		} else
2200 			pol = NULL;
2201 	}
2202 out:
2203 	rcu_read_unlock();
2204 	return pol;
2205 }
2206 
2207 static void __xfrm_policy_link(struct xfrm_policy *pol, int dir)
2208 {
2209 	struct net *net = xp_net(pol);
2210 
2211 	list_add(&pol->walk.all, &net->xfrm.policy_all);
2212 	net->xfrm.policy_count[dir]++;
2213 	xfrm_pol_hold(pol);
2214 }
2215 
2216 static struct xfrm_policy *__xfrm_policy_unlink(struct xfrm_policy *pol,
2217 						int dir)
2218 {
2219 	struct net *net = xp_net(pol);
2220 
2221 	if (list_empty(&pol->walk.all))
2222 		return NULL;
2223 
2224 	/* Socket policies are not hashed. */
2225 	if (!hlist_unhashed(&pol->bydst)) {
2226 		hlist_del_rcu(&pol->bydst);
2227 		hlist_del_init(&pol->bydst_inexact_list);
2228 		hlist_del(&pol->byidx);
2229 	}
2230 
2231 	list_del_init(&pol->walk.all);
2232 	net->xfrm.policy_count[dir]--;
2233 
2234 	return pol;
2235 }
2236 
2237 static void xfrm_sk_policy_link(struct xfrm_policy *pol, int dir)
2238 {
2239 	__xfrm_policy_link(pol, XFRM_POLICY_MAX + dir);
2240 }
2241 
2242 static void xfrm_sk_policy_unlink(struct xfrm_policy *pol, int dir)
2243 {
2244 	__xfrm_policy_unlink(pol, XFRM_POLICY_MAX + dir);
2245 }
2246 
2247 int xfrm_policy_delete(struct xfrm_policy *pol, int dir)
2248 {
2249 	struct net *net = xp_net(pol);
2250 
2251 	spin_lock_bh(&net->xfrm.xfrm_policy_lock);
2252 	pol = __xfrm_policy_unlink(pol, dir);
2253 	spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
2254 	if (pol) {
2255 		xfrm_policy_kill(pol);
2256 		return 0;
2257 	}
2258 	return -ENOENT;
2259 }
2260 EXPORT_SYMBOL(xfrm_policy_delete);
2261 
2262 int xfrm_sk_policy_insert(struct sock *sk, int dir, struct xfrm_policy *pol)
2263 {
2264 	struct net *net = sock_net(sk);
2265 	struct xfrm_policy *old_pol;
2266 
2267 #ifdef CONFIG_XFRM_SUB_POLICY
2268 	if (pol && pol->type != XFRM_POLICY_TYPE_MAIN)
2269 		return -EINVAL;
2270 #endif
2271 
2272 	spin_lock_bh(&net->xfrm.xfrm_policy_lock);
2273 	old_pol = rcu_dereference_protected(sk->sk_policy[dir],
2274 				lockdep_is_held(&net->xfrm.xfrm_policy_lock));
2275 	if (pol) {
2276 		pol->curlft.add_time = ktime_get_real_seconds();
2277 		pol->index = xfrm_gen_index(net, XFRM_POLICY_MAX+dir, 0);
2278 		xfrm_sk_policy_link(pol, dir);
2279 	}
2280 	rcu_assign_pointer(sk->sk_policy[dir], pol);
2281 	if (old_pol) {
2282 		if (pol)
2283 			xfrm_policy_requeue(old_pol, pol);
2284 
2285 		/* Unlinking succeeds always. This is the only function
2286 		 * allowed to delete or replace socket policy.
2287 		 */
2288 		xfrm_sk_policy_unlink(old_pol, dir);
2289 	}
2290 	spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
2291 
2292 	if (old_pol) {
2293 		xfrm_policy_kill(old_pol);
2294 	}
2295 	return 0;
2296 }
2297 
2298 static struct xfrm_policy *clone_policy(const struct xfrm_policy *old, int dir)
2299 {
2300 	struct xfrm_policy *newp = xfrm_policy_alloc(xp_net(old), GFP_ATOMIC);
2301 	struct net *net = xp_net(old);
2302 
2303 	if (newp) {
2304 		newp->selector = old->selector;
2305 		if (security_xfrm_policy_clone(old->security,
2306 					       &newp->security)) {
2307 			kfree(newp);
2308 			return NULL;  /* ENOMEM */
2309 		}
2310 		newp->lft = old->lft;
2311 		newp->curlft = old->curlft;
2312 		newp->mark = old->mark;
2313 		newp->if_id = old->if_id;
2314 		newp->action = old->action;
2315 		newp->flags = old->flags;
2316 		newp->xfrm_nr = old->xfrm_nr;
2317 		newp->index = old->index;
2318 		newp->type = old->type;
2319 		newp->family = old->family;
2320 		memcpy(newp->xfrm_vec, old->xfrm_vec,
2321 		       newp->xfrm_nr*sizeof(struct xfrm_tmpl));
2322 		spin_lock_bh(&net->xfrm.xfrm_policy_lock);
2323 		xfrm_sk_policy_link(newp, dir);
2324 		spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
2325 		xfrm_pol_put(newp);
2326 	}
2327 	return newp;
2328 }
2329 
2330 int __xfrm_sk_clone_policy(struct sock *sk, const struct sock *osk)
2331 {
2332 	const struct xfrm_policy *p;
2333 	struct xfrm_policy *np;
2334 	int i, ret = 0;
2335 
2336 	rcu_read_lock();
2337 	for (i = 0; i < 2; i++) {
2338 		p = rcu_dereference(osk->sk_policy[i]);
2339 		if (p) {
2340 			np = clone_policy(p, i);
2341 			if (unlikely(!np)) {
2342 				ret = -ENOMEM;
2343 				break;
2344 			}
2345 			rcu_assign_pointer(sk->sk_policy[i], np);
2346 		}
2347 	}
2348 	rcu_read_unlock();
2349 	return ret;
2350 }
2351 
2352 static int
2353 xfrm_get_saddr(struct net *net, int oif, xfrm_address_t *local,
2354 	       xfrm_address_t *remote, unsigned short family, u32 mark)
2355 {
2356 	int err;
2357 	const struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
2358 
2359 	if (unlikely(afinfo == NULL))
2360 		return -EINVAL;
2361 	err = afinfo->get_saddr(net, oif, local, remote, mark);
2362 	rcu_read_unlock();
2363 	return err;
2364 }
2365 
2366 /* Resolve list of templates for the flow, given policy. */
2367 
2368 static int
2369 xfrm_tmpl_resolve_one(struct xfrm_policy *policy, const struct flowi *fl,
2370 		      struct xfrm_state **xfrm, unsigned short family)
2371 {
2372 	struct net *net = xp_net(policy);
2373 	int nx;
2374 	int i, error;
2375 	xfrm_address_t *daddr = xfrm_flowi_daddr(fl, family);
2376 	xfrm_address_t *saddr = xfrm_flowi_saddr(fl, family);
2377 	xfrm_address_t tmp;
2378 
2379 	for (nx = 0, i = 0; i < policy->xfrm_nr; i++) {
2380 		struct xfrm_state *x;
2381 		xfrm_address_t *remote = daddr;
2382 		xfrm_address_t *local  = saddr;
2383 		struct xfrm_tmpl *tmpl = &policy->xfrm_vec[i];
2384 
2385 		if (tmpl->mode == XFRM_MODE_TUNNEL ||
2386 		    tmpl->mode == XFRM_MODE_BEET) {
2387 			remote = &tmpl->id.daddr;
2388 			local = &tmpl->saddr;
2389 			if (xfrm_addr_any(local, tmpl->encap_family)) {
2390 				error = xfrm_get_saddr(net, fl->flowi_oif,
2391 						       &tmp, remote,
2392 						       tmpl->encap_family, 0);
2393 				if (error)
2394 					goto fail;
2395 				local = &tmp;
2396 			}
2397 		}
2398 
2399 		x = xfrm_state_find(remote, local, fl, tmpl, policy, &error,
2400 				    family, policy->if_id);
2401 
2402 		if (x && x->km.state == XFRM_STATE_VALID) {
2403 			xfrm[nx++] = x;
2404 			daddr = remote;
2405 			saddr = local;
2406 			continue;
2407 		}
2408 		if (x) {
2409 			error = (x->km.state == XFRM_STATE_ERROR ?
2410 				 -EINVAL : -EAGAIN);
2411 			xfrm_state_put(x);
2412 		} else if (error == -ESRCH) {
2413 			error = -EAGAIN;
2414 		}
2415 
2416 		if (!tmpl->optional)
2417 			goto fail;
2418 	}
2419 	return nx;
2420 
2421 fail:
2422 	for (nx--; nx >= 0; nx--)
2423 		xfrm_state_put(xfrm[nx]);
2424 	return error;
2425 }
2426 
2427 static int
2428 xfrm_tmpl_resolve(struct xfrm_policy **pols, int npols, const struct flowi *fl,
2429 		  struct xfrm_state **xfrm, unsigned short family)
2430 {
2431 	struct xfrm_state *tp[XFRM_MAX_DEPTH];
2432 	struct xfrm_state **tpp = (npols > 1) ? tp : xfrm;
2433 	int cnx = 0;
2434 	int error;
2435 	int ret;
2436 	int i;
2437 
2438 	for (i = 0; i < npols; i++) {
2439 		if (cnx + pols[i]->xfrm_nr >= XFRM_MAX_DEPTH) {
2440 			error = -ENOBUFS;
2441 			goto fail;
2442 		}
2443 
2444 		ret = xfrm_tmpl_resolve_one(pols[i], fl, &tpp[cnx], family);
2445 		if (ret < 0) {
2446 			error = ret;
2447 			goto fail;
2448 		} else
2449 			cnx += ret;
2450 	}
2451 
2452 	/* found states are sorted for outbound processing */
2453 	if (npols > 1)
2454 		xfrm_state_sort(xfrm, tpp, cnx, family);
2455 
2456 	return cnx;
2457 
2458  fail:
2459 	for (cnx--; cnx >= 0; cnx--)
2460 		xfrm_state_put(tpp[cnx]);
2461 	return error;
2462 
2463 }
2464 
2465 static int xfrm_get_tos(const struct flowi *fl, int family)
2466 {
2467 	if (family == AF_INET)
2468 		return IPTOS_RT_MASK & fl->u.ip4.flowi4_tos;
2469 
2470 	return 0;
2471 }
2472 
2473 static inline struct xfrm_dst *xfrm_alloc_dst(struct net *net, int family)
2474 {
2475 	const struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
2476 	struct dst_ops *dst_ops;
2477 	struct xfrm_dst *xdst;
2478 
2479 	if (!afinfo)
2480 		return ERR_PTR(-EINVAL);
2481 
2482 	switch (family) {
2483 	case AF_INET:
2484 		dst_ops = &net->xfrm.xfrm4_dst_ops;
2485 		break;
2486 #if IS_ENABLED(CONFIG_IPV6)
2487 	case AF_INET6:
2488 		dst_ops = &net->xfrm.xfrm6_dst_ops;
2489 		break;
2490 #endif
2491 	default:
2492 		BUG();
2493 	}
2494 	xdst = dst_alloc(dst_ops, NULL, 1, DST_OBSOLETE_NONE, 0);
2495 
2496 	if (likely(xdst)) {
2497 		struct dst_entry *dst = &xdst->u.dst;
2498 
2499 		memset(dst + 1, 0, sizeof(*xdst) - sizeof(*dst));
2500 	} else
2501 		xdst = ERR_PTR(-ENOBUFS);
2502 
2503 	rcu_read_unlock();
2504 
2505 	return xdst;
2506 }
2507 
2508 static void xfrm_init_path(struct xfrm_dst *path, struct dst_entry *dst,
2509 			   int nfheader_len)
2510 {
2511 	if (dst->ops->family == AF_INET6) {
2512 		struct rt6_info *rt = (struct rt6_info *)dst;
2513 		path->path_cookie = rt6_get_cookie(rt);
2514 		path->u.rt6.rt6i_nfheader_len = nfheader_len;
2515 	}
2516 }
2517 
2518 static inline int xfrm_fill_dst(struct xfrm_dst *xdst, struct net_device *dev,
2519 				const struct flowi *fl)
2520 {
2521 	const struct xfrm_policy_afinfo *afinfo =
2522 		xfrm_policy_get_afinfo(xdst->u.dst.ops->family);
2523 	int err;
2524 
2525 	if (!afinfo)
2526 		return -EINVAL;
2527 
2528 	err = afinfo->fill_dst(xdst, dev, fl);
2529 
2530 	rcu_read_unlock();
2531 
2532 	return err;
2533 }
2534 
2535 
2536 /* Allocate chain of dst_entry's, attach known xfrm's, calculate
2537  * all the metrics... Shortly, bundle a bundle.
2538  */
2539 
2540 static struct dst_entry *xfrm_bundle_create(struct xfrm_policy *policy,
2541 					    struct xfrm_state **xfrm,
2542 					    struct xfrm_dst **bundle,
2543 					    int nx,
2544 					    const struct flowi *fl,
2545 					    struct dst_entry *dst)
2546 {
2547 	const struct xfrm_state_afinfo *afinfo;
2548 	const struct xfrm_mode *inner_mode;
2549 	struct net *net = xp_net(policy);
2550 	unsigned long now = jiffies;
2551 	struct net_device *dev;
2552 	struct xfrm_dst *xdst_prev = NULL;
2553 	struct xfrm_dst *xdst0 = NULL;
2554 	int i = 0;
2555 	int err;
2556 	int header_len = 0;
2557 	int nfheader_len = 0;
2558 	int trailer_len = 0;
2559 	int tos;
2560 	int family = policy->selector.family;
2561 	xfrm_address_t saddr, daddr;
2562 
2563 	xfrm_flowi_addr_get(fl, &saddr, &daddr, family);
2564 
2565 	tos = xfrm_get_tos(fl, family);
2566 
2567 	dst_hold(dst);
2568 
2569 	for (; i < nx; i++) {
2570 		struct xfrm_dst *xdst = xfrm_alloc_dst(net, family);
2571 		struct dst_entry *dst1 = &xdst->u.dst;
2572 
2573 		err = PTR_ERR(xdst);
2574 		if (IS_ERR(xdst)) {
2575 			dst_release(dst);
2576 			goto put_states;
2577 		}
2578 
2579 		bundle[i] = xdst;
2580 		if (!xdst_prev)
2581 			xdst0 = xdst;
2582 		else
2583 			/* Ref count is taken during xfrm_alloc_dst()
2584 			 * No need to do dst_clone() on dst1
2585 			 */
2586 			xfrm_dst_set_child(xdst_prev, &xdst->u.dst);
2587 
2588 		if (xfrm[i]->sel.family == AF_UNSPEC) {
2589 			inner_mode = xfrm_ip2inner_mode(xfrm[i],
2590 							xfrm_af2proto(family));
2591 			if (!inner_mode) {
2592 				err = -EAFNOSUPPORT;
2593 				dst_release(dst);
2594 				goto put_states;
2595 			}
2596 		} else
2597 			inner_mode = &xfrm[i]->inner_mode;
2598 
2599 		xdst->route = dst;
2600 		dst_copy_metrics(dst1, dst);
2601 
2602 		if (xfrm[i]->props.mode != XFRM_MODE_TRANSPORT) {
2603 			__u32 mark = 0;
2604 
2605 			if (xfrm[i]->props.smark.v || xfrm[i]->props.smark.m)
2606 				mark = xfrm_smark_get(fl->flowi_mark, xfrm[i]);
2607 
2608 			family = xfrm[i]->props.family;
2609 			dst = xfrm_dst_lookup(xfrm[i], tos, fl->flowi_oif,
2610 					      &saddr, &daddr, family, mark);
2611 			err = PTR_ERR(dst);
2612 			if (IS_ERR(dst))
2613 				goto put_states;
2614 		} else
2615 			dst_hold(dst);
2616 
2617 		dst1->xfrm = xfrm[i];
2618 		xdst->xfrm_genid = xfrm[i]->genid;
2619 
2620 		dst1->obsolete = DST_OBSOLETE_FORCE_CHK;
2621 		dst1->lastuse = now;
2622 
2623 		dst1->input = dst_discard;
2624 
2625 		rcu_read_lock();
2626 		afinfo = xfrm_state_afinfo_get_rcu(inner_mode->family);
2627 		if (likely(afinfo))
2628 			dst1->output = afinfo->output;
2629 		else
2630 			dst1->output = dst_discard_out;
2631 		rcu_read_unlock();
2632 
2633 		xdst_prev = xdst;
2634 
2635 		header_len += xfrm[i]->props.header_len;
2636 		if (xfrm[i]->type->flags & XFRM_TYPE_NON_FRAGMENT)
2637 			nfheader_len += xfrm[i]->props.header_len;
2638 		trailer_len += xfrm[i]->props.trailer_len;
2639 	}
2640 
2641 	xfrm_dst_set_child(xdst_prev, dst);
2642 	xdst0->path = dst;
2643 
2644 	err = -ENODEV;
2645 	dev = dst->dev;
2646 	if (!dev)
2647 		goto free_dst;
2648 
2649 	xfrm_init_path(xdst0, dst, nfheader_len);
2650 	xfrm_init_pmtu(bundle, nx);
2651 
2652 	for (xdst_prev = xdst0; xdst_prev != (struct xfrm_dst *)dst;
2653 	     xdst_prev = (struct xfrm_dst *) xfrm_dst_child(&xdst_prev->u.dst)) {
2654 		err = xfrm_fill_dst(xdst_prev, dev, fl);
2655 		if (err)
2656 			goto free_dst;
2657 
2658 		xdst_prev->u.dst.header_len = header_len;
2659 		xdst_prev->u.dst.trailer_len = trailer_len;
2660 		header_len -= xdst_prev->u.dst.xfrm->props.header_len;
2661 		trailer_len -= xdst_prev->u.dst.xfrm->props.trailer_len;
2662 	}
2663 
2664 	return &xdst0->u.dst;
2665 
2666 put_states:
2667 	for (; i < nx; i++)
2668 		xfrm_state_put(xfrm[i]);
2669 free_dst:
2670 	if (xdst0)
2671 		dst_release_immediate(&xdst0->u.dst);
2672 
2673 	return ERR_PTR(err);
2674 }
2675 
2676 static int xfrm_expand_policies(const struct flowi *fl, u16 family,
2677 				struct xfrm_policy **pols,
2678 				int *num_pols, int *num_xfrms)
2679 {
2680 	int i;
2681 
2682 	if (*num_pols == 0 || !pols[0]) {
2683 		*num_pols = 0;
2684 		*num_xfrms = 0;
2685 		return 0;
2686 	}
2687 	if (IS_ERR(pols[0]))
2688 		return PTR_ERR(pols[0]);
2689 
2690 	*num_xfrms = pols[0]->xfrm_nr;
2691 
2692 #ifdef CONFIG_XFRM_SUB_POLICY
2693 	if (pols[0] && pols[0]->action == XFRM_POLICY_ALLOW &&
2694 	    pols[0]->type != XFRM_POLICY_TYPE_MAIN) {
2695 		pols[1] = xfrm_policy_lookup_bytype(xp_net(pols[0]),
2696 						    XFRM_POLICY_TYPE_MAIN,
2697 						    fl, family,
2698 						    XFRM_POLICY_OUT,
2699 						    pols[0]->if_id);
2700 		if (pols[1]) {
2701 			if (IS_ERR(pols[1])) {
2702 				xfrm_pols_put(pols, *num_pols);
2703 				return PTR_ERR(pols[1]);
2704 			}
2705 			(*num_pols)++;
2706 			(*num_xfrms) += pols[1]->xfrm_nr;
2707 		}
2708 	}
2709 #endif
2710 	for (i = 0; i < *num_pols; i++) {
2711 		if (pols[i]->action != XFRM_POLICY_ALLOW) {
2712 			*num_xfrms = -1;
2713 			break;
2714 		}
2715 	}
2716 
2717 	return 0;
2718 
2719 }
2720 
2721 static struct xfrm_dst *
2722 xfrm_resolve_and_create_bundle(struct xfrm_policy **pols, int num_pols,
2723 			       const struct flowi *fl, u16 family,
2724 			       struct dst_entry *dst_orig)
2725 {
2726 	struct net *net = xp_net(pols[0]);
2727 	struct xfrm_state *xfrm[XFRM_MAX_DEPTH];
2728 	struct xfrm_dst *bundle[XFRM_MAX_DEPTH];
2729 	struct xfrm_dst *xdst;
2730 	struct dst_entry *dst;
2731 	int err;
2732 
2733 	/* Try to instantiate a bundle */
2734 	err = xfrm_tmpl_resolve(pols, num_pols, fl, xfrm, family);
2735 	if (err <= 0) {
2736 		if (err == 0)
2737 			return NULL;
2738 
2739 		if (err != -EAGAIN)
2740 			XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTPOLERROR);
2741 		return ERR_PTR(err);
2742 	}
2743 
2744 	dst = xfrm_bundle_create(pols[0], xfrm, bundle, err, fl, dst_orig);
2745 	if (IS_ERR(dst)) {
2746 		XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTBUNDLEGENERROR);
2747 		return ERR_CAST(dst);
2748 	}
2749 
2750 	xdst = (struct xfrm_dst *)dst;
2751 	xdst->num_xfrms = err;
2752 	xdst->num_pols = num_pols;
2753 	memcpy(xdst->pols, pols, sizeof(struct xfrm_policy *) * num_pols);
2754 	xdst->policy_genid = atomic_read(&pols[0]->genid);
2755 
2756 	return xdst;
2757 }
2758 
2759 static void xfrm_policy_queue_process(struct timer_list *t)
2760 {
2761 	struct sk_buff *skb;
2762 	struct sock *sk;
2763 	struct dst_entry *dst;
2764 	struct xfrm_policy *pol = from_timer(pol, t, polq.hold_timer);
2765 	struct net *net = xp_net(pol);
2766 	struct xfrm_policy_queue *pq = &pol->polq;
2767 	struct flowi fl;
2768 	struct sk_buff_head list;
2769 	__u32 skb_mark;
2770 
2771 	spin_lock(&pq->hold_queue.lock);
2772 	skb = skb_peek(&pq->hold_queue);
2773 	if (!skb) {
2774 		spin_unlock(&pq->hold_queue.lock);
2775 		goto out;
2776 	}
2777 	dst = skb_dst(skb);
2778 	sk = skb->sk;
2779 
2780 	/* Fixup the mark to support VTI. */
2781 	skb_mark = skb->mark;
2782 	skb->mark = pol->mark.v;
2783 	xfrm_decode_session(skb, &fl, dst->ops->family);
2784 	skb->mark = skb_mark;
2785 	spin_unlock(&pq->hold_queue.lock);
2786 
2787 	dst_hold(xfrm_dst_path(dst));
2788 	dst = xfrm_lookup(net, xfrm_dst_path(dst), &fl, sk, XFRM_LOOKUP_QUEUE);
2789 	if (IS_ERR(dst))
2790 		goto purge_queue;
2791 
2792 	if (dst->flags & DST_XFRM_QUEUE) {
2793 		dst_release(dst);
2794 
2795 		if (pq->timeout >= XFRM_QUEUE_TMO_MAX)
2796 			goto purge_queue;
2797 
2798 		pq->timeout = pq->timeout << 1;
2799 		if (!mod_timer(&pq->hold_timer, jiffies + pq->timeout))
2800 			xfrm_pol_hold(pol);
2801 		goto out;
2802 	}
2803 
2804 	dst_release(dst);
2805 
2806 	__skb_queue_head_init(&list);
2807 
2808 	spin_lock(&pq->hold_queue.lock);
2809 	pq->timeout = 0;
2810 	skb_queue_splice_init(&pq->hold_queue, &list);
2811 	spin_unlock(&pq->hold_queue.lock);
2812 
2813 	while (!skb_queue_empty(&list)) {
2814 		skb = __skb_dequeue(&list);
2815 
2816 		/* Fixup the mark to support VTI. */
2817 		skb_mark = skb->mark;
2818 		skb->mark = pol->mark.v;
2819 		xfrm_decode_session(skb, &fl, skb_dst(skb)->ops->family);
2820 		skb->mark = skb_mark;
2821 
2822 		dst_hold(xfrm_dst_path(skb_dst(skb)));
2823 		dst = xfrm_lookup(net, xfrm_dst_path(skb_dst(skb)), &fl, skb->sk, 0);
2824 		if (IS_ERR(dst)) {
2825 			kfree_skb(skb);
2826 			continue;
2827 		}
2828 
2829 		nf_reset_ct(skb);
2830 		skb_dst_drop(skb);
2831 		skb_dst_set(skb, dst);
2832 
2833 		dst_output(net, skb->sk, skb);
2834 	}
2835 
2836 out:
2837 	xfrm_pol_put(pol);
2838 	return;
2839 
2840 purge_queue:
2841 	pq->timeout = 0;
2842 	skb_queue_purge(&pq->hold_queue);
2843 	xfrm_pol_put(pol);
2844 }
2845 
2846 static int xdst_queue_output(struct net *net, struct sock *sk, struct sk_buff *skb)
2847 {
2848 	unsigned long sched_next;
2849 	struct dst_entry *dst = skb_dst(skb);
2850 	struct xfrm_dst *xdst = (struct xfrm_dst *) dst;
2851 	struct xfrm_policy *pol = xdst->pols[0];
2852 	struct xfrm_policy_queue *pq = &pol->polq;
2853 
2854 	if (unlikely(skb_fclone_busy(sk, skb))) {
2855 		kfree_skb(skb);
2856 		return 0;
2857 	}
2858 
2859 	if (pq->hold_queue.qlen > XFRM_MAX_QUEUE_LEN) {
2860 		kfree_skb(skb);
2861 		return -EAGAIN;
2862 	}
2863 
2864 	skb_dst_force(skb);
2865 
2866 	spin_lock_bh(&pq->hold_queue.lock);
2867 
2868 	if (!pq->timeout)
2869 		pq->timeout = XFRM_QUEUE_TMO_MIN;
2870 
2871 	sched_next = jiffies + pq->timeout;
2872 
2873 	if (del_timer(&pq->hold_timer)) {
2874 		if (time_before(pq->hold_timer.expires, sched_next))
2875 			sched_next = pq->hold_timer.expires;
2876 		xfrm_pol_put(pol);
2877 	}
2878 
2879 	__skb_queue_tail(&pq->hold_queue, skb);
2880 	if (!mod_timer(&pq->hold_timer, sched_next))
2881 		xfrm_pol_hold(pol);
2882 
2883 	spin_unlock_bh(&pq->hold_queue.lock);
2884 
2885 	return 0;
2886 }
2887 
2888 static struct xfrm_dst *xfrm_create_dummy_bundle(struct net *net,
2889 						 struct xfrm_flo *xflo,
2890 						 const struct flowi *fl,
2891 						 int num_xfrms,
2892 						 u16 family)
2893 {
2894 	int err;
2895 	struct net_device *dev;
2896 	struct dst_entry *dst;
2897 	struct dst_entry *dst1;
2898 	struct xfrm_dst *xdst;
2899 
2900 	xdst = xfrm_alloc_dst(net, family);
2901 	if (IS_ERR(xdst))
2902 		return xdst;
2903 
2904 	if (!(xflo->flags & XFRM_LOOKUP_QUEUE) ||
2905 	    net->xfrm.sysctl_larval_drop ||
2906 	    num_xfrms <= 0)
2907 		return xdst;
2908 
2909 	dst = xflo->dst_orig;
2910 	dst1 = &xdst->u.dst;
2911 	dst_hold(dst);
2912 	xdst->route = dst;
2913 
2914 	dst_copy_metrics(dst1, dst);
2915 
2916 	dst1->obsolete = DST_OBSOLETE_FORCE_CHK;
2917 	dst1->flags |= DST_XFRM_QUEUE;
2918 	dst1->lastuse = jiffies;
2919 
2920 	dst1->input = dst_discard;
2921 	dst1->output = xdst_queue_output;
2922 
2923 	dst_hold(dst);
2924 	xfrm_dst_set_child(xdst, dst);
2925 	xdst->path = dst;
2926 
2927 	xfrm_init_path((struct xfrm_dst *)dst1, dst, 0);
2928 
2929 	err = -ENODEV;
2930 	dev = dst->dev;
2931 	if (!dev)
2932 		goto free_dst;
2933 
2934 	err = xfrm_fill_dst(xdst, dev, fl);
2935 	if (err)
2936 		goto free_dst;
2937 
2938 out:
2939 	return xdst;
2940 
2941 free_dst:
2942 	dst_release(dst1);
2943 	xdst = ERR_PTR(err);
2944 	goto out;
2945 }
2946 
2947 static struct xfrm_dst *xfrm_bundle_lookup(struct net *net,
2948 					   const struct flowi *fl,
2949 					   u16 family, u8 dir,
2950 					   struct xfrm_flo *xflo, u32 if_id)
2951 {
2952 	struct xfrm_policy *pols[XFRM_POLICY_TYPE_MAX];
2953 	int num_pols = 0, num_xfrms = 0, err;
2954 	struct xfrm_dst *xdst;
2955 
2956 	/* Resolve policies to use if we couldn't get them from
2957 	 * previous cache entry */
2958 	num_pols = 1;
2959 	pols[0] = xfrm_policy_lookup(net, fl, family, dir, if_id);
2960 	err = xfrm_expand_policies(fl, family, pols,
2961 					   &num_pols, &num_xfrms);
2962 	if (err < 0)
2963 		goto inc_error;
2964 	if (num_pols == 0)
2965 		return NULL;
2966 	if (num_xfrms <= 0)
2967 		goto make_dummy_bundle;
2968 
2969 	xdst = xfrm_resolve_and_create_bundle(pols, num_pols, fl, family,
2970 					      xflo->dst_orig);
2971 	if (IS_ERR(xdst)) {
2972 		err = PTR_ERR(xdst);
2973 		if (err == -EREMOTE) {
2974 			xfrm_pols_put(pols, num_pols);
2975 			return NULL;
2976 		}
2977 
2978 		if (err != -EAGAIN)
2979 			goto error;
2980 		goto make_dummy_bundle;
2981 	} else if (xdst == NULL) {
2982 		num_xfrms = 0;
2983 		goto make_dummy_bundle;
2984 	}
2985 
2986 	return xdst;
2987 
2988 make_dummy_bundle:
2989 	/* We found policies, but there's no bundles to instantiate:
2990 	 * either because the policy blocks, has no transformations or
2991 	 * we could not build template (no xfrm_states).*/
2992 	xdst = xfrm_create_dummy_bundle(net, xflo, fl, num_xfrms, family);
2993 	if (IS_ERR(xdst)) {
2994 		xfrm_pols_put(pols, num_pols);
2995 		return ERR_CAST(xdst);
2996 	}
2997 	xdst->num_pols = num_pols;
2998 	xdst->num_xfrms = num_xfrms;
2999 	memcpy(xdst->pols, pols, sizeof(struct xfrm_policy *) * num_pols);
3000 
3001 	return xdst;
3002 
3003 inc_error:
3004 	XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTPOLERROR);
3005 error:
3006 	xfrm_pols_put(pols, num_pols);
3007 	return ERR_PTR(err);
3008 }
3009 
3010 static struct dst_entry *make_blackhole(struct net *net, u16 family,
3011 					struct dst_entry *dst_orig)
3012 {
3013 	const struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
3014 	struct dst_entry *ret;
3015 
3016 	if (!afinfo) {
3017 		dst_release(dst_orig);
3018 		return ERR_PTR(-EINVAL);
3019 	} else {
3020 		ret = afinfo->blackhole_route(net, dst_orig);
3021 	}
3022 	rcu_read_unlock();
3023 
3024 	return ret;
3025 }
3026 
3027 /* Finds/creates a bundle for given flow and if_id
3028  *
3029  * At the moment we eat a raw IP route. Mostly to speed up lookups
3030  * on interfaces with disabled IPsec.
3031  *
3032  * xfrm_lookup uses an if_id of 0 by default, and is provided for
3033  * compatibility
3034  */
3035 struct dst_entry *xfrm_lookup_with_ifid(struct net *net,
3036 					struct dst_entry *dst_orig,
3037 					const struct flowi *fl,
3038 					const struct sock *sk,
3039 					int flags, u32 if_id)
3040 {
3041 	struct xfrm_policy *pols[XFRM_POLICY_TYPE_MAX];
3042 	struct xfrm_dst *xdst;
3043 	struct dst_entry *dst, *route;
3044 	u16 family = dst_orig->ops->family;
3045 	u8 dir = XFRM_POLICY_OUT;
3046 	int i, err, num_pols, num_xfrms = 0, drop_pols = 0;
3047 
3048 	dst = NULL;
3049 	xdst = NULL;
3050 	route = NULL;
3051 
3052 	sk = sk_const_to_full_sk(sk);
3053 	if (sk && sk->sk_policy[XFRM_POLICY_OUT]) {
3054 		num_pols = 1;
3055 		pols[0] = xfrm_sk_policy_lookup(sk, XFRM_POLICY_OUT, fl, family,
3056 						if_id);
3057 		err = xfrm_expand_policies(fl, family, pols,
3058 					   &num_pols, &num_xfrms);
3059 		if (err < 0)
3060 			goto dropdst;
3061 
3062 		if (num_pols) {
3063 			if (num_xfrms <= 0) {
3064 				drop_pols = num_pols;
3065 				goto no_transform;
3066 			}
3067 
3068 			xdst = xfrm_resolve_and_create_bundle(
3069 					pols, num_pols, fl,
3070 					family, dst_orig);
3071 
3072 			if (IS_ERR(xdst)) {
3073 				xfrm_pols_put(pols, num_pols);
3074 				err = PTR_ERR(xdst);
3075 				if (err == -EREMOTE)
3076 					goto nopol;
3077 
3078 				goto dropdst;
3079 			} else if (xdst == NULL) {
3080 				num_xfrms = 0;
3081 				drop_pols = num_pols;
3082 				goto no_transform;
3083 			}
3084 
3085 			route = xdst->route;
3086 		}
3087 	}
3088 
3089 	if (xdst == NULL) {
3090 		struct xfrm_flo xflo;
3091 
3092 		xflo.dst_orig = dst_orig;
3093 		xflo.flags = flags;
3094 
3095 		/* To accelerate a bit...  */
3096 		if (!if_id && ((dst_orig->flags & DST_NOXFRM) ||
3097 			       !net->xfrm.policy_count[XFRM_POLICY_OUT]))
3098 			goto nopol;
3099 
3100 		xdst = xfrm_bundle_lookup(net, fl, family, dir, &xflo, if_id);
3101 		if (xdst == NULL)
3102 			goto nopol;
3103 		if (IS_ERR(xdst)) {
3104 			err = PTR_ERR(xdst);
3105 			goto dropdst;
3106 		}
3107 
3108 		num_pols = xdst->num_pols;
3109 		num_xfrms = xdst->num_xfrms;
3110 		memcpy(pols, xdst->pols, sizeof(struct xfrm_policy *) * num_pols);
3111 		route = xdst->route;
3112 	}
3113 
3114 	dst = &xdst->u.dst;
3115 	if (route == NULL && num_xfrms > 0) {
3116 		/* The only case when xfrm_bundle_lookup() returns a
3117 		 * bundle with null route, is when the template could
3118 		 * not be resolved. It means policies are there, but
3119 		 * bundle could not be created, since we don't yet
3120 		 * have the xfrm_state's. We need to wait for KM to
3121 		 * negotiate new SA's or bail out with error.*/
3122 		if (net->xfrm.sysctl_larval_drop) {
3123 			XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTNOSTATES);
3124 			err = -EREMOTE;
3125 			goto error;
3126 		}
3127 
3128 		err = -EAGAIN;
3129 
3130 		XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTNOSTATES);
3131 		goto error;
3132 	}
3133 
3134 no_transform:
3135 	if (num_pols == 0)
3136 		goto nopol;
3137 
3138 	if ((flags & XFRM_LOOKUP_ICMP) &&
3139 	    !(pols[0]->flags & XFRM_POLICY_ICMP)) {
3140 		err = -ENOENT;
3141 		goto error;
3142 	}
3143 
3144 	for (i = 0; i < num_pols; i++)
3145 		pols[i]->curlft.use_time = ktime_get_real_seconds();
3146 
3147 	if (num_xfrms < 0) {
3148 		/* Prohibit the flow */
3149 		XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTPOLBLOCK);
3150 		err = -EPERM;
3151 		goto error;
3152 	} else if (num_xfrms > 0) {
3153 		/* Flow transformed */
3154 		dst_release(dst_orig);
3155 	} else {
3156 		/* Flow passes untransformed */
3157 		dst_release(dst);
3158 		dst = dst_orig;
3159 	}
3160 ok:
3161 	xfrm_pols_put(pols, drop_pols);
3162 	if (dst && dst->xfrm &&
3163 	    dst->xfrm->props.mode == XFRM_MODE_TUNNEL)
3164 		dst->flags |= DST_XFRM_TUNNEL;
3165 	return dst;
3166 
3167 nopol:
3168 	if (!(flags & XFRM_LOOKUP_ICMP)) {
3169 		dst = dst_orig;
3170 		goto ok;
3171 	}
3172 	err = -ENOENT;
3173 error:
3174 	dst_release(dst);
3175 dropdst:
3176 	if (!(flags & XFRM_LOOKUP_KEEP_DST_REF))
3177 		dst_release(dst_orig);
3178 	xfrm_pols_put(pols, drop_pols);
3179 	return ERR_PTR(err);
3180 }
3181 EXPORT_SYMBOL(xfrm_lookup_with_ifid);
3182 
3183 /* Main function: finds/creates a bundle for given flow.
3184  *
3185  * At the moment we eat a raw IP route. Mostly to speed up lookups
3186  * on interfaces with disabled IPsec.
3187  */
3188 struct dst_entry *xfrm_lookup(struct net *net, struct dst_entry *dst_orig,
3189 			      const struct flowi *fl, const struct sock *sk,
3190 			      int flags)
3191 {
3192 	return xfrm_lookup_with_ifid(net, dst_orig, fl, sk, flags, 0);
3193 }
3194 EXPORT_SYMBOL(xfrm_lookup);
3195 
3196 /* Callers of xfrm_lookup_route() must ensure a call to dst_output().
3197  * Otherwise we may send out blackholed packets.
3198  */
3199 struct dst_entry *xfrm_lookup_route(struct net *net, struct dst_entry *dst_orig,
3200 				    const struct flowi *fl,
3201 				    const struct sock *sk, int flags)
3202 {
3203 	struct dst_entry *dst = xfrm_lookup(net, dst_orig, fl, sk,
3204 					    flags | XFRM_LOOKUP_QUEUE |
3205 					    XFRM_LOOKUP_KEEP_DST_REF);
3206 
3207 	if (PTR_ERR(dst) == -EREMOTE)
3208 		return make_blackhole(net, dst_orig->ops->family, dst_orig);
3209 
3210 	if (IS_ERR(dst))
3211 		dst_release(dst_orig);
3212 
3213 	return dst;
3214 }
3215 EXPORT_SYMBOL(xfrm_lookup_route);
3216 
3217 static inline int
3218 xfrm_secpath_reject(int idx, struct sk_buff *skb, const struct flowi *fl)
3219 {
3220 	struct sec_path *sp = skb_sec_path(skb);
3221 	struct xfrm_state *x;
3222 
3223 	if (!sp || idx < 0 || idx >= sp->len)
3224 		return 0;
3225 	x = sp->xvec[idx];
3226 	if (!x->type->reject)
3227 		return 0;
3228 	return x->type->reject(x, skb, fl);
3229 }
3230 
3231 /* When skb is transformed back to its "native" form, we have to
3232  * check policy restrictions. At the moment we make this in maximally
3233  * stupid way. Shame on me. :-) Of course, connected sockets must
3234  * have policy cached at them.
3235  */
3236 
3237 static inline int
3238 xfrm_state_ok(const struct xfrm_tmpl *tmpl, const struct xfrm_state *x,
3239 	      unsigned short family)
3240 {
3241 	if (xfrm_state_kern(x))
3242 		return tmpl->optional && !xfrm_state_addr_cmp(tmpl, x, tmpl->encap_family);
3243 	return	x->id.proto == tmpl->id.proto &&
3244 		(x->id.spi == tmpl->id.spi || !tmpl->id.spi) &&
3245 		(x->props.reqid == tmpl->reqid || !tmpl->reqid) &&
3246 		x->props.mode == tmpl->mode &&
3247 		(tmpl->allalgs || (tmpl->aalgos & (1<<x->props.aalgo)) ||
3248 		 !(xfrm_id_proto_match(tmpl->id.proto, IPSEC_PROTO_ANY))) &&
3249 		!(x->props.mode != XFRM_MODE_TRANSPORT &&
3250 		  xfrm_state_addr_cmp(tmpl, x, family));
3251 }
3252 
3253 /*
3254  * 0 or more than 0 is returned when validation is succeeded (either bypass
3255  * because of optional transport mode, or next index of the matched secpath
3256  * state with the template.
3257  * -1 is returned when no matching template is found.
3258  * Otherwise "-2 - errored_index" is returned.
3259  */
3260 static inline int
3261 xfrm_policy_ok(const struct xfrm_tmpl *tmpl, const struct sec_path *sp, int start,
3262 	       unsigned short family)
3263 {
3264 	int idx = start;
3265 
3266 	if (tmpl->optional) {
3267 		if (tmpl->mode == XFRM_MODE_TRANSPORT)
3268 			return start;
3269 	} else
3270 		start = -1;
3271 	for (; idx < sp->len; idx++) {
3272 		if (xfrm_state_ok(tmpl, sp->xvec[idx], family))
3273 			return ++idx;
3274 		if (sp->xvec[idx]->props.mode != XFRM_MODE_TRANSPORT) {
3275 			if (start == -1)
3276 				start = -2-idx;
3277 			break;
3278 		}
3279 	}
3280 	return start;
3281 }
3282 
3283 static void
3284 decode_session4(struct sk_buff *skb, struct flowi *fl, bool reverse)
3285 {
3286 	const struct iphdr *iph = ip_hdr(skb);
3287 	int ihl = iph->ihl;
3288 	u8 *xprth = skb_network_header(skb) + ihl * 4;
3289 	struct flowi4 *fl4 = &fl->u.ip4;
3290 	int oif = 0;
3291 
3292 	if (skb_dst(skb) && skb_dst(skb)->dev)
3293 		oif = skb_dst(skb)->dev->ifindex;
3294 
3295 	memset(fl4, 0, sizeof(struct flowi4));
3296 	fl4->flowi4_mark = skb->mark;
3297 	fl4->flowi4_oif = reverse ? skb->skb_iif : oif;
3298 
3299 	fl4->flowi4_proto = iph->protocol;
3300 	fl4->daddr = reverse ? iph->saddr : iph->daddr;
3301 	fl4->saddr = reverse ? iph->daddr : iph->saddr;
3302 	fl4->flowi4_tos = iph->tos;
3303 
3304 	if (!ip_is_fragment(iph)) {
3305 		switch (iph->protocol) {
3306 		case IPPROTO_UDP:
3307 		case IPPROTO_UDPLITE:
3308 		case IPPROTO_TCP:
3309 		case IPPROTO_SCTP:
3310 		case IPPROTO_DCCP:
3311 			if (xprth + 4 < skb->data ||
3312 			    pskb_may_pull(skb, xprth + 4 - skb->data)) {
3313 				__be16 *ports;
3314 
3315 				xprth = skb_network_header(skb) + ihl * 4;
3316 				ports = (__be16 *)xprth;
3317 
3318 				fl4->fl4_sport = ports[!!reverse];
3319 				fl4->fl4_dport = ports[!reverse];
3320 			}
3321 			break;
3322 		case IPPROTO_ICMP:
3323 			if (xprth + 2 < skb->data ||
3324 			    pskb_may_pull(skb, xprth + 2 - skb->data)) {
3325 				u8 *icmp;
3326 
3327 				xprth = skb_network_header(skb) + ihl * 4;
3328 				icmp = xprth;
3329 
3330 				fl4->fl4_icmp_type = icmp[0];
3331 				fl4->fl4_icmp_code = icmp[1];
3332 			}
3333 			break;
3334 		case IPPROTO_GRE:
3335 			if (xprth + 12 < skb->data ||
3336 			    pskb_may_pull(skb, xprth + 12 - skb->data)) {
3337 				__be16 *greflags;
3338 				__be32 *gre_hdr;
3339 
3340 				xprth = skb_network_header(skb) + ihl * 4;
3341 				greflags = (__be16 *)xprth;
3342 				gre_hdr = (__be32 *)xprth;
3343 
3344 				if (greflags[0] & GRE_KEY) {
3345 					if (greflags[0] & GRE_CSUM)
3346 						gre_hdr++;
3347 					fl4->fl4_gre_key = gre_hdr[1];
3348 				}
3349 			}
3350 			break;
3351 		default:
3352 			break;
3353 		}
3354 	}
3355 }
3356 
3357 #if IS_ENABLED(CONFIG_IPV6)
3358 static void
3359 decode_session6(struct sk_buff *skb, struct flowi *fl, bool reverse)
3360 {
3361 	struct flowi6 *fl6 = &fl->u.ip6;
3362 	int onlyproto = 0;
3363 	const struct ipv6hdr *hdr = ipv6_hdr(skb);
3364 	u32 offset = sizeof(*hdr);
3365 	struct ipv6_opt_hdr *exthdr;
3366 	const unsigned char *nh = skb_network_header(skb);
3367 	u16 nhoff = IP6CB(skb)->nhoff;
3368 	int oif = 0;
3369 	u8 nexthdr;
3370 
3371 	if (!nhoff)
3372 		nhoff = offsetof(struct ipv6hdr, nexthdr);
3373 
3374 	nexthdr = nh[nhoff];
3375 
3376 	if (skb_dst(skb) && skb_dst(skb)->dev)
3377 		oif = skb_dst(skb)->dev->ifindex;
3378 
3379 	memset(fl6, 0, sizeof(struct flowi6));
3380 	fl6->flowi6_mark = skb->mark;
3381 	fl6->flowi6_oif = reverse ? skb->skb_iif : oif;
3382 
3383 	fl6->daddr = reverse ? hdr->saddr : hdr->daddr;
3384 	fl6->saddr = reverse ? hdr->daddr : hdr->saddr;
3385 
3386 	while (nh + offset + sizeof(*exthdr) < skb->data ||
3387 	       pskb_may_pull(skb, nh + offset + sizeof(*exthdr) - skb->data)) {
3388 		nh = skb_network_header(skb);
3389 		exthdr = (struct ipv6_opt_hdr *)(nh + offset);
3390 
3391 		switch (nexthdr) {
3392 		case NEXTHDR_FRAGMENT:
3393 			onlyproto = 1;
3394 			fallthrough;
3395 		case NEXTHDR_ROUTING:
3396 		case NEXTHDR_HOP:
3397 		case NEXTHDR_DEST:
3398 			offset += ipv6_optlen(exthdr);
3399 			nexthdr = exthdr->nexthdr;
3400 			exthdr = (struct ipv6_opt_hdr *)(nh + offset);
3401 			break;
3402 		case IPPROTO_UDP:
3403 		case IPPROTO_UDPLITE:
3404 		case IPPROTO_TCP:
3405 		case IPPROTO_SCTP:
3406 		case IPPROTO_DCCP:
3407 			if (!onlyproto && (nh + offset + 4 < skb->data ||
3408 			     pskb_may_pull(skb, nh + offset + 4 - skb->data))) {
3409 				__be16 *ports;
3410 
3411 				nh = skb_network_header(skb);
3412 				ports = (__be16 *)(nh + offset);
3413 				fl6->fl6_sport = ports[!!reverse];
3414 				fl6->fl6_dport = ports[!reverse];
3415 			}
3416 			fl6->flowi6_proto = nexthdr;
3417 			return;
3418 		case IPPROTO_ICMPV6:
3419 			if (!onlyproto && (nh + offset + 2 < skb->data ||
3420 			    pskb_may_pull(skb, nh + offset + 2 - skb->data))) {
3421 				u8 *icmp;
3422 
3423 				nh = skb_network_header(skb);
3424 				icmp = (u8 *)(nh + offset);
3425 				fl6->fl6_icmp_type = icmp[0];
3426 				fl6->fl6_icmp_code = icmp[1];
3427 			}
3428 			fl6->flowi6_proto = nexthdr;
3429 			return;
3430 #if IS_ENABLED(CONFIG_IPV6_MIP6)
3431 		case IPPROTO_MH:
3432 			offset += ipv6_optlen(exthdr);
3433 			if (!onlyproto && (nh + offset + 3 < skb->data ||
3434 			    pskb_may_pull(skb, nh + offset + 3 - skb->data))) {
3435 				struct ip6_mh *mh;
3436 
3437 				nh = skb_network_header(skb);
3438 				mh = (struct ip6_mh *)(nh + offset);
3439 				fl6->fl6_mh_type = mh->ip6mh_type;
3440 			}
3441 			fl6->flowi6_proto = nexthdr;
3442 			return;
3443 #endif
3444 		default:
3445 			fl6->flowi6_proto = nexthdr;
3446 			return;
3447 		}
3448 	}
3449 }
3450 #endif
3451 
3452 int __xfrm_decode_session(struct sk_buff *skb, struct flowi *fl,
3453 			  unsigned int family, int reverse)
3454 {
3455 	switch (family) {
3456 	case AF_INET:
3457 		decode_session4(skb, fl, reverse);
3458 		break;
3459 #if IS_ENABLED(CONFIG_IPV6)
3460 	case AF_INET6:
3461 		decode_session6(skb, fl, reverse);
3462 		break;
3463 #endif
3464 	default:
3465 		return -EAFNOSUPPORT;
3466 	}
3467 
3468 	return security_xfrm_decode_session(skb, &fl->flowi_secid);
3469 }
3470 EXPORT_SYMBOL(__xfrm_decode_session);
3471 
3472 static inline int secpath_has_nontransport(const struct sec_path *sp, int k, int *idxp)
3473 {
3474 	for (; k < sp->len; k++) {
3475 		if (sp->xvec[k]->props.mode != XFRM_MODE_TRANSPORT) {
3476 			*idxp = k;
3477 			return 1;
3478 		}
3479 	}
3480 
3481 	return 0;
3482 }
3483 
3484 int __xfrm_policy_check(struct sock *sk, int dir, struct sk_buff *skb,
3485 			unsigned short family)
3486 {
3487 	struct net *net = dev_net(skb->dev);
3488 	struct xfrm_policy *pol;
3489 	struct xfrm_policy *pols[XFRM_POLICY_TYPE_MAX];
3490 	int npols = 0;
3491 	int xfrm_nr;
3492 	int pi;
3493 	int reverse;
3494 	struct flowi fl;
3495 	int xerr_idx = -1;
3496 	const struct xfrm_if_cb *ifcb;
3497 	struct sec_path *sp;
3498 	struct xfrm_if *xi;
3499 	u32 if_id = 0;
3500 
3501 	rcu_read_lock();
3502 	ifcb = xfrm_if_get_cb();
3503 
3504 	if (ifcb) {
3505 		xi = ifcb->decode_session(skb, family);
3506 		if (xi) {
3507 			if_id = xi->p.if_id;
3508 			net = xi->net;
3509 		}
3510 	}
3511 	rcu_read_unlock();
3512 
3513 	reverse = dir & ~XFRM_POLICY_MASK;
3514 	dir &= XFRM_POLICY_MASK;
3515 
3516 	if (__xfrm_decode_session(skb, &fl, family, reverse) < 0) {
3517 		XFRM_INC_STATS(net, LINUX_MIB_XFRMINHDRERROR);
3518 		return 0;
3519 	}
3520 
3521 	nf_nat_decode_session(skb, &fl, family);
3522 
3523 	/* First, check used SA against their selectors. */
3524 	sp = skb_sec_path(skb);
3525 	if (sp) {
3526 		int i;
3527 
3528 		for (i = sp->len - 1; i >= 0; i--) {
3529 			struct xfrm_state *x = sp->xvec[i];
3530 			if (!xfrm_selector_match(&x->sel, &fl, family)) {
3531 				XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATEMISMATCH);
3532 				return 0;
3533 			}
3534 		}
3535 	}
3536 
3537 	pol = NULL;
3538 	sk = sk_to_full_sk(sk);
3539 	if (sk && sk->sk_policy[dir]) {
3540 		pol = xfrm_sk_policy_lookup(sk, dir, &fl, family, if_id);
3541 		if (IS_ERR(pol)) {
3542 			XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLERROR);
3543 			return 0;
3544 		}
3545 	}
3546 
3547 	if (!pol)
3548 		pol = xfrm_policy_lookup(net, &fl, family, dir, if_id);
3549 
3550 	if (IS_ERR(pol)) {
3551 		XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLERROR);
3552 		return 0;
3553 	}
3554 
3555 	if (!pol) {
3556 		if (sp && secpath_has_nontransport(sp, 0, &xerr_idx)) {
3557 			xfrm_secpath_reject(xerr_idx, skb, &fl);
3558 			XFRM_INC_STATS(net, LINUX_MIB_XFRMINNOPOLS);
3559 			return 0;
3560 		}
3561 		return 1;
3562 	}
3563 
3564 	pol->curlft.use_time = ktime_get_real_seconds();
3565 
3566 	pols[0] = pol;
3567 	npols++;
3568 #ifdef CONFIG_XFRM_SUB_POLICY
3569 	if (pols[0]->type != XFRM_POLICY_TYPE_MAIN) {
3570 		pols[1] = xfrm_policy_lookup_bytype(net, XFRM_POLICY_TYPE_MAIN,
3571 						    &fl, family,
3572 						    XFRM_POLICY_IN, if_id);
3573 		if (pols[1]) {
3574 			if (IS_ERR(pols[1])) {
3575 				XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLERROR);
3576 				return 0;
3577 			}
3578 			pols[1]->curlft.use_time = ktime_get_real_seconds();
3579 			npols++;
3580 		}
3581 	}
3582 #endif
3583 
3584 	if (pol->action == XFRM_POLICY_ALLOW) {
3585 		static struct sec_path dummy;
3586 		struct xfrm_tmpl *tp[XFRM_MAX_DEPTH];
3587 		struct xfrm_tmpl *stp[XFRM_MAX_DEPTH];
3588 		struct xfrm_tmpl **tpp = tp;
3589 		int ti = 0;
3590 		int i, k;
3591 
3592 		sp = skb_sec_path(skb);
3593 		if (!sp)
3594 			sp = &dummy;
3595 
3596 		for (pi = 0; pi < npols; pi++) {
3597 			if (pols[pi] != pol &&
3598 			    pols[pi]->action != XFRM_POLICY_ALLOW) {
3599 				XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLBLOCK);
3600 				goto reject;
3601 			}
3602 			if (ti + pols[pi]->xfrm_nr >= XFRM_MAX_DEPTH) {
3603 				XFRM_INC_STATS(net, LINUX_MIB_XFRMINBUFFERERROR);
3604 				goto reject_error;
3605 			}
3606 			for (i = 0; i < pols[pi]->xfrm_nr; i++)
3607 				tpp[ti++] = &pols[pi]->xfrm_vec[i];
3608 		}
3609 		xfrm_nr = ti;
3610 		if (npols > 1) {
3611 			xfrm_tmpl_sort(stp, tpp, xfrm_nr, family);
3612 			tpp = stp;
3613 		}
3614 
3615 		/* For each tunnel xfrm, find the first matching tmpl.
3616 		 * For each tmpl before that, find corresponding xfrm.
3617 		 * Order is _important_. Later we will implement
3618 		 * some barriers, but at the moment barriers
3619 		 * are implied between each two transformations.
3620 		 */
3621 		for (i = xfrm_nr-1, k = 0; i >= 0; i--) {
3622 			k = xfrm_policy_ok(tpp[i], sp, k, family);
3623 			if (k < 0) {
3624 				if (k < -1)
3625 					/* "-2 - errored_index" returned */
3626 					xerr_idx = -(2+k);
3627 				XFRM_INC_STATS(net, LINUX_MIB_XFRMINTMPLMISMATCH);
3628 				goto reject;
3629 			}
3630 		}
3631 
3632 		if (secpath_has_nontransport(sp, k, &xerr_idx)) {
3633 			XFRM_INC_STATS(net, LINUX_MIB_XFRMINTMPLMISMATCH);
3634 			goto reject;
3635 		}
3636 
3637 		xfrm_pols_put(pols, npols);
3638 		return 1;
3639 	}
3640 	XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLBLOCK);
3641 
3642 reject:
3643 	xfrm_secpath_reject(xerr_idx, skb, &fl);
3644 reject_error:
3645 	xfrm_pols_put(pols, npols);
3646 	return 0;
3647 }
3648 EXPORT_SYMBOL(__xfrm_policy_check);
3649 
3650 int __xfrm_route_forward(struct sk_buff *skb, unsigned short family)
3651 {
3652 	struct net *net = dev_net(skb->dev);
3653 	struct flowi fl;
3654 	struct dst_entry *dst;
3655 	int res = 1;
3656 
3657 	if (xfrm_decode_session(skb, &fl, family) < 0) {
3658 		XFRM_INC_STATS(net, LINUX_MIB_XFRMFWDHDRERROR);
3659 		return 0;
3660 	}
3661 
3662 	skb_dst_force(skb);
3663 	if (!skb_dst(skb)) {
3664 		XFRM_INC_STATS(net, LINUX_MIB_XFRMFWDHDRERROR);
3665 		return 0;
3666 	}
3667 
3668 	dst = xfrm_lookup(net, skb_dst(skb), &fl, NULL, XFRM_LOOKUP_QUEUE);
3669 	if (IS_ERR(dst)) {
3670 		res = 0;
3671 		dst = NULL;
3672 	}
3673 	skb_dst_set(skb, dst);
3674 	return res;
3675 }
3676 EXPORT_SYMBOL(__xfrm_route_forward);
3677 
3678 /* Optimize later using cookies and generation ids. */
3679 
3680 static struct dst_entry *xfrm_dst_check(struct dst_entry *dst, u32 cookie)
3681 {
3682 	/* Code (such as __xfrm4_bundle_create()) sets dst->obsolete
3683 	 * to DST_OBSOLETE_FORCE_CHK to force all XFRM destinations to
3684 	 * get validated by dst_ops->check on every use.  We do this
3685 	 * because when a normal route referenced by an XFRM dst is
3686 	 * obsoleted we do not go looking around for all parent
3687 	 * referencing XFRM dsts so that we can invalidate them.  It
3688 	 * is just too much work.  Instead we make the checks here on
3689 	 * every use.  For example:
3690 	 *
3691 	 *	XFRM dst A --> IPv4 dst X
3692 	 *
3693 	 * X is the "xdst->route" of A (X is also the "dst->path" of A
3694 	 * in this example).  If X is marked obsolete, "A" will not
3695 	 * notice.  That's what we are validating here via the
3696 	 * stale_bundle() check.
3697 	 *
3698 	 * When a dst is removed from the fib tree, DST_OBSOLETE_DEAD will
3699 	 * be marked on it.
3700 	 * This will force stale_bundle() to fail on any xdst bundle with
3701 	 * this dst linked in it.
3702 	 */
3703 	if (dst->obsolete < 0 && !stale_bundle(dst))
3704 		return dst;
3705 
3706 	return NULL;
3707 }
3708 
3709 static int stale_bundle(struct dst_entry *dst)
3710 {
3711 	return !xfrm_bundle_ok((struct xfrm_dst *)dst);
3712 }
3713 
3714 void xfrm_dst_ifdown(struct dst_entry *dst, struct net_device *dev)
3715 {
3716 	while ((dst = xfrm_dst_child(dst)) && dst->xfrm && dst->dev == dev) {
3717 		dst->dev = dev_net(dev)->loopback_dev;
3718 		dev_hold(dst->dev);
3719 		dev_put(dev);
3720 	}
3721 }
3722 EXPORT_SYMBOL(xfrm_dst_ifdown);
3723 
3724 static void xfrm_link_failure(struct sk_buff *skb)
3725 {
3726 	/* Impossible. Such dst must be popped before reaches point of failure. */
3727 }
3728 
3729 static struct dst_entry *xfrm_negative_advice(struct dst_entry *dst)
3730 {
3731 	if (dst) {
3732 		if (dst->obsolete) {
3733 			dst_release(dst);
3734 			dst = NULL;
3735 		}
3736 	}
3737 	return dst;
3738 }
3739 
3740 static void xfrm_init_pmtu(struct xfrm_dst **bundle, int nr)
3741 {
3742 	while (nr--) {
3743 		struct xfrm_dst *xdst = bundle[nr];
3744 		u32 pmtu, route_mtu_cached;
3745 		struct dst_entry *dst;
3746 
3747 		dst = &xdst->u.dst;
3748 		pmtu = dst_mtu(xfrm_dst_child(dst));
3749 		xdst->child_mtu_cached = pmtu;
3750 
3751 		pmtu = xfrm_state_mtu(dst->xfrm, pmtu);
3752 
3753 		route_mtu_cached = dst_mtu(xdst->route);
3754 		xdst->route_mtu_cached = route_mtu_cached;
3755 
3756 		if (pmtu > route_mtu_cached)
3757 			pmtu = route_mtu_cached;
3758 
3759 		dst_metric_set(dst, RTAX_MTU, pmtu);
3760 	}
3761 }
3762 
3763 /* Check that the bundle accepts the flow and its components are
3764  * still valid.
3765  */
3766 
3767 static int xfrm_bundle_ok(struct xfrm_dst *first)
3768 {
3769 	struct xfrm_dst *bundle[XFRM_MAX_DEPTH];
3770 	struct dst_entry *dst = &first->u.dst;
3771 	struct xfrm_dst *xdst;
3772 	int start_from, nr;
3773 	u32 mtu;
3774 
3775 	if (!dst_check(xfrm_dst_path(dst), ((struct xfrm_dst *)dst)->path_cookie) ||
3776 	    (dst->dev && !netif_running(dst->dev)))
3777 		return 0;
3778 
3779 	if (dst->flags & DST_XFRM_QUEUE)
3780 		return 1;
3781 
3782 	start_from = nr = 0;
3783 	do {
3784 		struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
3785 
3786 		if (dst->xfrm->km.state != XFRM_STATE_VALID)
3787 			return 0;
3788 		if (xdst->xfrm_genid != dst->xfrm->genid)
3789 			return 0;
3790 		if (xdst->num_pols > 0 &&
3791 		    xdst->policy_genid != atomic_read(&xdst->pols[0]->genid))
3792 			return 0;
3793 
3794 		bundle[nr++] = xdst;
3795 
3796 		mtu = dst_mtu(xfrm_dst_child(dst));
3797 		if (xdst->child_mtu_cached != mtu) {
3798 			start_from = nr;
3799 			xdst->child_mtu_cached = mtu;
3800 		}
3801 
3802 		if (!dst_check(xdst->route, xdst->route_cookie))
3803 			return 0;
3804 		mtu = dst_mtu(xdst->route);
3805 		if (xdst->route_mtu_cached != mtu) {
3806 			start_from = nr;
3807 			xdst->route_mtu_cached = mtu;
3808 		}
3809 
3810 		dst = xfrm_dst_child(dst);
3811 	} while (dst->xfrm);
3812 
3813 	if (likely(!start_from))
3814 		return 1;
3815 
3816 	xdst = bundle[start_from - 1];
3817 	mtu = xdst->child_mtu_cached;
3818 	while (start_from--) {
3819 		dst = &xdst->u.dst;
3820 
3821 		mtu = xfrm_state_mtu(dst->xfrm, mtu);
3822 		if (mtu > xdst->route_mtu_cached)
3823 			mtu = xdst->route_mtu_cached;
3824 		dst_metric_set(dst, RTAX_MTU, mtu);
3825 		if (!start_from)
3826 			break;
3827 
3828 		xdst = bundle[start_from - 1];
3829 		xdst->child_mtu_cached = mtu;
3830 	}
3831 
3832 	return 1;
3833 }
3834 
3835 static unsigned int xfrm_default_advmss(const struct dst_entry *dst)
3836 {
3837 	return dst_metric_advmss(xfrm_dst_path(dst));
3838 }
3839 
3840 static unsigned int xfrm_mtu(const struct dst_entry *dst)
3841 {
3842 	unsigned int mtu = dst_metric_raw(dst, RTAX_MTU);
3843 
3844 	return mtu ? : dst_mtu(xfrm_dst_path(dst));
3845 }
3846 
3847 static const void *xfrm_get_dst_nexthop(const struct dst_entry *dst,
3848 					const void *daddr)
3849 {
3850 	while (dst->xfrm) {
3851 		const struct xfrm_state *xfrm = dst->xfrm;
3852 
3853 		dst = xfrm_dst_child(dst);
3854 
3855 		if (xfrm->props.mode == XFRM_MODE_TRANSPORT)
3856 			continue;
3857 		if (xfrm->type->flags & XFRM_TYPE_REMOTE_COADDR)
3858 			daddr = xfrm->coaddr;
3859 		else if (!(xfrm->type->flags & XFRM_TYPE_LOCAL_COADDR))
3860 			daddr = &xfrm->id.daddr;
3861 	}
3862 	return daddr;
3863 }
3864 
3865 static struct neighbour *xfrm_neigh_lookup(const struct dst_entry *dst,
3866 					   struct sk_buff *skb,
3867 					   const void *daddr)
3868 {
3869 	const struct dst_entry *path = xfrm_dst_path(dst);
3870 
3871 	if (!skb)
3872 		daddr = xfrm_get_dst_nexthop(dst, daddr);
3873 	return path->ops->neigh_lookup(path, skb, daddr);
3874 }
3875 
3876 static void xfrm_confirm_neigh(const struct dst_entry *dst, const void *daddr)
3877 {
3878 	const struct dst_entry *path = xfrm_dst_path(dst);
3879 
3880 	daddr = xfrm_get_dst_nexthop(dst, daddr);
3881 	path->ops->confirm_neigh(path, daddr);
3882 }
3883 
3884 int xfrm_policy_register_afinfo(const struct xfrm_policy_afinfo *afinfo, int family)
3885 {
3886 	int err = 0;
3887 
3888 	if (WARN_ON(family >= ARRAY_SIZE(xfrm_policy_afinfo)))
3889 		return -EAFNOSUPPORT;
3890 
3891 	spin_lock(&xfrm_policy_afinfo_lock);
3892 	if (unlikely(xfrm_policy_afinfo[family] != NULL))
3893 		err = -EEXIST;
3894 	else {
3895 		struct dst_ops *dst_ops = afinfo->dst_ops;
3896 		if (likely(dst_ops->kmem_cachep == NULL))
3897 			dst_ops->kmem_cachep = xfrm_dst_cache;
3898 		if (likely(dst_ops->check == NULL))
3899 			dst_ops->check = xfrm_dst_check;
3900 		if (likely(dst_ops->default_advmss == NULL))
3901 			dst_ops->default_advmss = xfrm_default_advmss;
3902 		if (likely(dst_ops->mtu == NULL))
3903 			dst_ops->mtu = xfrm_mtu;
3904 		if (likely(dst_ops->negative_advice == NULL))
3905 			dst_ops->negative_advice = xfrm_negative_advice;
3906 		if (likely(dst_ops->link_failure == NULL))
3907 			dst_ops->link_failure = xfrm_link_failure;
3908 		if (likely(dst_ops->neigh_lookup == NULL))
3909 			dst_ops->neigh_lookup = xfrm_neigh_lookup;
3910 		if (likely(!dst_ops->confirm_neigh))
3911 			dst_ops->confirm_neigh = xfrm_confirm_neigh;
3912 		rcu_assign_pointer(xfrm_policy_afinfo[family], afinfo);
3913 	}
3914 	spin_unlock(&xfrm_policy_afinfo_lock);
3915 
3916 	return err;
3917 }
3918 EXPORT_SYMBOL(xfrm_policy_register_afinfo);
3919 
3920 void xfrm_policy_unregister_afinfo(const struct xfrm_policy_afinfo *afinfo)
3921 {
3922 	struct dst_ops *dst_ops = afinfo->dst_ops;
3923 	int i;
3924 
3925 	for (i = 0; i < ARRAY_SIZE(xfrm_policy_afinfo); i++) {
3926 		if (xfrm_policy_afinfo[i] != afinfo)
3927 			continue;
3928 		RCU_INIT_POINTER(xfrm_policy_afinfo[i], NULL);
3929 		break;
3930 	}
3931 
3932 	synchronize_rcu();
3933 
3934 	dst_ops->kmem_cachep = NULL;
3935 	dst_ops->check = NULL;
3936 	dst_ops->negative_advice = NULL;
3937 	dst_ops->link_failure = NULL;
3938 }
3939 EXPORT_SYMBOL(xfrm_policy_unregister_afinfo);
3940 
3941 void xfrm_if_register_cb(const struct xfrm_if_cb *ifcb)
3942 {
3943 	spin_lock(&xfrm_if_cb_lock);
3944 	rcu_assign_pointer(xfrm_if_cb, ifcb);
3945 	spin_unlock(&xfrm_if_cb_lock);
3946 }
3947 EXPORT_SYMBOL(xfrm_if_register_cb);
3948 
3949 void xfrm_if_unregister_cb(void)
3950 {
3951 	RCU_INIT_POINTER(xfrm_if_cb, NULL);
3952 	synchronize_rcu();
3953 }
3954 EXPORT_SYMBOL(xfrm_if_unregister_cb);
3955 
3956 #ifdef CONFIG_XFRM_STATISTICS
3957 static int __net_init xfrm_statistics_init(struct net *net)
3958 {
3959 	int rv;
3960 	net->mib.xfrm_statistics = alloc_percpu(struct linux_xfrm_mib);
3961 	if (!net->mib.xfrm_statistics)
3962 		return -ENOMEM;
3963 	rv = xfrm_proc_init(net);
3964 	if (rv < 0)
3965 		free_percpu(net->mib.xfrm_statistics);
3966 	return rv;
3967 }
3968 
3969 static void xfrm_statistics_fini(struct net *net)
3970 {
3971 	xfrm_proc_fini(net);
3972 	free_percpu(net->mib.xfrm_statistics);
3973 }
3974 #else
3975 static int __net_init xfrm_statistics_init(struct net *net)
3976 {
3977 	return 0;
3978 }
3979 
3980 static void xfrm_statistics_fini(struct net *net)
3981 {
3982 }
3983 #endif
3984 
3985 static int __net_init xfrm_policy_init(struct net *net)
3986 {
3987 	unsigned int hmask, sz;
3988 	int dir, err;
3989 
3990 	if (net_eq(net, &init_net)) {
3991 		xfrm_dst_cache = kmem_cache_create("xfrm_dst_cache",
3992 					   sizeof(struct xfrm_dst),
3993 					   0, SLAB_HWCACHE_ALIGN|SLAB_PANIC,
3994 					   NULL);
3995 		err = rhashtable_init(&xfrm_policy_inexact_table,
3996 				      &xfrm_pol_inexact_params);
3997 		BUG_ON(err);
3998 	}
3999 
4000 	hmask = 8 - 1;
4001 	sz = (hmask+1) * sizeof(struct hlist_head);
4002 
4003 	net->xfrm.policy_byidx = xfrm_hash_alloc(sz);
4004 	if (!net->xfrm.policy_byidx)
4005 		goto out_byidx;
4006 	net->xfrm.policy_idx_hmask = hmask;
4007 
4008 	for (dir = 0; dir < XFRM_POLICY_MAX; dir++) {
4009 		struct xfrm_policy_hash *htab;
4010 
4011 		net->xfrm.policy_count[dir] = 0;
4012 		net->xfrm.policy_count[XFRM_POLICY_MAX + dir] = 0;
4013 		INIT_HLIST_HEAD(&net->xfrm.policy_inexact[dir]);
4014 
4015 		htab = &net->xfrm.policy_bydst[dir];
4016 		htab->table = xfrm_hash_alloc(sz);
4017 		if (!htab->table)
4018 			goto out_bydst;
4019 		htab->hmask = hmask;
4020 		htab->dbits4 = 32;
4021 		htab->sbits4 = 32;
4022 		htab->dbits6 = 128;
4023 		htab->sbits6 = 128;
4024 	}
4025 	net->xfrm.policy_hthresh.lbits4 = 32;
4026 	net->xfrm.policy_hthresh.rbits4 = 32;
4027 	net->xfrm.policy_hthresh.lbits6 = 128;
4028 	net->xfrm.policy_hthresh.rbits6 = 128;
4029 
4030 	seqlock_init(&net->xfrm.policy_hthresh.lock);
4031 
4032 	INIT_LIST_HEAD(&net->xfrm.policy_all);
4033 	INIT_LIST_HEAD(&net->xfrm.inexact_bins);
4034 	INIT_WORK(&net->xfrm.policy_hash_work, xfrm_hash_resize);
4035 	INIT_WORK(&net->xfrm.policy_hthresh.work, xfrm_hash_rebuild);
4036 	return 0;
4037 
4038 out_bydst:
4039 	for (dir--; dir >= 0; dir--) {
4040 		struct xfrm_policy_hash *htab;
4041 
4042 		htab = &net->xfrm.policy_bydst[dir];
4043 		xfrm_hash_free(htab->table, sz);
4044 	}
4045 	xfrm_hash_free(net->xfrm.policy_byidx, sz);
4046 out_byidx:
4047 	return -ENOMEM;
4048 }
4049 
4050 static void xfrm_policy_fini(struct net *net)
4051 {
4052 	struct xfrm_pol_inexact_bin *b, *t;
4053 	unsigned int sz;
4054 	int dir;
4055 
4056 	flush_work(&net->xfrm.policy_hash_work);
4057 #ifdef CONFIG_XFRM_SUB_POLICY
4058 	xfrm_policy_flush(net, XFRM_POLICY_TYPE_SUB, false);
4059 #endif
4060 	xfrm_policy_flush(net, XFRM_POLICY_TYPE_MAIN, false);
4061 
4062 	WARN_ON(!list_empty(&net->xfrm.policy_all));
4063 
4064 	for (dir = 0; dir < XFRM_POLICY_MAX; dir++) {
4065 		struct xfrm_policy_hash *htab;
4066 
4067 		WARN_ON(!hlist_empty(&net->xfrm.policy_inexact[dir]));
4068 
4069 		htab = &net->xfrm.policy_bydst[dir];
4070 		sz = (htab->hmask + 1) * sizeof(struct hlist_head);
4071 		WARN_ON(!hlist_empty(htab->table));
4072 		xfrm_hash_free(htab->table, sz);
4073 	}
4074 
4075 	sz = (net->xfrm.policy_idx_hmask + 1) * sizeof(struct hlist_head);
4076 	WARN_ON(!hlist_empty(net->xfrm.policy_byidx));
4077 	xfrm_hash_free(net->xfrm.policy_byidx, sz);
4078 
4079 	spin_lock_bh(&net->xfrm.xfrm_policy_lock);
4080 	list_for_each_entry_safe(b, t, &net->xfrm.inexact_bins, inexact_bins)
4081 		__xfrm_policy_inexact_prune_bin(b, true);
4082 	spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
4083 }
4084 
4085 static int __net_init xfrm_net_init(struct net *net)
4086 {
4087 	int rv;
4088 
4089 	/* Initialize the per-net locks here */
4090 	spin_lock_init(&net->xfrm.xfrm_state_lock);
4091 	spin_lock_init(&net->xfrm.xfrm_policy_lock);
4092 	mutex_init(&net->xfrm.xfrm_cfg_mutex);
4093 
4094 	rv = xfrm_statistics_init(net);
4095 	if (rv < 0)
4096 		goto out_statistics;
4097 	rv = xfrm_state_init(net);
4098 	if (rv < 0)
4099 		goto out_state;
4100 	rv = xfrm_policy_init(net);
4101 	if (rv < 0)
4102 		goto out_policy;
4103 	rv = xfrm_sysctl_init(net);
4104 	if (rv < 0)
4105 		goto out_sysctl;
4106 
4107 	return 0;
4108 
4109 out_sysctl:
4110 	xfrm_policy_fini(net);
4111 out_policy:
4112 	xfrm_state_fini(net);
4113 out_state:
4114 	xfrm_statistics_fini(net);
4115 out_statistics:
4116 	return rv;
4117 }
4118 
4119 static void __net_exit xfrm_net_exit(struct net *net)
4120 {
4121 	xfrm_sysctl_fini(net);
4122 	xfrm_policy_fini(net);
4123 	xfrm_state_fini(net);
4124 	xfrm_statistics_fini(net);
4125 }
4126 
4127 static struct pernet_operations __net_initdata xfrm_net_ops = {
4128 	.init = xfrm_net_init,
4129 	.exit = xfrm_net_exit,
4130 };
4131 
4132 void __init xfrm_init(void)
4133 {
4134 	register_pernet_subsys(&xfrm_net_ops);
4135 	xfrm_dev_init();
4136 	seqcount_mutex_init(&xfrm_policy_hash_generation, &hash_resize_mutex);
4137 	xfrm_input_init();
4138 
4139 #ifdef CONFIG_XFRM_ESPINTCP
4140 	espintcp_init();
4141 #endif
4142 }
4143 
4144 #ifdef CONFIG_AUDITSYSCALL
4145 static void xfrm_audit_common_policyinfo(struct xfrm_policy *xp,
4146 					 struct audit_buffer *audit_buf)
4147 {
4148 	struct xfrm_sec_ctx *ctx = xp->security;
4149 	struct xfrm_selector *sel = &xp->selector;
4150 
4151 	if (ctx)
4152 		audit_log_format(audit_buf, " sec_alg=%u sec_doi=%u sec_obj=%s",
4153 				 ctx->ctx_alg, ctx->ctx_doi, ctx->ctx_str);
4154 
4155 	switch (sel->family) {
4156 	case AF_INET:
4157 		audit_log_format(audit_buf, " src=%pI4", &sel->saddr.a4);
4158 		if (sel->prefixlen_s != 32)
4159 			audit_log_format(audit_buf, " src_prefixlen=%d",
4160 					 sel->prefixlen_s);
4161 		audit_log_format(audit_buf, " dst=%pI4", &sel->daddr.a4);
4162 		if (sel->prefixlen_d != 32)
4163 			audit_log_format(audit_buf, " dst_prefixlen=%d",
4164 					 sel->prefixlen_d);
4165 		break;
4166 	case AF_INET6:
4167 		audit_log_format(audit_buf, " src=%pI6", sel->saddr.a6);
4168 		if (sel->prefixlen_s != 128)
4169 			audit_log_format(audit_buf, " src_prefixlen=%d",
4170 					 sel->prefixlen_s);
4171 		audit_log_format(audit_buf, " dst=%pI6", sel->daddr.a6);
4172 		if (sel->prefixlen_d != 128)
4173 			audit_log_format(audit_buf, " dst_prefixlen=%d",
4174 					 sel->prefixlen_d);
4175 		break;
4176 	}
4177 }
4178 
4179 void xfrm_audit_policy_add(struct xfrm_policy *xp, int result, bool task_valid)
4180 {
4181 	struct audit_buffer *audit_buf;
4182 
4183 	audit_buf = xfrm_audit_start("SPD-add");
4184 	if (audit_buf == NULL)
4185 		return;
4186 	xfrm_audit_helper_usrinfo(task_valid, audit_buf);
4187 	audit_log_format(audit_buf, " res=%u", result);
4188 	xfrm_audit_common_policyinfo(xp, audit_buf);
4189 	audit_log_end(audit_buf);
4190 }
4191 EXPORT_SYMBOL_GPL(xfrm_audit_policy_add);
4192 
4193 void xfrm_audit_policy_delete(struct xfrm_policy *xp, int result,
4194 			      bool task_valid)
4195 {
4196 	struct audit_buffer *audit_buf;
4197 
4198 	audit_buf = xfrm_audit_start("SPD-delete");
4199 	if (audit_buf == NULL)
4200 		return;
4201 	xfrm_audit_helper_usrinfo(task_valid, audit_buf);
4202 	audit_log_format(audit_buf, " res=%u", result);
4203 	xfrm_audit_common_policyinfo(xp, audit_buf);
4204 	audit_log_end(audit_buf);
4205 }
4206 EXPORT_SYMBOL_GPL(xfrm_audit_policy_delete);
4207 #endif
4208 
4209 #ifdef CONFIG_XFRM_MIGRATE
4210 static bool xfrm_migrate_selector_match(const struct xfrm_selector *sel_cmp,
4211 					const struct xfrm_selector *sel_tgt)
4212 {
4213 	if (sel_cmp->proto == IPSEC_ULPROTO_ANY) {
4214 		if (sel_tgt->family == sel_cmp->family &&
4215 		    xfrm_addr_equal(&sel_tgt->daddr, &sel_cmp->daddr,
4216 				    sel_cmp->family) &&
4217 		    xfrm_addr_equal(&sel_tgt->saddr, &sel_cmp->saddr,
4218 				    sel_cmp->family) &&
4219 		    sel_tgt->prefixlen_d == sel_cmp->prefixlen_d &&
4220 		    sel_tgt->prefixlen_s == sel_cmp->prefixlen_s) {
4221 			return true;
4222 		}
4223 	} else {
4224 		if (memcmp(sel_tgt, sel_cmp, sizeof(*sel_tgt)) == 0) {
4225 			return true;
4226 		}
4227 	}
4228 	return false;
4229 }
4230 
4231 static struct xfrm_policy *xfrm_migrate_policy_find(const struct xfrm_selector *sel,
4232 						    u8 dir, u8 type, struct net *net)
4233 {
4234 	struct xfrm_policy *pol, *ret = NULL;
4235 	struct hlist_head *chain;
4236 	u32 priority = ~0U;
4237 
4238 	spin_lock_bh(&net->xfrm.xfrm_policy_lock);
4239 	chain = policy_hash_direct(net, &sel->daddr, &sel->saddr, sel->family, dir);
4240 	hlist_for_each_entry(pol, chain, bydst) {
4241 		if (xfrm_migrate_selector_match(sel, &pol->selector) &&
4242 		    pol->type == type) {
4243 			ret = pol;
4244 			priority = ret->priority;
4245 			break;
4246 		}
4247 	}
4248 	chain = &net->xfrm.policy_inexact[dir];
4249 	hlist_for_each_entry(pol, chain, bydst_inexact_list) {
4250 		if ((pol->priority >= priority) && ret)
4251 			break;
4252 
4253 		if (xfrm_migrate_selector_match(sel, &pol->selector) &&
4254 		    pol->type == type) {
4255 			ret = pol;
4256 			break;
4257 		}
4258 	}
4259 
4260 	xfrm_pol_hold(ret);
4261 
4262 	spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
4263 
4264 	return ret;
4265 }
4266 
4267 static int migrate_tmpl_match(const struct xfrm_migrate *m, const struct xfrm_tmpl *t)
4268 {
4269 	int match = 0;
4270 
4271 	if (t->mode == m->mode && t->id.proto == m->proto &&
4272 	    (m->reqid == 0 || t->reqid == m->reqid)) {
4273 		switch (t->mode) {
4274 		case XFRM_MODE_TUNNEL:
4275 		case XFRM_MODE_BEET:
4276 			if (xfrm_addr_equal(&t->id.daddr, &m->old_daddr,
4277 					    m->old_family) &&
4278 			    xfrm_addr_equal(&t->saddr, &m->old_saddr,
4279 					    m->old_family)) {
4280 				match = 1;
4281 			}
4282 			break;
4283 		case XFRM_MODE_TRANSPORT:
4284 			/* in case of transport mode, template does not store
4285 			   any IP addresses, hence we just compare mode and
4286 			   protocol */
4287 			match = 1;
4288 			break;
4289 		default:
4290 			break;
4291 		}
4292 	}
4293 	return match;
4294 }
4295 
4296 /* update endpoint address(es) of template(s) */
4297 static int xfrm_policy_migrate(struct xfrm_policy *pol,
4298 			       struct xfrm_migrate *m, int num_migrate)
4299 {
4300 	struct xfrm_migrate *mp;
4301 	int i, j, n = 0;
4302 
4303 	write_lock_bh(&pol->lock);
4304 	if (unlikely(pol->walk.dead)) {
4305 		/* target policy has been deleted */
4306 		write_unlock_bh(&pol->lock);
4307 		return -ENOENT;
4308 	}
4309 
4310 	for (i = 0; i < pol->xfrm_nr; i++) {
4311 		for (j = 0, mp = m; j < num_migrate; j++, mp++) {
4312 			if (!migrate_tmpl_match(mp, &pol->xfrm_vec[i]))
4313 				continue;
4314 			n++;
4315 			if (pol->xfrm_vec[i].mode != XFRM_MODE_TUNNEL &&
4316 			    pol->xfrm_vec[i].mode != XFRM_MODE_BEET)
4317 				continue;
4318 			/* update endpoints */
4319 			memcpy(&pol->xfrm_vec[i].id.daddr, &mp->new_daddr,
4320 			       sizeof(pol->xfrm_vec[i].id.daddr));
4321 			memcpy(&pol->xfrm_vec[i].saddr, &mp->new_saddr,
4322 			       sizeof(pol->xfrm_vec[i].saddr));
4323 			pol->xfrm_vec[i].encap_family = mp->new_family;
4324 			/* flush bundles */
4325 			atomic_inc(&pol->genid);
4326 		}
4327 	}
4328 
4329 	write_unlock_bh(&pol->lock);
4330 
4331 	if (!n)
4332 		return -ENODATA;
4333 
4334 	return 0;
4335 }
4336 
4337 static int xfrm_migrate_check(const struct xfrm_migrate *m, int num_migrate)
4338 {
4339 	int i, j;
4340 
4341 	if (num_migrate < 1 || num_migrate > XFRM_MAX_DEPTH)
4342 		return -EINVAL;
4343 
4344 	for (i = 0; i < num_migrate; i++) {
4345 		if (xfrm_addr_any(&m[i].new_daddr, m[i].new_family) ||
4346 		    xfrm_addr_any(&m[i].new_saddr, m[i].new_family))
4347 			return -EINVAL;
4348 
4349 		/* check if there is any duplicated entry */
4350 		for (j = i + 1; j < num_migrate; j++) {
4351 			if (!memcmp(&m[i].old_daddr, &m[j].old_daddr,
4352 				    sizeof(m[i].old_daddr)) &&
4353 			    !memcmp(&m[i].old_saddr, &m[j].old_saddr,
4354 				    sizeof(m[i].old_saddr)) &&
4355 			    m[i].proto == m[j].proto &&
4356 			    m[i].mode == m[j].mode &&
4357 			    m[i].reqid == m[j].reqid &&
4358 			    m[i].old_family == m[j].old_family)
4359 				return -EINVAL;
4360 		}
4361 	}
4362 
4363 	return 0;
4364 }
4365 
4366 int xfrm_migrate(const struct xfrm_selector *sel, u8 dir, u8 type,
4367 		 struct xfrm_migrate *m, int num_migrate,
4368 		 struct xfrm_kmaddress *k, struct net *net,
4369 		 struct xfrm_encap_tmpl *encap)
4370 {
4371 	int i, err, nx_cur = 0, nx_new = 0;
4372 	struct xfrm_policy *pol = NULL;
4373 	struct xfrm_state *x, *xc;
4374 	struct xfrm_state *x_cur[XFRM_MAX_DEPTH];
4375 	struct xfrm_state *x_new[XFRM_MAX_DEPTH];
4376 	struct xfrm_migrate *mp;
4377 
4378 	/* Stage 0 - sanity checks */
4379 	if ((err = xfrm_migrate_check(m, num_migrate)) < 0)
4380 		goto out;
4381 
4382 	if (dir >= XFRM_POLICY_MAX) {
4383 		err = -EINVAL;
4384 		goto out;
4385 	}
4386 
4387 	/* Stage 1 - find policy */
4388 	if ((pol = xfrm_migrate_policy_find(sel, dir, type, net)) == NULL) {
4389 		err = -ENOENT;
4390 		goto out;
4391 	}
4392 
4393 	/* Stage 2 - find and update state(s) */
4394 	for (i = 0, mp = m; i < num_migrate; i++, mp++) {
4395 		if ((x = xfrm_migrate_state_find(mp, net))) {
4396 			x_cur[nx_cur] = x;
4397 			nx_cur++;
4398 			xc = xfrm_state_migrate(x, mp, encap);
4399 			if (xc) {
4400 				x_new[nx_new] = xc;
4401 				nx_new++;
4402 			} else {
4403 				err = -ENODATA;
4404 				goto restore_state;
4405 			}
4406 		}
4407 	}
4408 
4409 	/* Stage 3 - update policy */
4410 	if ((err = xfrm_policy_migrate(pol, m, num_migrate)) < 0)
4411 		goto restore_state;
4412 
4413 	/* Stage 4 - delete old state(s) */
4414 	if (nx_cur) {
4415 		xfrm_states_put(x_cur, nx_cur);
4416 		xfrm_states_delete(x_cur, nx_cur);
4417 	}
4418 
4419 	/* Stage 5 - announce */
4420 	km_migrate(sel, dir, type, m, num_migrate, k, encap);
4421 
4422 	xfrm_pol_put(pol);
4423 
4424 	return 0;
4425 out:
4426 	return err;
4427 
4428 restore_state:
4429 	if (pol)
4430 		xfrm_pol_put(pol);
4431 	if (nx_cur)
4432 		xfrm_states_put(x_cur, nx_cur);
4433 	if (nx_new)
4434 		xfrm_states_delete(x_new, nx_new);
4435 
4436 	return err;
4437 }
4438 EXPORT_SYMBOL(xfrm_migrate);
4439 #endif
4440