xref: /openbmc/linux/net/xfrm/xfrm_policy.c (revision 0cd08b10)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * xfrm_policy.c
4  *
5  * Changes:
6  *	Mitsuru KANDA @USAGI
7  * 	Kazunori MIYAZAWA @USAGI
8  * 	Kunihiro Ishiguro <kunihiro@ipinfusion.com>
9  * 		IPv6 support
10  * 	Kazunori MIYAZAWA @USAGI
11  * 	YOSHIFUJI Hideaki
12  * 		Split up af-specific portion
13  *	Derek Atkins <derek@ihtfp.com>		Add the post_input processor
14  *
15  */
16 
17 #include <linux/err.h>
18 #include <linux/slab.h>
19 #include <linux/kmod.h>
20 #include <linux/list.h>
21 #include <linux/spinlock.h>
22 #include <linux/workqueue.h>
23 #include <linux/notifier.h>
24 #include <linux/netdevice.h>
25 #include <linux/netfilter.h>
26 #include <linux/module.h>
27 #include <linux/cache.h>
28 #include <linux/cpu.h>
29 #include <linux/audit.h>
30 #include <linux/rhashtable.h>
31 #include <linux/if_tunnel.h>
32 #include <net/dst.h>
33 #include <net/flow.h>
34 #include <net/xfrm.h>
35 #include <net/ip.h>
36 #if IS_ENABLED(CONFIG_IPV6_MIP6)
37 #include <net/mip6.h>
38 #endif
39 #ifdef CONFIG_XFRM_STATISTICS
40 #include <net/snmp.h>
41 #endif
42 #ifdef CONFIG_INET_ESPINTCP
43 #include <net/espintcp.h>
44 #endif
45 
46 #include "xfrm_hash.h"
47 
48 #define XFRM_QUEUE_TMO_MIN ((unsigned)(HZ/10))
49 #define XFRM_QUEUE_TMO_MAX ((unsigned)(60*HZ))
50 #define XFRM_MAX_QUEUE_LEN	100
51 
52 struct xfrm_flo {
53 	struct dst_entry *dst_orig;
54 	u8 flags;
55 };
56 
57 /* prefixes smaller than this are stored in lists, not trees. */
58 #define INEXACT_PREFIXLEN_IPV4	16
59 #define INEXACT_PREFIXLEN_IPV6	48
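/* For example, an IPv4 selector address with a /24 prefix is specific
 * enough for the per-node rbtrees below, while a /8 prefix (or a wildcard
 * address) stays on a plain hlist; the IPv6 cut-off is /48.  See
 * xfrm_pol_inexact_addr_use_any_list().
 */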
60 
61 struct xfrm_pol_inexact_node {
62 	struct rb_node node;
63 	union {
64 		xfrm_address_t addr;
65 		struct rcu_head rcu;
66 	};
67 	u8 prefixlen;
68 
69 	struct rb_root root;
70 
71 	/* the policies matching this node; can be an empty list */
72 	struct hlist_head hhead;
73 };
74 
75 /* xfrm inexact policy search tree:
76  * xfrm_pol_inexact_bin = hash(dir,type,family,if_id);
77  *  |
78  * +---- root_d: sorted by daddr:prefix
79  * |                 |
80  * |        xfrm_pol_inexact_node
81  * |                 |
82  * |                 +- root: sorted by saddr/prefix
83  * |                 |              |
84  * |                 |         xfrm_pol_inexact_node
85  * |                 |              |
86  * |                 |              + root: unused
87  * |                 |              |
88  * |                 |              + hhead: saddr:daddr policies
89  * |                 |
90  * |                 +- coarse policies and all any:daddr policies
91  * |
92  * +---- root_s: sorted by saddr:prefix
93  * |                 |
94  * |        xfrm_pol_inexact_node
95  * |                 |
96  * |                 + root: unused
97  * |                 |
98  * |                 + hhead: saddr:any policies
99  * |
100  * +---- coarse policies and all any:any policies
101  *
102  * Lookups return four candidate lists:
103  * 1. any:any list from top-level xfrm_pol_inexact_bin
104  * 2. any:daddr list from daddr tree
105  * 3. saddr:daddr list from 2nd level daddr tree
106  * 4. saddr:any list from saddr tree
107  *
108  * This result set then needs to be searched for the policy with
109  * the lowest priority.  If two results have same prio, youngest one wins.
110  */
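/* For example, a lookup with saddr 10.0.0.1 and daddr 192.0.2.1 may fill
 * the candidate set as:
 *   XFRM_POL_CAND_ANY   - bin->hhead (any:any and coarse policies)
 *   XFRM_POL_CAND_DADDR - hhead of the 192.0.2.0/24 node in root_d
 *   XFRM_POL_CAND_BOTH  - hhead of the 10.0.0.0/24 node below it
 *   XFRM_POL_CAND_SADDR - hhead of the 10.0.0.0/24 node in root_s
 * (the /24 nodes are only illustrative); see
 * xfrm_policy_find_inexact_candidates() and xfrm_policy_eval_candidates().
 */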
111 
112 struct xfrm_pol_inexact_key {
113 	possible_net_t net;
114 	u32 if_id;
115 	u16 family;
116 	u8 dir, type;
117 };
118 
119 struct xfrm_pol_inexact_bin {
120 	struct xfrm_pol_inexact_key k;
121 	struct rhash_head head;
122 	/* list containing '*:*' policies */
123 	struct hlist_head hhead;
124 
125 	seqcount_t count;
126 	/* tree sorted by daddr/prefix */
127 	struct rb_root root_d;
128 
129 	/* tree sorted by saddr/prefix */
130 	struct rb_root root_s;
131 
132 	/* slow path below */
133 	struct list_head inexact_bins;
134 	struct rcu_head rcu;
135 };
136 
137 enum xfrm_pol_inexact_candidate_type {
138 	XFRM_POL_CAND_BOTH,
139 	XFRM_POL_CAND_SADDR,
140 	XFRM_POL_CAND_DADDR,
141 	XFRM_POL_CAND_ANY,
142 
143 	XFRM_POL_CAND_MAX,
144 };
145 
146 struct xfrm_pol_inexact_candidates {
147 	struct hlist_head *res[XFRM_POL_CAND_MAX];
148 };
149 
150 static DEFINE_SPINLOCK(xfrm_if_cb_lock);
151 static struct xfrm_if_cb const __rcu *xfrm_if_cb __read_mostly;
152 
153 static DEFINE_SPINLOCK(xfrm_policy_afinfo_lock);
154 static struct xfrm_policy_afinfo const __rcu *xfrm_policy_afinfo[AF_INET6 + 1]
155 						__read_mostly;
156 
157 static struct kmem_cache *xfrm_dst_cache __ro_after_init;
158 static __read_mostly seqcount_t xfrm_policy_hash_generation;
159 
160 static struct rhashtable xfrm_policy_inexact_table;
161 static const struct rhashtable_params xfrm_pol_inexact_params;
162 
163 static void xfrm_init_pmtu(struct xfrm_dst **bundle, int nr);
164 static int stale_bundle(struct dst_entry *dst);
165 static int xfrm_bundle_ok(struct xfrm_dst *xdst);
166 static void xfrm_policy_queue_process(struct timer_list *t);
167 
168 static void __xfrm_policy_link(struct xfrm_policy *pol, int dir);
169 static struct xfrm_policy *__xfrm_policy_unlink(struct xfrm_policy *pol,
170 						int dir);
171 
172 static struct xfrm_pol_inexact_bin *
173 xfrm_policy_inexact_lookup(struct net *net, u8 type, u16 family, u8 dir,
174 			   u32 if_id);
175 
176 static struct xfrm_pol_inexact_bin *
177 xfrm_policy_inexact_lookup_rcu(struct net *net,
178 			       u8 type, u16 family, u8 dir, u32 if_id);
179 static struct xfrm_policy *
180 xfrm_policy_insert_list(struct hlist_head *chain, struct xfrm_policy *policy,
181 			bool excl);
182 static void xfrm_policy_insert_inexact_list(struct hlist_head *chain,
183 					    struct xfrm_policy *policy);
184 
185 static bool
186 xfrm_policy_find_inexact_candidates(struct xfrm_pol_inexact_candidates *cand,
187 				    struct xfrm_pol_inexact_bin *b,
188 				    const xfrm_address_t *saddr,
189 				    const xfrm_address_t *daddr);
190 
191 static inline bool xfrm_pol_hold_rcu(struct xfrm_policy *policy)
192 {
193 	return refcount_inc_not_zero(&policy->refcnt);
194 }
195 
196 static inline bool
197 __xfrm4_selector_match(const struct xfrm_selector *sel, const struct flowi *fl)
198 {
199 	const struct flowi4 *fl4 = &fl->u.ip4;
200 
201 	return  addr4_match(fl4->daddr, sel->daddr.a4, sel->prefixlen_d) &&
202 		addr4_match(fl4->saddr, sel->saddr.a4, sel->prefixlen_s) &&
203 		!((xfrm_flowi_dport(fl, &fl4->uli) ^ sel->dport) & sel->dport_mask) &&
204 		!((xfrm_flowi_sport(fl, &fl4->uli) ^ sel->sport) & sel->sport_mask) &&
205 		(fl4->flowi4_proto == sel->proto || !sel->proto) &&
206 		(fl4->flowi4_oif == sel->ifindex || !sel->ifindex);
207 }
208 
209 static inline bool
210 __xfrm6_selector_match(const struct xfrm_selector *sel, const struct flowi *fl)
211 {
212 	const struct flowi6 *fl6 = &fl->u.ip6;
213 
214 	return  addr_match(&fl6->daddr, &sel->daddr, sel->prefixlen_d) &&
215 		addr_match(&fl6->saddr, &sel->saddr, sel->prefixlen_s) &&
216 		!((xfrm_flowi_dport(fl, &fl6->uli) ^ sel->dport) & sel->dport_mask) &&
217 		!((xfrm_flowi_sport(fl, &fl6->uli) ^ sel->sport) & sel->sport_mask) &&
218 		(fl6->flowi6_proto == sel->proto || !sel->proto) &&
219 		(fl6->flowi6_oif == sel->ifindex || !sel->ifindex);
220 }
221 
222 bool xfrm_selector_match(const struct xfrm_selector *sel, const struct flowi *fl,
223 			 unsigned short family)
224 {
225 	switch (family) {
226 	case AF_INET:
227 		return __xfrm4_selector_match(sel, fl);
228 	case AF_INET6:
229 		return __xfrm6_selector_match(sel, fl);
230 	}
231 	return false;
232 }
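/* A minimal usage sketch: given a flow that the af-specific code has already
 * decoded into a struct flowi, a selector can be tested directly.  The
 * selector below (made-up values) covers UDP port 500 traffic to 192.0.2.0/24:
 *
 *	struct xfrm_selector sel = {
 *		.daddr.a4	= htonl(0xc0000200),	// 192.0.2.0
 *		.prefixlen_d	= 24,
 *		.proto		= IPPROTO_UDP,
 *		.dport		= htons(500),
 *		.dport_mask	= htons(0xffff),
 *	};
 *
 *	if (xfrm_selector_match(&sel, &fl, AF_INET))
 *		// the flow is covered by this selector
 *
 * Zero proto, ifindex or port mask fields act as wildcards.
 */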
233 
234 static const struct xfrm_policy_afinfo *xfrm_policy_get_afinfo(unsigned short family)
235 {
236 	const struct xfrm_policy_afinfo *afinfo;
237 
238 	if (unlikely(family >= ARRAY_SIZE(xfrm_policy_afinfo)))
239 		return NULL;
240 	rcu_read_lock();
241 	afinfo = rcu_dereference(xfrm_policy_afinfo[family]);
242 	if (unlikely(!afinfo))
243 		rcu_read_unlock();
244 	return afinfo;
245 }
246 
247 /* Called with rcu_read_lock(). */
248 static const struct xfrm_if_cb *xfrm_if_get_cb(void)
249 {
250 	return rcu_dereference(xfrm_if_cb);
251 }
252 
253 struct dst_entry *__xfrm_dst_lookup(struct net *net, int tos, int oif,
254 				    const xfrm_address_t *saddr,
255 				    const xfrm_address_t *daddr,
256 				    int family, u32 mark)
257 {
258 	const struct xfrm_policy_afinfo *afinfo;
259 	struct dst_entry *dst;
260 
261 	afinfo = xfrm_policy_get_afinfo(family);
262 	if (unlikely(afinfo == NULL))
263 		return ERR_PTR(-EAFNOSUPPORT);
264 
265 	dst = afinfo->dst_lookup(net, tos, oif, saddr, daddr, mark);
266 
267 	rcu_read_unlock();
268 
269 	return dst;
270 }
271 EXPORT_SYMBOL(__xfrm_dst_lookup);
272 
273 static inline struct dst_entry *xfrm_dst_lookup(struct xfrm_state *x,
274 						int tos, int oif,
275 						xfrm_address_t *prev_saddr,
276 						xfrm_address_t *prev_daddr,
277 						int family, u32 mark)
278 {
279 	struct net *net = xs_net(x);
280 	xfrm_address_t *saddr = &x->props.saddr;
281 	xfrm_address_t *daddr = &x->id.daddr;
282 	struct dst_entry *dst;
283 
284 	if (x->type->flags & XFRM_TYPE_LOCAL_COADDR) {
285 		saddr = x->coaddr;
286 		daddr = prev_daddr;
287 	}
288 	if (x->type->flags & XFRM_TYPE_REMOTE_COADDR) {
289 		saddr = prev_saddr;
290 		daddr = x->coaddr;
291 	}
292 
293 	dst = __xfrm_dst_lookup(net, tos, oif, saddr, daddr, family, mark);
294 
295 	if (!IS_ERR(dst)) {
296 		if (prev_saddr != saddr)
297 			memcpy(prev_saddr, saddr,  sizeof(*prev_saddr));
298 		if (prev_daddr != daddr)
299 			memcpy(prev_daddr, daddr,  sizeof(*prev_daddr));
300 	}
301 
302 	return dst;
303 }
304 
305 static inline unsigned long make_jiffies(long secs)
306 {
307 	if (secs >= (MAX_SCHEDULE_TIMEOUT-1)/HZ)
308 		return MAX_SCHEDULE_TIMEOUT-1;
309 	else
310 		return secs*HZ;
311 }
312 
313 static void xfrm_policy_timer(struct timer_list *t)
314 {
315 	struct xfrm_policy *xp = from_timer(xp, t, timer);
316 	time64_t now = ktime_get_real_seconds();
317 	time64_t next = TIME64_MAX;
318 	int warn = 0;
319 	int dir;
320 
321 	read_lock(&xp->lock);
322 
323 	if (unlikely(xp->walk.dead))
324 		goto out;
325 
326 	dir = xfrm_policy_id2dir(xp->index);
327 
328 	if (xp->lft.hard_add_expires_seconds) {
329 		time64_t tmo = xp->lft.hard_add_expires_seconds +
330 			xp->curlft.add_time - now;
331 		if (tmo <= 0)
332 			goto expired;
333 		if (tmo < next)
334 			next = tmo;
335 	}
336 	if (xp->lft.hard_use_expires_seconds) {
337 		time64_t tmo = xp->lft.hard_use_expires_seconds +
338 			(xp->curlft.use_time ? : xp->curlft.add_time) - now;
339 		if (tmo <= 0)
340 			goto expired;
341 		if (tmo < next)
342 			next = tmo;
343 	}
344 	if (xp->lft.soft_add_expires_seconds) {
345 		time64_t tmo = xp->lft.soft_add_expires_seconds +
346 			xp->curlft.add_time - now;
347 		if (tmo <= 0) {
348 			warn = 1;
349 			tmo = XFRM_KM_TIMEOUT;
350 		}
351 		if (tmo < next)
352 			next = tmo;
353 	}
354 	if (xp->lft.soft_use_expires_seconds) {
355 		time64_t tmo = xp->lft.soft_use_expires_seconds +
356 			(xp->curlft.use_time ? : xp->curlft.add_time) - now;
357 		if (tmo <= 0) {
358 			warn = 1;
359 			tmo = XFRM_KM_TIMEOUT;
360 		}
361 		if (tmo < next)
362 			next = tmo;
363 	}
364 
365 	if (warn)
366 		km_policy_expired(xp, dir, 0, 0);
367 	if (next != TIME64_MAX &&
368 	    !mod_timer(&xp->timer, jiffies + make_jiffies(next)))
369 		xfrm_pol_hold(xp);
370 
371 out:
372 	read_unlock(&xp->lock);
373 	xfrm_pol_put(xp);
374 	return;
375 
376 expired:
377 	read_unlock(&xp->lock);
378 	if (!xfrm_policy_delete(xp, dir))
379 		km_policy_expired(xp, dir, 1, 0);
380 	xfrm_pol_put(xp);
381 }
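/* Lifetime example: with hard_add_expires_seconds == 3600 and add_time == T,
 * each timer run computes tmo = 3600 + T - now and re-arms for the smallest
 * positive tmo across all limits; once a hard tmo reaches zero the policy is
 * deleted and km_policy_expired(xp, dir, 1, 0) signals the hard expiry.
 * Soft limits never delete the policy - they set warn, signal
 * km_policy_expired(xp, dir, 0, 0) and retry after XFRM_KM_TIMEOUT.
 */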
382 
383 /* Allocate xfrm_policy. Not used here, it is supposed to be used by pfkeyv2
384  * SPD calls.
385  */
386 
387 struct xfrm_policy *xfrm_policy_alloc(struct net *net, gfp_t gfp)
388 {
389 	struct xfrm_policy *policy;
390 
391 	policy = kzalloc(sizeof(struct xfrm_policy), gfp);
392 
393 	if (policy) {
394 		write_pnet(&policy->xp_net, net);
395 		INIT_LIST_HEAD(&policy->walk.all);
396 		INIT_HLIST_NODE(&policy->bydst_inexact_list);
397 		INIT_HLIST_NODE(&policy->bydst);
398 		INIT_HLIST_NODE(&policy->byidx);
399 		rwlock_init(&policy->lock);
400 		refcount_set(&policy->refcnt, 1);
401 		skb_queue_head_init(&policy->polq.hold_queue);
402 		timer_setup(&policy->timer, xfrm_policy_timer, 0);
403 		timer_setup(&policy->polq.hold_timer,
404 			    xfrm_policy_queue_process, 0);
405 	}
406 	return policy;
407 }
408 EXPORT_SYMBOL(xfrm_policy_alloc);
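/* A rough usage sketch (selector setup and error details elided), in the
 * style of the pfkeyv2/netlink SPD-add paths that call this helper:
 *
 *	struct xfrm_policy *pol = xfrm_policy_alloc(net, GFP_KERNEL);
 *
 *	if (!pol)
 *		return -ENOMEM;
 *	pol->family = AF_INET;
 *	pol->action = XFRM_POLICY_ALLOW;
 *	// fill pol->selector, lifetimes, templates, mark ...
 *	err = xfrm_policy_insert(XFRM_POLICY_OUT, pol, 0);
 *	if (err) {
 *		pol->walk.dead = 1;
 *		xfrm_policy_destroy(pol);
 *	}
 */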
409 
410 static void xfrm_policy_destroy_rcu(struct rcu_head *head)
411 {
412 	struct xfrm_policy *policy = container_of(head, struct xfrm_policy, rcu);
413 
414 	security_xfrm_policy_free(policy->security);
415 	kfree(policy);
416 }
417 
418 /* Destroy xfrm_policy: descendant resources must already be released by this point. */
419 
420 void xfrm_policy_destroy(struct xfrm_policy *policy)
421 {
422 	BUG_ON(!policy->walk.dead);
423 
424 	if (del_timer(&policy->timer) || del_timer(&policy->polq.hold_timer))
425 		BUG();
426 
427 	call_rcu(&policy->rcu, xfrm_policy_destroy_rcu);
428 }
429 EXPORT_SYMBOL(xfrm_policy_destroy);
430 
431 /* Rule must be locked. Release descendant resources and announce the
432  * entry dead. The rule must already be unlinked from the lists.
433  */
434 
435 static void xfrm_policy_kill(struct xfrm_policy *policy)
436 {
437 	write_lock_bh(&policy->lock);
438 	policy->walk.dead = 1;
439 	write_unlock_bh(&policy->lock);
440 
441 	atomic_inc(&policy->genid);
442 
443 	if (del_timer(&policy->polq.hold_timer))
444 		xfrm_pol_put(policy);
445 	skb_queue_purge(&policy->polq.hold_queue);
446 
447 	if (del_timer(&policy->timer))
448 		xfrm_pol_put(policy);
449 
450 	xfrm_pol_put(policy);
451 }
452 
453 static unsigned int xfrm_policy_hashmax __read_mostly = 1 * 1024 * 1024;
454 
455 static inline unsigned int idx_hash(struct net *net, u32 index)
456 {
457 	return __idx_hash(index, net->xfrm.policy_idx_hmask);
458 }
459 
460 /* calculate policy hash thresholds */
461 static void __get_hash_thresh(struct net *net,
462 			      unsigned short family, int dir,
463 			      u8 *dbits, u8 *sbits)
464 {
465 	switch (family) {
466 	case AF_INET:
467 		*dbits = net->xfrm.policy_bydst[dir].dbits4;
468 		*sbits = net->xfrm.policy_bydst[dir].sbits4;
469 		break;
470 
471 	case AF_INET6:
472 		*dbits = net->xfrm.policy_bydst[dir].dbits6;
473 		*sbits = net->xfrm.policy_bydst[dir].sbits6;
474 		break;
475 
476 	default:
477 		*dbits = 0;
478 		*sbits = 0;
479 	}
480 }
481 
482 static struct hlist_head *policy_hash_bysel(struct net *net,
483 					    const struct xfrm_selector *sel,
484 					    unsigned short family, int dir)
485 {
486 	unsigned int hmask = net->xfrm.policy_bydst[dir].hmask;
487 	unsigned int hash;
488 	u8 dbits;
489 	u8 sbits;
490 
491 	__get_hash_thresh(net, family, dir, &dbits, &sbits);
492 	hash = __sel_hash(sel, family, hmask, dbits, sbits);
493 
494 	if (hash == hmask + 1)
495 		return NULL;
496 
497 	return rcu_dereference_check(net->xfrm.policy_bydst[dir].table,
498 		     lockdep_is_held(&net->xfrm.xfrm_policy_lock)) + hash;
499 }
500 
501 static struct hlist_head *policy_hash_direct(struct net *net,
502 					     const xfrm_address_t *daddr,
503 					     const xfrm_address_t *saddr,
504 					     unsigned short family, int dir)
505 {
506 	unsigned int hmask = net->xfrm.policy_bydst[dir].hmask;
507 	unsigned int hash;
508 	u8 dbits;
509 	u8 sbits;
510 
511 	__get_hash_thresh(net, family, dir, &dbits, &sbits);
512 	hash = __addr_hash(daddr, saddr, family, hmask, dbits, sbits);
513 
514 	return rcu_dereference_check(net->xfrm.policy_bydst[dir].table,
515 		     lockdep_is_held(&net->xfrm.xfrm_policy_lock)) + hash;
516 }
517 
518 static void xfrm_dst_hash_transfer(struct net *net,
519 				   struct hlist_head *list,
520 				   struct hlist_head *ndsttable,
521 				   unsigned int nhashmask,
522 				   int dir)
523 {
524 	struct hlist_node *tmp, *entry0 = NULL;
525 	struct xfrm_policy *pol;
526 	unsigned int h0 = 0;
527 	u8 dbits;
528 	u8 sbits;
529 
530 redo:
531 	hlist_for_each_entry_safe(pol, tmp, list, bydst) {
532 		unsigned int h;
533 
534 		__get_hash_thresh(net, pol->family, dir, &dbits, &sbits);
535 		h = __addr_hash(&pol->selector.daddr, &pol->selector.saddr,
536 				pol->family, nhashmask, dbits, sbits);
537 		if (!entry0) {
538 			hlist_del_rcu(&pol->bydst);
539 			hlist_add_head_rcu(&pol->bydst, ndsttable + h);
540 			h0 = h;
541 		} else {
542 			if (h != h0)
543 				continue;
544 			hlist_del_rcu(&pol->bydst);
545 			hlist_add_behind_rcu(&pol->bydst, entry0);
546 		}
547 		entry0 = &pol->bydst;
548 	}
549 	if (!hlist_empty(list)) {
550 		entry0 = NULL;
551 		goto redo;
552 	}
553 }
554 
555 static void xfrm_idx_hash_transfer(struct hlist_head *list,
556 				   struct hlist_head *nidxtable,
557 				   unsigned int nhashmask)
558 {
559 	struct hlist_node *tmp;
560 	struct xfrm_policy *pol;
561 
562 	hlist_for_each_entry_safe(pol, tmp, list, byidx) {
563 		unsigned int h;
564 
565 		h = __idx_hash(pol->index, nhashmask);
566 		hlist_add_head(&pol->byidx, nidxtable+h);
567 	}
568 }
569 
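/* Double the table size, e.g. hmask 15 (16 buckets) becomes 31 (32 buckets). */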
570 static unsigned long xfrm_new_hash_mask(unsigned int old_hmask)
571 {
572 	return ((old_hmask + 1) << 1) - 1;
573 }
574 
575 static void xfrm_bydst_resize(struct net *net, int dir)
576 {
577 	unsigned int hmask = net->xfrm.policy_bydst[dir].hmask;
578 	unsigned int nhashmask = xfrm_new_hash_mask(hmask);
579 	unsigned int nsize = (nhashmask + 1) * sizeof(struct hlist_head);
580 	struct hlist_head *ndst = xfrm_hash_alloc(nsize);
581 	struct hlist_head *odst;
582 	int i;
583 
584 	if (!ndst)
585 		return;
586 
587 	spin_lock_bh(&net->xfrm.xfrm_policy_lock);
588 	write_seqcount_begin(&xfrm_policy_hash_generation);
589 
590 	odst = rcu_dereference_protected(net->xfrm.policy_bydst[dir].table,
591 				lockdep_is_held(&net->xfrm.xfrm_policy_lock));
592 
593 	for (i = hmask; i >= 0; i--)
594 		xfrm_dst_hash_transfer(net, odst + i, ndst, nhashmask, dir);
595 
596 	rcu_assign_pointer(net->xfrm.policy_bydst[dir].table, ndst);
597 	net->xfrm.policy_bydst[dir].hmask = nhashmask;
598 
599 	write_seqcount_end(&xfrm_policy_hash_generation);
600 	spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
601 
602 	synchronize_rcu();
603 
604 	xfrm_hash_free(odst, (hmask + 1) * sizeof(struct hlist_head));
605 }
606 
607 static void xfrm_byidx_resize(struct net *net, int total)
608 {
609 	unsigned int hmask = net->xfrm.policy_idx_hmask;
610 	unsigned int nhashmask = xfrm_new_hash_mask(hmask);
611 	unsigned int nsize = (nhashmask + 1) * sizeof(struct hlist_head);
612 	struct hlist_head *oidx = net->xfrm.policy_byidx;
613 	struct hlist_head *nidx = xfrm_hash_alloc(nsize);
614 	int i;
615 
616 	if (!nidx)
617 		return;
618 
619 	spin_lock_bh(&net->xfrm.xfrm_policy_lock);
620 
621 	for (i = hmask; i >= 0; i--)
622 		xfrm_idx_hash_transfer(oidx + i, nidx, nhashmask);
623 
624 	net->xfrm.policy_byidx = nidx;
625 	net->xfrm.policy_idx_hmask = nhashmask;
626 
627 	spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
628 
629 	xfrm_hash_free(oidx, (hmask + 1) * sizeof(struct hlist_head));
630 }
631 
632 static inline int xfrm_bydst_should_resize(struct net *net, int dir, int *total)
633 {
634 	unsigned int cnt = net->xfrm.policy_count[dir];
635 	unsigned int hmask = net->xfrm.policy_bydst[dir].hmask;
636 
637 	if (total)
638 		*total += cnt;
639 
640 	if ((hmask + 1) < xfrm_policy_hashmax &&
641 	    cnt > hmask)
642 		return 1;
643 
644 	return 0;
645 }
646 
647 static inline int xfrm_byidx_should_resize(struct net *net, int total)
648 {
649 	unsigned int hmask = net->xfrm.policy_idx_hmask;
650 
651 	if ((hmask + 1) < xfrm_policy_hashmax &&
652 	    total > hmask)
653 		return 1;
654 
655 	return 0;
656 }
657 
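/* The upper half of policy_count[] (index XFRM_POLICY_MAX and above) holds
 * the per-socket policy counts reported as inscnt, outscnt and fwdscnt.
 */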
658 void xfrm_spd_getinfo(struct net *net, struct xfrmk_spdinfo *si)
659 {
660 	si->incnt = net->xfrm.policy_count[XFRM_POLICY_IN];
661 	si->outcnt = net->xfrm.policy_count[XFRM_POLICY_OUT];
662 	si->fwdcnt = net->xfrm.policy_count[XFRM_POLICY_FWD];
663 	si->inscnt = net->xfrm.policy_count[XFRM_POLICY_IN+XFRM_POLICY_MAX];
664 	si->outscnt = net->xfrm.policy_count[XFRM_POLICY_OUT+XFRM_POLICY_MAX];
665 	si->fwdscnt = net->xfrm.policy_count[XFRM_POLICY_FWD+XFRM_POLICY_MAX];
666 	si->spdhcnt = net->xfrm.policy_idx_hmask;
667 	si->spdhmcnt = xfrm_policy_hashmax;
668 }
669 EXPORT_SYMBOL(xfrm_spd_getinfo);
670 
671 static DEFINE_MUTEX(hash_resize_mutex);
672 static void xfrm_hash_resize(struct work_struct *work)
673 {
674 	struct net *net = container_of(work, struct net, xfrm.policy_hash_work);
675 	int dir, total;
676 
677 	mutex_lock(&hash_resize_mutex);
678 
679 	total = 0;
680 	for (dir = 0; dir < XFRM_POLICY_MAX; dir++) {
681 		if (xfrm_bydst_should_resize(net, dir, &total))
682 			xfrm_bydst_resize(net, dir);
683 	}
684 	if (xfrm_byidx_should_resize(net, total))
685 		xfrm_byidx_resize(net, total);
686 
687 	mutex_unlock(&hash_resize_mutex);
688 }
689 
690 /* Make sure *pol can be inserted into fastbin.
691  * Useful to check that later insert requests will be successful
692  * (provided xfrm_policy_lock is held throughout).
693  */
694 static struct xfrm_pol_inexact_bin *
695 xfrm_policy_inexact_alloc_bin(const struct xfrm_policy *pol, u8 dir)
696 {
697 	struct xfrm_pol_inexact_bin *bin, *prev;
698 	struct xfrm_pol_inexact_key k = {
699 		.family = pol->family,
700 		.type = pol->type,
701 		.dir = dir,
702 		.if_id = pol->if_id,
703 	};
704 	struct net *net = xp_net(pol);
705 
706 	lockdep_assert_held(&net->xfrm.xfrm_policy_lock);
707 
708 	write_pnet(&k.net, net);
709 	bin = rhashtable_lookup_fast(&xfrm_policy_inexact_table, &k,
710 				     xfrm_pol_inexact_params);
711 	if (bin)
712 		return bin;
713 
714 	bin = kzalloc(sizeof(*bin), GFP_ATOMIC);
715 	if (!bin)
716 		return NULL;
717 
718 	bin->k = k;
719 	INIT_HLIST_HEAD(&bin->hhead);
720 	bin->root_d = RB_ROOT;
721 	bin->root_s = RB_ROOT;
722 	seqcount_init(&bin->count);
723 
724 	prev = rhashtable_lookup_get_insert_key(&xfrm_policy_inexact_table,
725 						&bin->k, &bin->head,
726 						xfrm_pol_inexact_params);
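	/* NULL means the new bin was inserted; a non-NULL, non-error return
	 * is an existing bin with the same key, in which case the allocation
	 * above is dropped and the existing bin is reused.
	 */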
727 	if (!prev) {
728 		list_add(&bin->inexact_bins, &net->xfrm.inexact_bins);
729 		return bin;
730 	}
731 
732 	kfree(bin);
733 
734 	return IS_ERR(prev) ? NULL : prev;
735 }
736 
737 static bool xfrm_pol_inexact_addr_use_any_list(const xfrm_address_t *addr,
738 					       int family, u8 prefixlen)
739 {
740 	if (xfrm_addr_any(addr, family))
741 		return true;
742 
743 	if (family == AF_INET6 && prefixlen < INEXACT_PREFIXLEN_IPV6)
744 		return true;
745 
746 	if (family == AF_INET && prefixlen < INEXACT_PREFIXLEN_IPV4)
747 		return true;
748 
749 	return false;
750 }
751 
752 static bool
753 xfrm_policy_inexact_insert_use_any_list(const struct xfrm_policy *policy)
754 {
755 	const xfrm_address_t *addr;
756 	bool saddr_any, daddr_any;
757 	u8 prefixlen;
758 
759 	addr = &policy->selector.saddr;
760 	prefixlen = policy->selector.prefixlen_s;
761 
762 	saddr_any = xfrm_pol_inexact_addr_use_any_list(addr,
763 						       policy->family,
764 						       prefixlen);
765 	addr = &policy->selector.daddr;
766 	prefixlen = policy->selector.prefixlen_d;
767 	daddr_any = xfrm_pol_inexact_addr_use_any_list(addr,
768 						       policy->family,
769 						       prefixlen);
770 	return saddr_any && daddr_any;
771 }
772 
773 static void xfrm_pol_inexact_node_init(struct xfrm_pol_inexact_node *node,
774 				       const xfrm_address_t *addr, u8 prefixlen)
775 {
776 	node->addr = *addr;
777 	node->prefixlen = prefixlen;
778 }
779 
780 static struct xfrm_pol_inexact_node *
781 xfrm_pol_inexact_node_alloc(const xfrm_address_t *addr, u8 prefixlen)
782 {
783 	struct xfrm_pol_inexact_node *node;
784 
785 	node = kzalloc(sizeof(*node), GFP_ATOMIC);
786 	if (node)
787 		xfrm_pol_inexact_node_init(node, addr, prefixlen);
788 
789 	return node;
790 }
791 
792 static int xfrm_policy_addr_delta(const xfrm_address_t *a,
793 				  const xfrm_address_t *b,
794 				  u8 prefixlen, u16 family)
795 {
796 	unsigned int pdw, pbi;
797 	int delta = 0;
798 
799 	switch (family) {
800 	case AF_INET:
801 		if (sizeof(long) == 4 && prefixlen == 0)
802 			return ntohl(a->a4) - ntohl(b->a4);
803 		return (ntohl(a->a4) & ((~0UL << (32 - prefixlen)))) -
804 		       (ntohl(b->a4) & ((~0UL << (32 - prefixlen))));
805 	case AF_INET6:
806 		pdw = prefixlen >> 5;
807 		pbi = prefixlen & 0x1f;
808 
809 		if (pdw) {
810 			delta = memcmp(a->a6, b->a6, pdw << 2);
811 			if (delta)
812 				return delta;
813 		}
814 		if (pbi) {
815 			u32 mask = ~0u << (32 - pbi);
816 
817 			delta = (ntohl(a->a6[pdw]) & mask) -
818 				(ntohl(b->a6[pdw]) & mask);
819 		}
820 		break;
821 	default:
822 		break;
823 	}
824 
825 	return delta;
826 }
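/* Worked example for the IPv4 branch: with prefixlen 16, 10.1.200.200 and
 * 10.1.3.4 both mask to 0x0a010000, so the delta is 0 and the addresses are
 * considered equal; with prefixlen 24 the masked values differ (0x0a01c800
 * vs 0x0a010300) and the sign of the difference orders them in the rbtree.
 */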
827 
828 static void xfrm_policy_inexact_list_reinsert(struct net *net,
829 					      struct xfrm_pol_inexact_node *n,
830 					      u16 family)
831 {
832 	unsigned int matched_s, matched_d;
833 	struct xfrm_policy *policy, *p;
834 
835 	matched_s = 0;
836 	matched_d = 0;
837 
838 	list_for_each_entry_reverse(policy, &net->xfrm.policy_all, walk.all) {
839 		struct hlist_node *newpos = NULL;
840 		bool matches_s, matches_d;
841 
842 		if (!policy->bydst_reinsert)
843 			continue;
844 
845 		WARN_ON_ONCE(policy->family != family);
846 
847 		policy->bydst_reinsert = false;
848 		hlist_for_each_entry(p, &n->hhead, bydst) {
849 			if (policy->priority > p->priority)
850 				newpos = &p->bydst;
851 			else if (policy->priority == p->priority &&
852 				 policy->pos > p->pos)
853 				newpos = &p->bydst;
854 			else
855 				break;
856 		}
857 
858 		if (newpos)
859 			hlist_add_behind_rcu(&policy->bydst, newpos);
860 		else
861 			hlist_add_head_rcu(&policy->bydst, &n->hhead);
862 
863 		/* paranoia checks follow.
864 		 * Check that the reinserted policy matches at least
865 		 * saddr or daddr for current node prefix.
866 		 *
867 		 * Matching both is fine, matching saddr in one policy
868 		 * (but not daddr) and then matching only daddr in another
869 		 * is a bug.
870 		 */
871 		matches_s = xfrm_policy_addr_delta(&policy->selector.saddr,
872 						   &n->addr,
873 						   n->prefixlen,
874 						   family) == 0;
875 		matches_d = xfrm_policy_addr_delta(&policy->selector.daddr,
876 						   &n->addr,
877 						   n->prefixlen,
878 						   family) == 0;
879 		if (matches_s && matches_d)
880 			continue;
881 
882 		WARN_ON_ONCE(!matches_s && !matches_d);
883 		if (matches_s)
884 			matched_s++;
885 		if (matches_d)
886 			matched_d++;
887 		WARN_ON_ONCE(matched_s && matched_d);
888 	}
889 }
890 
891 static void xfrm_policy_inexact_node_reinsert(struct net *net,
892 					      struct xfrm_pol_inexact_node *n,
893 					      struct rb_root *new,
894 					      u16 family)
895 {
896 	struct xfrm_pol_inexact_node *node;
897 	struct rb_node **p, *parent;
898 
899 	/* we should not have another subtree here */
900 	WARN_ON_ONCE(!RB_EMPTY_ROOT(&n->root));
901 restart:
902 	parent = NULL;
903 	p = &new->rb_node;
904 	while (*p) {
905 		u8 prefixlen;
906 		int delta;
907 
908 		parent = *p;
909 		node = rb_entry(*p, struct xfrm_pol_inexact_node, node);
910 
911 		prefixlen = min(node->prefixlen, n->prefixlen);
912 
913 		delta = xfrm_policy_addr_delta(&n->addr, &node->addr,
914 					       prefixlen, family);
915 		if (delta < 0) {
916 			p = &parent->rb_left;
917 		} else if (delta > 0) {
918 			p = &parent->rb_right;
919 		} else {
920 			bool same_prefixlen = node->prefixlen == n->prefixlen;
921 			struct xfrm_policy *tmp;
922 
923 			hlist_for_each_entry(tmp, &n->hhead, bydst) {
924 				tmp->bydst_reinsert = true;
925 				hlist_del_rcu(&tmp->bydst);
926 			}
927 
928 			node->prefixlen = prefixlen;
929 
930 			xfrm_policy_inexact_list_reinsert(net, node, family);
931 
932 			if (same_prefixlen) {
933 				kfree_rcu(n, rcu);
934 				return;
935 			}
936 
937 			rb_erase(*p, new);
938 			kfree_rcu(n, rcu);
939 			n = node;
940 			goto restart;
941 		}
942 	}
943 
944 	rb_link_node_rcu(&n->node, parent, p);
945 	rb_insert_color(&n->node, new);
946 }
947 
948 /* merge nodes v and n */
949 static void xfrm_policy_inexact_node_merge(struct net *net,
950 					   struct xfrm_pol_inexact_node *v,
951 					   struct xfrm_pol_inexact_node *n,
952 					   u16 family)
953 {
954 	struct xfrm_pol_inexact_node *node;
955 	struct xfrm_policy *tmp;
956 	struct rb_node *rnode;
957 
958 	/* To-be-merged node v has a subtree.
959 	 *
960 	 * Dismantle it and insert its nodes into n->root.
961 	 */
962 	while ((rnode = rb_first(&v->root)) != NULL) {
963 		node = rb_entry(rnode, struct xfrm_pol_inexact_node, node);
964 		rb_erase(&node->node, &v->root);
965 		xfrm_policy_inexact_node_reinsert(net, node, &n->root,
966 						  family);
967 	}
968 
969 	hlist_for_each_entry(tmp, &v->hhead, bydst) {
970 		tmp->bydst_reinsert = true;
971 		hlist_del_rcu(&tmp->bydst);
972 	}
973 
974 	xfrm_policy_inexact_list_reinsert(net, n, family);
975 }
976 
977 static struct xfrm_pol_inexact_node *
978 xfrm_policy_inexact_insert_node(struct net *net,
979 				struct rb_root *root,
980 				xfrm_address_t *addr,
981 				u16 family, u8 prefixlen, u8 dir)
982 {
983 	struct xfrm_pol_inexact_node *cached = NULL;
984 	struct rb_node **p, *parent = NULL;
985 	struct xfrm_pol_inexact_node *node;
986 
987 	p = &root->rb_node;
988 	while (*p) {
989 		int delta;
990 
991 		parent = *p;
992 		node = rb_entry(*p, struct xfrm_pol_inexact_node, node);
993 
994 		delta = xfrm_policy_addr_delta(addr, &node->addr,
995 					       node->prefixlen,
996 					       family);
997 		if (delta == 0 && prefixlen >= node->prefixlen) {
998 			WARN_ON_ONCE(cached); /* ipsec policies got lost */
999 			return node;
1000 		}
1001 
1002 		if (delta < 0)
1003 			p = &parent->rb_left;
1004 		else
1005 			p = &parent->rb_right;
1006 
1007 		if (prefixlen < node->prefixlen) {
1008 			delta = xfrm_policy_addr_delta(addr, &node->addr,
1009 						       prefixlen,
1010 						       family);
1011 			if (delta)
1012 				continue;
1013 
1014 			/* This node is a subnet of the new prefix. It needs
1015 			 * to be removed and re-inserted with the smaller
1016 			 * prefix and all nodes that are now also covered
1017 			 * by the reduced prefixlen.
1018 			 */
1019 			rb_erase(&node->node, root);
1020 
1021 			if (!cached) {
1022 				xfrm_pol_inexact_node_init(node, addr,
1023 							   prefixlen);
1024 				cached = node;
1025 			} else {
1026 				/* This node also falls within the new
1027 				 * prefixlen. Merge the to-be-reinserted
1028 				 * node and this one.
1029 				 */
1030 				xfrm_policy_inexact_node_merge(net, node,
1031 							       cached, family);
1032 				kfree_rcu(node, rcu);
1033 			}
1034 
1035 			/* restart */
1036 			p = &root->rb_node;
1037 			parent = NULL;
1038 		}
1039 	}
1040 
1041 	node = cached;
1042 	if (!node) {
1043 		node = xfrm_pol_inexact_node_alloc(addr, prefixlen);
1044 		if (!node)
1045 			return NULL;
1046 	}
1047 
1048 	rb_link_node_rcu(&node->node, parent, p);
1049 	rb_insert_color(&node->node, root);
1050 
1051 	return node;
1052 }
1053 
1054 static void xfrm_policy_inexact_gc_tree(struct rb_root *r, bool rm)
1055 {
1056 	struct xfrm_pol_inexact_node *node;
1057 	struct rb_node *rn = rb_first(r);
1058 
1059 	while (rn) {
1060 		node = rb_entry(rn, struct xfrm_pol_inexact_node, node);
1061 
1062 		xfrm_policy_inexact_gc_tree(&node->root, rm);
1063 		rn = rb_next(rn);
1064 
1065 		if (!hlist_empty(&node->hhead) || !RB_EMPTY_ROOT(&node->root)) {
1066 			WARN_ON_ONCE(rm);
1067 			continue;
1068 		}
1069 
1070 		rb_erase(&node->node, r);
1071 		kfree_rcu(node, rcu);
1072 	}
1073 }
1074 
1075 static void __xfrm_policy_inexact_prune_bin(struct xfrm_pol_inexact_bin *b, bool net_exit)
1076 {
1077 	write_seqcount_begin(&b->count);
1078 	xfrm_policy_inexact_gc_tree(&b->root_d, net_exit);
1079 	xfrm_policy_inexact_gc_tree(&b->root_s, net_exit);
1080 	write_seqcount_end(&b->count);
1081 
1082 	if (!RB_EMPTY_ROOT(&b->root_d) || !RB_EMPTY_ROOT(&b->root_s) ||
1083 	    !hlist_empty(&b->hhead)) {
1084 		WARN_ON_ONCE(net_exit);
1085 		return;
1086 	}
1087 
1088 	if (rhashtable_remove_fast(&xfrm_policy_inexact_table, &b->head,
1089 				   xfrm_pol_inexact_params) == 0) {
1090 		list_del(&b->inexact_bins);
1091 		kfree_rcu(b, rcu);
1092 	}
1093 }
1094 
1095 static void xfrm_policy_inexact_prune_bin(struct xfrm_pol_inexact_bin *b)
1096 {
1097 	struct net *net = read_pnet(&b->k.net);
1098 
1099 	spin_lock_bh(&net->xfrm.xfrm_policy_lock);
1100 	__xfrm_policy_inexact_prune_bin(b, false);
1101 	spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
1102 }
1103 
1104 static void __xfrm_policy_inexact_flush(struct net *net)
1105 {
1106 	struct xfrm_pol_inexact_bin *bin, *t;
1107 
1108 	lockdep_assert_held(&net->xfrm.xfrm_policy_lock);
1109 
1110 	list_for_each_entry_safe(bin, t, &net->xfrm.inexact_bins, inexact_bins)
1111 		__xfrm_policy_inexact_prune_bin(bin, false);
1112 }
1113 
1114 static struct hlist_head *
1115 xfrm_policy_inexact_alloc_chain(struct xfrm_pol_inexact_bin *bin,
1116 				struct xfrm_policy *policy, u8 dir)
1117 {
1118 	struct xfrm_pol_inexact_node *n;
1119 	struct net *net;
1120 
1121 	net = xp_net(policy);
1122 	lockdep_assert_held(&net->xfrm.xfrm_policy_lock);
1123 
1124 	if (xfrm_policy_inexact_insert_use_any_list(policy))
1125 		return &bin->hhead;
1126 
1127 	if (xfrm_pol_inexact_addr_use_any_list(&policy->selector.daddr,
1128 					       policy->family,
1129 					       policy->selector.prefixlen_d)) {
1130 		write_seqcount_begin(&bin->count);
1131 		n = xfrm_policy_inexact_insert_node(net,
1132 						    &bin->root_s,
1133 						    &policy->selector.saddr,
1134 						    policy->family,
1135 						    policy->selector.prefixlen_s,
1136 						    dir);
1137 		write_seqcount_end(&bin->count);
1138 		if (!n)
1139 			return NULL;
1140 
1141 		return &n->hhead;
1142 	}
1143 
1144 	/* daddr is fixed */
1145 	write_seqcount_begin(&bin->count);
1146 	n = xfrm_policy_inexact_insert_node(net,
1147 					    &bin->root_d,
1148 					    &policy->selector.daddr,
1149 					    policy->family,
1150 					    policy->selector.prefixlen_d, dir);
1151 	write_seqcount_end(&bin->count);
1152 	if (!n)
1153 		return NULL;
1154 
1155 	/* saddr is wildcard */
1156 	if (xfrm_pol_inexact_addr_use_any_list(&policy->selector.saddr,
1157 					       policy->family,
1158 					       policy->selector.prefixlen_s))
1159 		return &n->hhead;
1160 
1161 	write_seqcount_begin(&bin->count);
1162 	n = xfrm_policy_inexact_insert_node(net,
1163 					    &n->root,
1164 					    &policy->selector.saddr,
1165 					    policy->family,
1166 					    policy->selector.prefixlen_s, dir);
1167 	write_seqcount_end(&bin->count);
1168 	if (!n)
1169 		return NULL;
1170 
1171 	return &n->hhead;
1172 }
1173 
1174 static struct xfrm_policy *
1175 xfrm_policy_inexact_insert(struct xfrm_policy *policy, u8 dir, int excl)
1176 {
1177 	struct xfrm_pol_inexact_bin *bin;
1178 	struct xfrm_policy *delpol;
1179 	struct hlist_head *chain;
1180 	struct net *net;
1181 
1182 	bin = xfrm_policy_inexact_alloc_bin(policy, dir);
1183 	if (!bin)
1184 		return ERR_PTR(-ENOMEM);
1185 
1186 	net = xp_net(policy);
1187 	lockdep_assert_held(&net->xfrm.xfrm_policy_lock);
1188 
1189 	chain = xfrm_policy_inexact_alloc_chain(bin, policy, dir);
1190 	if (!chain) {
1191 		__xfrm_policy_inexact_prune_bin(bin, false);
1192 		return ERR_PTR(-ENOMEM);
1193 	}
1194 
1195 	delpol = xfrm_policy_insert_list(chain, policy, excl);
1196 	if (delpol && excl) {
1197 		__xfrm_policy_inexact_prune_bin(bin, false);
1198 		return ERR_PTR(-EEXIST);
1199 	}
1200 
1201 	chain = &net->xfrm.policy_inexact[dir];
1202 	xfrm_policy_insert_inexact_list(chain, policy);
1203 
1204 	if (delpol)
1205 		__xfrm_policy_inexact_prune_bin(bin, false);
1206 
1207 	return delpol;
1208 }
1209 
1210 static void xfrm_hash_rebuild(struct work_struct *work)
1211 {
1212 	struct net *net = container_of(work, struct net,
1213 				       xfrm.policy_hthresh.work);
1214 	unsigned int hmask;
1215 	struct xfrm_policy *pol;
1216 	struct xfrm_policy *policy;
1217 	struct hlist_head *chain;
1218 	struct hlist_head *odst;
1219 	struct hlist_node *newpos;
1220 	int i;
1221 	int dir;
1222 	unsigned seq;
1223 	u8 lbits4, rbits4, lbits6, rbits6;
1224 
1225 	mutex_lock(&hash_resize_mutex);
1226 
1227 	/* read selector prefixlen thresholds */
1228 	do {
1229 		seq = read_seqbegin(&net->xfrm.policy_hthresh.lock);
1230 
1231 		lbits4 = net->xfrm.policy_hthresh.lbits4;
1232 		rbits4 = net->xfrm.policy_hthresh.rbits4;
1233 		lbits6 = net->xfrm.policy_hthresh.lbits6;
1234 		rbits6 = net->xfrm.policy_hthresh.rbits6;
1235 	} while (read_seqretry(&net->xfrm.policy_hthresh.lock, seq));
1236 
1237 	spin_lock_bh(&net->xfrm.xfrm_policy_lock);
1238 	write_seqcount_begin(&xfrm_policy_hash_generation);
1239 
1240 	/* make sure that we can insert the indirect policies again before
1241 	 * we start with destructive action.
1242 	 */
1243 	list_for_each_entry(policy, &net->xfrm.policy_all, walk.all) {
1244 		struct xfrm_pol_inexact_bin *bin;
1245 		u8 dbits, sbits;
1246 
1247 		dir = xfrm_policy_id2dir(policy->index);
1248 		if (policy->walk.dead || dir >= XFRM_POLICY_MAX)
1249 			continue;
1250 
1251 		if ((dir & XFRM_POLICY_MASK) == XFRM_POLICY_OUT) {
1252 			if (policy->family == AF_INET) {
1253 				dbits = rbits4;
1254 				sbits = lbits4;
1255 			} else {
1256 				dbits = rbits6;
1257 				sbits = lbits6;
1258 			}
1259 		} else {
1260 			if (policy->family == AF_INET) {
1261 				dbits = lbits4;
1262 				sbits = rbits4;
1263 			} else {
1264 				dbits = lbits6;
1265 				sbits = rbits6;
1266 			}
1267 		}
1268 
1269 		if (policy->selector.prefixlen_d < dbits ||
1270 		    policy->selector.prefixlen_s < sbits)
1271 			continue;
1272 
1273 		bin = xfrm_policy_inexact_alloc_bin(policy, dir);
1274 		if (!bin)
1275 			goto out_unlock;
1276 
1277 		if (!xfrm_policy_inexact_alloc_chain(bin, policy, dir))
1278 			goto out_unlock;
1279 	}
1280 
1281 	/* reset the bydst and inexact table in all directions */
1282 	for (dir = 0; dir < XFRM_POLICY_MAX; dir++) {
1283 		struct hlist_node *n;
1284 
1285 		hlist_for_each_entry_safe(policy, n,
1286 					  &net->xfrm.policy_inexact[dir],
1287 					  bydst_inexact_list) {
1288 			hlist_del_rcu(&policy->bydst);
1289 			hlist_del_init(&policy->bydst_inexact_list);
1290 		}
1291 
1292 		hmask = net->xfrm.policy_bydst[dir].hmask;
1293 		odst = net->xfrm.policy_bydst[dir].table;
1294 		for (i = hmask; i >= 0; i--) {
1295 			hlist_for_each_entry_safe(policy, n, odst + i, bydst)
1296 				hlist_del_rcu(&policy->bydst);
1297 		}
1298 		if ((dir & XFRM_POLICY_MASK) == XFRM_POLICY_OUT) {
1299 			/* dir out => dst = remote, src = local */
1300 			net->xfrm.policy_bydst[dir].dbits4 = rbits4;
1301 			net->xfrm.policy_bydst[dir].sbits4 = lbits4;
1302 			net->xfrm.policy_bydst[dir].dbits6 = rbits6;
1303 			net->xfrm.policy_bydst[dir].sbits6 = lbits6;
1304 		} else {
1305 			/* dir in/fwd => dst = local, src = remote */
1306 			net->xfrm.policy_bydst[dir].dbits4 = lbits4;
1307 			net->xfrm.policy_bydst[dir].sbits4 = rbits4;
1308 			net->xfrm.policy_bydst[dir].dbits6 = lbits6;
1309 			net->xfrm.policy_bydst[dir].sbits6 = rbits6;
1310 		}
1311 	}
1312 
1313 	/* re-insert all policies by order of creation */
1314 	list_for_each_entry_reverse(policy, &net->xfrm.policy_all, walk.all) {
1315 		if (policy->walk.dead)
1316 			continue;
1317 		dir = xfrm_policy_id2dir(policy->index);
1318 		if (dir >= XFRM_POLICY_MAX) {
1319 			/* skip socket policies */
1320 			continue;
1321 		}
1322 		newpos = NULL;
1323 		chain = policy_hash_bysel(net, &policy->selector,
1324 					  policy->family, dir);
1325 
1326 		if (!chain) {
1327 			void *p = xfrm_policy_inexact_insert(policy, dir, 0);
1328 
1329 			WARN_ONCE(IS_ERR(p), "reinsert: %ld\n", PTR_ERR(p));
1330 			continue;
1331 		}
1332 
1333 		hlist_for_each_entry(pol, chain, bydst) {
1334 			if (policy->priority >= pol->priority)
1335 				newpos = &pol->bydst;
1336 			else
1337 				break;
1338 		}
1339 		if (newpos)
1340 			hlist_add_behind_rcu(&policy->bydst, newpos);
1341 		else
1342 			hlist_add_head_rcu(&policy->bydst, chain);
1343 	}
1344 
1345 out_unlock:
1346 	__xfrm_policy_inexact_flush(net);
1347 	write_seqcount_end(&xfrm_policy_hash_generation);
1348 	spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
1349 
1350 	mutex_unlock(&hash_resize_mutex);
1351 }
1352 
1353 void xfrm_policy_hash_rebuild(struct net *net)
1354 {
1355 	schedule_work(&net->xfrm.policy_hthresh.work);
1356 }
1357 EXPORT_SYMBOL(xfrm_policy_hash_rebuild);
1358 
1359 /* Generate new index... KAME seems to generate them ordered by cost,
1360  * at the price of completely unpredictable rule ordering. That will not do here. */
1361 static u32 xfrm_gen_index(struct net *net, int dir, u32 index)
1362 {
1363 	static u32 idx_generator;
1364 
1365 	for (;;) {
1366 		struct hlist_head *list;
1367 		struct xfrm_policy *p;
1368 		u32 idx;
1369 		int found;
1370 
1371 		if (!index) {
1372 			idx = (idx_generator | dir);
1373 			idx_generator += 8;
1374 		} else {
1375 			idx = index;
1376 			index = 0;
1377 		}
1378 
1379 		if (idx == 0)
1380 			idx = 8;
1381 		list = net->xfrm.policy_byidx + idx_hash(net, idx);
1382 		found = 0;
1383 		hlist_for_each_entry(p, list, byidx) {
1384 			if (p->index == idx) {
1385 				found = 1;
1386 				break;
1387 			}
1388 		}
1389 		if (!found)
1390 			return idx;
1391 	}
1392 }
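/* The index carries the direction in its low three bits (idx_generator is
 * advanced in steps of 8), so xfrm_policy_id2dir() recovers it as index & 7;
 * the loop above simply retries until the candidate index is not already
 * present in the policy_byidx hash.
 */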
1393 
1394 static inline int selector_cmp(struct xfrm_selector *s1, struct xfrm_selector *s2)
1395 {
1396 	u32 *p1 = (u32 *) s1;
1397 	u32 *p2 = (u32 *) s2;
1398 	int len = sizeof(struct xfrm_selector) / sizeof(u32);
1399 	int i;
1400 
1401 	for (i = 0; i < len; i++) {
1402 		if (p1[i] != p2[i])
1403 			return 1;
1404 	}
1405 
1406 	return 0;
1407 }
1408 
1409 static void xfrm_policy_requeue(struct xfrm_policy *old,
1410 				struct xfrm_policy *new)
1411 {
1412 	struct xfrm_policy_queue *pq = &old->polq;
1413 	struct sk_buff_head list;
1414 
1415 	if (skb_queue_empty(&pq->hold_queue))
1416 		return;
1417 
1418 	__skb_queue_head_init(&list);
1419 
1420 	spin_lock_bh(&pq->hold_queue.lock);
1421 	skb_queue_splice_init(&pq->hold_queue, &list);
1422 	if (del_timer(&pq->hold_timer))
1423 		xfrm_pol_put(old);
1424 	spin_unlock_bh(&pq->hold_queue.lock);
1425 
1426 	pq = &new->polq;
1427 
1428 	spin_lock_bh(&pq->hold_queue.lock);
1429 	skb_queue_splice(&list, &pq->hold_queue);
1430 	pq->timeout = XFRM_QUEUE_TMO_MIN;
1431 	if (!mod_timer(&pq->hold_timer, jiffies))
1432 		xfrm_pol_hold(new);
1433 	spin_unlock_bh(&pq->hold_queue.lock);
1434 }
1435 
1436 static bool xfrm_policy_mark_match(struct xfrm_policy *policy,
1437 				   struct xfrm_policy *pol)
1438 {
1439 	if (policy->mark.v == pol->mark.v &&
1440 	    policy->priority == pol->priority)
1441 		return true;
1442 
1443 	return false;
1444 }
1445 
1446 static u32 xfrm_pol_bin_key(const void *data, u32 len, u32 seed)
1447 {
1448 	const struct xfrm_pol_inexact_key *k = data;
1449 	u32 a = k->type << 24 | k->dir << 16 | k->family;
1450 
1451 	return jhash_3words(a, k->if_id, net_hash_mix(read_pnet(&k->net)),
1452 			    seed);
1453 }
1454 
1455 static u32 xfrm_pol_bin_obj(const void *data, u32 len, u32 seed)
1456 {
1457 	const struct xfrm_pol_inexact_bin *b = data;
1458 
1459 	return xfrm_pol_bin_key(&b->k, 0, seed);
1460 }
1461 
1462 static int xfrm_pol_bin_cmp(struct rhashtable_compare_arg *arg,
1463 			    const void *ptr)
1464 {
1465 	const struct xfrm_pol_inexact_key *key = arg->key;
1466 	const struct xfrm_pol_inexact_bin *b = ptr;
1467 	int ret;
1468 
1469 	if (!net_eq(read_pnet(&b->k.net), read_pnet(&key->net)))
1470 		return -1;
1471 
1472 	ret = b->k.dir ^ key->dir;
1473 	if (ret)
1474 		return ret;
1475 
1476 	ret = b->k.type ^ key->type;
1477 	if (ret)
1478 		return ret;
1479 
1480 	ret = b->k.family ^ key->family;
1481 	if (ret)
1482 		return ret;
1483 
1484 	return b->k.if_id ^ key->if_id;
1485 }
1486 
1487 static const struct rhashtable_params xfrm_pol_inexact_params = {
1488 	.head_offset		= offsetof(struct xfrm_pol_inexact_bin, head),
1489 	.hashfn			= xfrm_pol_bin_key,
1490 	.obj_hashfn		= xfrm_pol_bin_obj,
1491 	.obj_cmpfn		= xfrm_pol_bin_cmp,
1492 	.automatic_shrinking	= true,
1493 };
1494 
1495 static void xfrm_policy_insert_inexact_list(struct hlist_head *chain,
1496 					    struct xfrm_policy *policy)
1497 {
1498 	struct xfrm_policy *pol, *delpol = NULL;
1499 	struct hlist_node *newpos = NULL;
1500 	int i = 0;
1501 
1502 	hlist_for_each_entry(pol, chain, bydst_inexact_list) {
1503 		if (pol->type == policy->type &&
1504 		    pol->if_id == policy->if_id &&
1505 		    !selector_cmp(&pol->selector, &policy->selector) &&
1506 		    xfrm_policy_mark_match(policy, pol) &&
1507 		    xfrm_sec_ctx_match(pol->security, policy->security) &&
1508 		    !WARN_ON(delpol)) {
1509 			delpol = pol;
1510 			if (policy->priority > pol->priority)
1511 				continue;
1512 		} else if (policy->priority >= pol->priority) {
1513 			newpos = &pol->bydst_inexact_list;
1514 			continue;
1515 		}
1516 		if (delpol)
1517 			break;
1518 	}
1519 
1520 	if (newpos)
1521 		hlist_add_behind_rcu(&policy->bydst_inexact_list, newpos);
1522 	else
1523 		hlist_add_head_rcu(&policy->bydst_inexact_list, chain);
1524 
1525 	hlist_for_each_entry(pol, chain, bydst_inexact_list) {
1526 		pol->pos = i;
1527 		i++;
1528 	}
1529 }
1530 
1531 static struct xfrm_policy *xfrm_policy_insert_list(struct hlist_head *chain,
1532 						   struct xfrm_policy *policy,
1533 						   bool excl)
1534 {
1535 	struct xfrm_policy *pol, *newpos = NULL, *delpol = NULL;
1536 
1537 	hlist_for_each_entry(pol, chain, bydst) {
1538 		if (pol->type == policy->type &&
1539 		    pol->if_id == policy->if_id &&
1540 		    !selector_cmp(&pol->selector, &policy->selector) &&
1541 		    xfrm_policy_mark_match(policy, pol) &&
1542 		    xfrm_sec_ctx_match(pol->security, policy->security) &&
1543 		    !WARN_ON(delpol)) {
1544 			if (excl)
1545 				return ERR_PTR(-EEXIST);
1546 			delpol = pol;
1547 			if (policy->priority > pol->priority)
1548 				continue;
1549 		} else if (policy->priority >= pol->priority) {
1550 			newpos = pol;
1551 			continue;
1552 		}
1553 		if (delpol)
1554 			break;
1555 	}
1556 
1557 	if (newpos)
1558 		hlist_add_behind_rcu(&policy->bydst, &newpos->bydst);
1559 	else
1560 		hlist_add_head_rcu(&policy->bydst, chain);
1561 
1562 	return delpol;
1563 }
1564 
1565 int xfrm_policy_insert(int dir, struct xfrm_policy *policy, int excl)
1566 {
1567 	struct net *net = xp_net(policy);
1568 	struct xfrm_policy *delpol;
1569 	struct hlist_head *chain;
1570 
1571 	spin_lock_bh(&net->xfrm.xfrm_policy_lock);
1572 	chain = policy_hash_bysel(net, &policy->selector, policy->family, dir);
1573 	if (chain)
1574 		delpol = xfrm_policy_insert_list(chain, policy, excl);
1575 	else
1576 		delpol = xfrm_policy_inexact_insert(policy, dir, excl);
1577 
1578 	if (IS_ERR(delpol)) {
1579 		spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
1580 		return PTR_ERR(delpol);
1581 	}
1582 
1583 	__xfrm_policy_link(policy, dir);
1584 
1585 	/* After previous checking, family can either be AF_INET or AF_INET6 */
1586 	if (policy->family == AF_INET)
1587 		rt_genid_bump_ipv4(net);
1588 	else
1589 		rt_genid_bump_ipv6(net);
1590 
1591 	if (delpol) {
1592 		xfrm_policy_requeue(delpol, policy);
1593 		__xfrm_policy_unlink(delpol, dir);
1594 	}
1595 	policy->index = delpol ? delpol->index : xfrm_gen_index(net, dir, policy->index);
1596 	hlist_add_head(&policy->byidx, net->xfrm.policy_byidx+idx_hash(net, policy->index));
1597 	policy->curlft.add_time = ktime_get_real_seconds();
1598 	policy->curlft.use_time = 0;
1599 	if (!mod_timer(&policy->timer, jiffies + HZ))
1600 		xfrm_pol_hold(policy);
1601 	spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
1602 
1603 	if (delpol)
1604 		xfrm_policy_kill(delpol);
1605 	else if (xfrm_bydst_should_resize(net, dir, NULL))
1606 		schedule_work(&net->xfrm.policy_hash_work);
1607 
1608 	return 0;
1609 }
1610 EXPORT_SYMBOL(xfrm_policy_insert);
1611 
1612 static struct xfrm_policy *
1613 __xfrm_policy_bysel_ctx(struct hlist_head *chain, u32 mark, u32 if_id,
1614 			u8 type, int dir,
1615 			struct xfrm_selector *sel,
1616 			struct xfrm_sec_ctx *ctx)
1617 {
1618 	struct xfrm_policy *pol;
1619 
1620 	if (!chain)
1621 		return NULL;
1622 
1623 	hlist_for_each_entry(pol, chain, bydst) {
1624 		if (pol->type == type &&
1625 		    pol->if_id == if_id &&
1626 		    (mark & pol->mark.m) == pol->mark.v &&
1627 		    !selector_cmp(sel, &pol->selector) &&
1628 		    xfrm_sec_ctx_match(ctx, pol->security))
1629 			return pol;
1630 	}
1631 
1632 	return NULL;
1633 }
1634 
1635 struct xfrm_policy *xfrm_policy_bysel_ctx(struct net *net, u32 mark, u32 if_id,
1636 					  u8 type, int dir,
1637 					  struct xfrm_selector *sel,
1638 					  struct xfrm_sec_ctx *ctx, int delete,
1639 					  int *err)
1640 {
1641 	struct xfrm_pol_inexact_bin *bin = NULL;
1642 	struct xfrm_policy *pol, *ret = NULL;
1643 	struct hlist_head *chain;
1644 
1645 	*err = 0;
1646 	spin_lock_bh(&net->xfrm.xfrm_policy_lock);
1647 	chain = policy_hash_bysel(net, sel, sel->family, dir);
1648 	if (!chain) {
1649 		struct xfrm_pol_inexact_candidates cand;
1650 		int i;
1651 
1652 		bin = xfrm_policy_inexact_lookup(net, type,
1653 						 sel->family, dir, if_id);
1654 		if (!bin) {
1655 			spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
1656 			return NULL;
1657 		}
1658 
1659 		if (!xfrm_policy_find_inexact_candidates(&cand, bin,
1660 							 &sel->saddr,
1661 							 &sel->daddr)) {
1662 			spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
1663 			return NULL;
1664 		}
1665 
1666 		pol = NULL;
1667 		for (i = 0; i < ARRAY_SIZE(cand.res); i++) {
1668 			struct xfrm_policy *tmp;
1669 
1670 			tmp = __xfrm_policy_bysel_ctx(cand.res[i], mark,
1671 						      if_id, type, dir,
1672 						      sel, ctx);
1673 			if (!tmp)
1674 				continue;
1675 
1676 			if (!pol || tmp->pos < pol->pos)
1677 				pol = tmp;
1678 		}
1679 	} else {
1680 		pol = __xfrm_policy_bysel_ctx(chain, mark, if_id, type, dir,
1681 					      sel, ctx);
1682 	}
1683 
1684 	if (pol) {
1685 		xfrm_pol_hold(pol);
1686 		if (delete) {
1687 			*err = security_xfrm_policy_delete(pol->security);
1688 			if (*err) {
1689 				spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
1690 				return pol;
1691 			}
1692 			__xfrm_policy_unlink(pol, dir);
1693 		}
1694 		ret = pol;
1695 	}
1696 	spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
1697 
1698 	if (ret && delete)
1699 		xfrm_policy_kill(ret);
1700 	if (bin && delete)
1701 		xfrm_policy_inexact_prune_bin(bin);
1702 	return ret;
1703 }
1704 EXPORT_SYMBOL(xfrm_policy_bysel_ctx);
1705 
1706 struct xfrm_policy *xfrm_policy_byid(struct net *net, u32 mark, u32 if_id,
1707 				     u8 type, int dir, u32 id, int delete,
1708 				     int *err)
1709 {
1710 	struct xfrm_policy *pol, *ret;
1711 	struct hlist_head *chain;
1712 
1713 	*err = -ENOENT;
1714 	if (xfrm_policy_id2dir(id) != dir)
1715 		return NULL;
1716 
1717 	*err = 0;
1718 	spin_lock_bh(&net->xfrm.xfrm_policy_lock);
1719 	chain = net->xfrm.policy_byidx + idx_hash(net, id);
1720 	ret = NULL;
1721 	hlist_for_each_entry(pol, chain, byidx) {
1722 		if (pol->type == type && pol->index == id &&
1723 		    pol->if_id == if_id &&
1724 		    (mark & pol->mark.m) == pol->mark.v) {
1725 			xfrm_pol_hold(pol);
1726 			if (delete) {
1727 				*err = security_xfrm_policy_delete(
1728 								pol->security);
1729 				if (*err) {
1730 					spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
1731 					return pol;
1732 				}
1733 				__xfrm_policy_unlink(pol, dir);
1734 			}
1735 			ret = pol;
1736 			break;
1737 		}
1738 	}
1739 	spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
1740 
1741 	if (ret && delete)
1742 		xfrm_policy_kill(ret);
1743 	return ret;
1744 }
1745 EXPORT_SYMBOL(xfrm_policy_byid);
1746 
1747 #ifdef CONFIG_SECURITY_NETWORK_XFRM
1748 static inline int
1749 xfrm_policy_flush_secctx_check(struct net *net, u8 type, bool task_valid)
1750 {
1751 	struct xfrm_policy *pol;
1752 	int err = 0;
1753 
1754 	list_for_each_entry(pol, &net->xfrm.policy_all, walk.all) {
1755 		if (pol->walk.dead ||
1756 		    xfrm_policy_id2dir(pol->index) >= XFRM_POLICY_MAX ||
1757 		    pol->type != type)
1758 			continue;
1759 
1760 		err = security_xfrm_policy_delete(pol->security);
1761 		if (err) {
1762 			xfrm_audit_policy_delete(pol, 0, task_valid);
1763 			return err;
1764 		}
1765 	}
1766 	return err;
1767 }
1768 #else
1769 static inline int
1770 xfrm_policy_flush_secctx_check(struct net *net, u8 type, bool task_valid)
1771 {
1772 	return 0;
1773 }
1774 #endif
1775 
1776 int xfrm_policy_flush(struct net *net, u8 type, bool task_valid)
1777 {
1778 	int dir, err = 0, cnt = 0;
1779 	struct xfrm_policy *pol;
1780 
1781 	spin_lock_bh(&net->xfrm.xfrm_policy_lock);
1782 
1783 	err = xfrm_policy_flush_secctx_check(net, type, task_valid);
1784 	if (err)
1785 		goto out;
1786 
1787 again:
1788 	list_for_each_entry(pol, &net->xfrm.policy_all, walk.all) {
1789 		dir = xfrm_policy_id2dir(pol->index);
1790 		if (pol->walk.dead ||
1791 		    dir >= XFRM_POLICY_MAX ||
1792 		    pol->type != type)
1793 			continue;
1794 
1795 		__xfrm_policy_unlink(pol, dir);
1796 		spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
1797 		cnt++;
1798 		xfrm_audit_policy_delete(pol, 1, task_valid);
1799 		xfrm_policy_kill(pol);
1800 		spin_lock_bh(&net->xfrm.xfrm_policy_lock);
1801 		goto again;
1802 	}
1803 	if (cnt)
1804 		__xfrm_policy_inexact_flush(net);
1805 	else
1806 		err = -ESRCH;
1807 out:
1808 	spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
1809 	return err;
1810 }
1811 EXPORT_SYMBOL(xfrm_policy_flush);
1812 
1813 int xfrm_policy_walk(struct net *net, struct xfrm_policy_walk *walk,
1814 		     int (*func)(struct xfrm_policy *, int, int, void*),
1815 		     void *data)
1816 {
1817 	struct xfrm_policy *pol;
1818 	struct xfrm_policy_walk_entry *x;
1819 	int error = 0;
1820 
1821 	if (walk->type >= XFRM_POLICY_TYPE_MAX &&
1822 	    walk->type != XFRM_POLICY_TYPE_ANY)
1823 		return -EINVAL;
1824 
1825 	if (list_empty(&walk->walk.all) && walk->seq != 0)
1826 		return 0;
1827 
1828 	spin_lock_bh(&net->xfrm.xfrm_policy_lock);
1829 	if (list_empty(&walk->walk.all))
1830 		x = list_first_entry(&net->xfrm.policy_all, struct xfrm_policy_walk_entry, all);
1831 	else
1832 		x = list_first_entry(&walk->walk.all,
1833 				     struct xfrm_policy_walk_entry, all);
1834 
1835 	list_for_each_entry_from(x, &net->xfrm.policy_all, all) {
1836 		if (x->dead)
1837 			continue;
1838 		pol = container_of(x, struct xfrm_policy, walk);
1839 		if (walk->type != XFRM_POLICY_TYPE_ANY &&
1840 		    walk->type != pol->type)
1841 			continue;
1842 		error = func(pol, xfrm_policy_id2dir(pol->index),
1843 			     walk->seq, data);
1844 		if (error) {
1845 			list_move_tail(&walk->walk.all, &x->all);
1846 			goto out;
1847 		}
1848 		walk->seq++;
1849 	}
1850 	if (walk->seq == 0) {
1851 		error = -ENOENT;
1852 		goto out;
1853 	}
1854 	list_del_init(&walk->walk.all);
1855 out:
1856 	spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
1857 	return error;
1858 }
1859 EXPORT_SYMBOL(xfrm_policy_walk);
1860 
1861 void xfrm_policy_walk_init(struct xfrm_policy_walk *walk, u8 type)
1862 {
1863 	INIT_LIST_HEAD(&walk->walk.all);
1864 	walk->walk.dead = 1;
1865 	walk->type = type;
1866 	walk->seq = 0;
1867 }
1868 EXPORT_SYMBOL(xfrm_policy_walk_init);
1869 
1870 void xfrm_policy_walk_done(struct xfrm_policy_walk *walk, struct net *net)
1871 {
1872 	if (list_empty(&walk->walk.all))
1873 		return;
1874 
1875 	spin_lock_bh(&net->xfrm.xfrm_policy_lock); /*FIXME where is net? */
1876 	list_del(&walk->walk.all);
1877 	spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
1878 }
1879 EXPORT_SYMBOL(xfrm_policy_walk_done);
1880 
1881 /*
1882  * Find policy to apply to this flow.
1883  *
1884  * Returns 0 if policy found, else an -errno.
1885  */
1886 static int xfrm_policy_match(const struct xfrm_policy *pol,
1887 			     const struct flowi *fl,
1888 			     u8 type, u16 family, int dir, u32 if_id)
1889 {
1890 	const struct xfrm_selector *sel = &pol->selector;
1891 	int ret = -ESRCH;
1892 	bool match;
1893 
1894 	if (pol->family != family ||
1895 	    pol->if_id != if_id ||
1896 	    (fl->flowi_mark & pol->mark.m) != pol->mark.v ||
1897 	    pol->type != type)
1898 		return ret;
1899 
1900 	match = xfrm_selector_match(sel, fl, family);
1901 	if (match)
1902 		ret = security_xfrm_policy_lookup(pol->security, fl->flowi_secid,
1903 						  dir);
1904 	return ret;
1905 }
1906 
1907 static struct xfrm_pol_inexact_node *
1908 xfrm_policy_lookup_inexact_addr(const struct rb_root *r,
1909 				seqcount_t *count,
1910 				const xfrm_address_t *addr, u16 family)
1911 {
1912 	const struct rb_node *parent;
1913 	int seq;
1914 
1915 again:
1916 	seq = read_seqcount_begin(count);
1917 
1918 	parent = rcu_dereference_raw(r->rb_node);
1919 	while (parent) {
1920 		struct xfrm_pol_inexact_node *node;
1921 		int delta;
1922 
1923 		node = rb_entry(parent, struct xfrm_pol_inexact_node, node);
1924 
1925 		delta = xfrm_policy_addr_delta(addr, &node->addr,
1926 					       node->prefixlen, family);
1927 		if (delta < 0) {
1928 			parent = rcu_dereference_raw(parent->rb_left);
1929 			continue;
1930 		} else if (delta > 0) {
1931 			parent = rcu_dereference_raw(parent->rb_right);
1932 			continue;
1933 		}
1934 
1935 		return node;
1936 	}
1937 
1938 	if (read_seqcount_retry(count, seq))
1939 		goto again;
1940 
1941 	return NULL;
1942 }
1943 
1944 static bool
1945 xfrm_policy_find_inexact_candidates(struct xfrm_pol_inexact_candidates *cand,
1946 				    struct xfrm_pol_inexact_bin *b,
1947 				    const xfrm_address_t *saddr,
1948 				    const xfrm_address_t *daddr)
1949 {
1950 	struct xfrm_pol_inexact_node *n;
1951 	u16 family;
1952 
1953 	if (!b)
1954 		return false;
1955 
1956 	family = b->k.family;
1957 	memset(cand, 0, sizeof(*cand));
1958 	cand->res[XFRM_POL_CAND_ANY] = &b->hhead;
1959 
1960 	n = xfrm_policy_lookup_inexact_addr(&b->root_d, &b->count, daddr,
1961 					    family);
1962 	if (n) {
1963 		cand->res[XFRM_POL_CAND_DADDR] = &n->hhead;
1964 		n = xfrm_policy_lookup_inexact_addr(&n->root, &b->count, saddr,
1965 						    family);
1966 		if (n)
1967 			cand->res[XFRM_POL_CAND_BOTH] = &n->hhead;
1968 	}
1969 
1970 	n = xfrm_policy_lookup_inexact_addr(&b->root_s, &b->count, saddr,
1971 					    family);
1972 	if (n)
1973 		cand->res[XFRM_POL_CAND_SADDR] = &n->hhead;
1974 
1975 	return true;
1976 }
1977 
1978 static struct xfrm_pol_inexact_bin *
1979 xfrm_policy_inexact_lookup_rcu(struct net *net, u8 type, u16 family,
1980 			       u8 dir, u32 if_id)
1981 {
1982 	struct xfrm_pol_inexact_key k = {
1983 		.family = family,
1984 		.type = type,
1985 		.dir = dir,
1986 		.if_id = if_id,
1987 	};
1988 
1989 	write_pnet(&k.net, net);
1990 
1991 	return rhashtable_lookup(&xfrm_policy_inexact_table, &k,
1992 				 xfrm_pol_inexact_params);
1993 }
1994 
1995 static struct xfrm_pol_inexact_bin *
1996 xfrm_policy_inexact_lookup(struct net *net, u8 type, u16 family,
1997 			   u8 dir, u32 if_id)
1998 {
1999 	struct xfrm_pol_inexact_bin *bin;
2000 
2001 	lockdep_assert_held(&net->xfrm.xfrm_policy_lock);
2002 
2003 	rcu_read_lock();
2004 	bin = xfrm_policy_inexact_lookup_rcu(net, type, family, dir, if_id);
2005 	rcu_read_unlock();
2006 
2007 	return bin;
2008 }
2009 
2010 static struct xfrm_policy *
2011 __xfrm_policy_eval_candidates(struct hlist_head *chain,
2012 			      struct xfrm_policy *prefer,
2013 			      const struct flowi *fl,
2014 			      u8 type, u16 family, int dir, u32 if_id)
2015 {
2016 	u32 priority = prefer ? prefer->priority : ~0u;
2017 	struct xfrm_policy *pol;
2018 
2019 	if (!chain)
2020 		return NULL;
2021 
2022 	hlist_for_each_entry_rcu(pol, chain, bydst) {
2023 		int err;
2024 
2025 		if (pol->priority > priority)
2026 			break;
2027 
2028 		err = xfrm_policy_match(pol, fl, type, family, dir, if_id);
2029 		if (err) {
2030 			if (err != -ESRCH)
2031 				return ERR_PTR(err);
2032 
2033 			continue;
2034 		}
2035 
2036 		if (prefer) {
2037 			/* matches.  Is it older than *prefer? */
2038 			if (pol->priority == priority &&
2039 			    prefer->pos < pol->pos)
2040 				return prefer;
2041 		}
2042 
2043 		return pol;
2044 	}
2045 
2046 	return NULL;
2047 }
2048 
2049 static struct xfrm_policy *
2050 xfrm_policy_eval_candidates(struct xfrm_pol_inexact_candidates *cand,
2051 			    struct xfrm_policy *prefer,
2052 			    const struct flowi *fl,
2053 			    u8 type, u16 family, int dir, u32 if_id)
2054 {
2055 	struct xfrm_policy *tmp;
2056 	int i;
2057 
2058 	for (i = 0; i < ARRAY_SIZE(cand->res); i++) {
2059 		tmp = __xfrm_policy_eval_candidates(cand->res[i],
2060 						    prefer,
2061 						    fl, type, family, dir,
2062 						    if_id);
2063 		if (!tmp)
2064 			continue;
2065 
2066 		if (IS_ERR(tmp))
2067 			return tmp;
2068 		prefer = tmp;
2069 	}
2070 
2071 	return prefer;
2072 }
2073 
2074 static struct xfrm_policy *xfrm_policy_lookup_bytype(struct net *net, u8 type,
2075 						     const struct flowi *fl,
2076 						     u16 family, u8 dir,
2077 						     u32 if_id)
2078 {
2079 	struct xfrm_pol_inexact_candidates cand;
2080 	const xfrm_address_t *daddr, *saddr;
2081 	struct xfrm_pol_inexact_bin *bin;
2082 	struct xfrm_policy *pol, *ret;
2083 	struct hlist_head *chain;
2084 	unsigned int sequence;
2085 	int err;
2086 
2087 	daddr = xfrm_flowi_daddr(fl, family);
2088 	saddr = xfrm_flowi_saddr(fl, family);
2089 	if (unlikely(!daddr || !saddr))
2090 		return NULL;
2091 
2092 	rcu_read_lock();
2093  retry:
2094 	do {
2095 		sequence = read_seqcount_begin(&xfrm_policy_hash_generation);
2096 		chain = policy_hash_direct(net, daddr, saddr, family, dir);
2097 	} while (read_seqcount_retry(&xfrm_policy_hash_generation, sequence));
2098 
2099 	ret = NULL;
2100 	hlist_for_each_entry_rcu(pol, chain, bydst) {
2101 		err = xfrm_policy_match(pol, fl, type, family, dir, if_id);
2102 		if (err) {
2103 			if (err == -ESRCH)
2104 				continue;
2105 			else {
2106 				ret = ERR_PTR(err);
2107 				goto fail;
2108 			}
2109 		} else {
2110 			ret = pol;
2111 			break;
2112 		}
2113 	}
2114 	bin = xfrm_policy_inexact_lookup_rcu(net, type, family, dir, if_id);
2115 	if (!bin || !xfrm_policy_find_inexact_candidates(&cand, bin, saddr,
2116 							 daddr))
2117 		goto skip_inexact;
2118 
2119 	pol = xfrm_policy_eval_candidates(&cand, ret, fl, type,
2120 					  family, dir, if_id);
2121 	if (pol) {
2122 		ret = pol;
2123 		if (IS_ERR(pol))
2124 			goto fail;
2125 	}
2126 
2127 skip_inexact:
2128 	if (read_seqcount_retry(&xfrm_policy_hash_generation, sequence))
2129 		goto retry;
2130 
2131 	if (ret && !xfrm_pol_hold_rcu(ret))
2132 		goto retry;
2133 fail:
2134 	rcu_read_unlock();
2135 
2136 	return ret;
2137 }
2138 
2139 static struct xfrm_policy *xfrm_policy_lookup(struct net *net,
2140 					      const struct flowi *fl,
2141 					      u16 family, u8 dir, u32 if_id)
2142 {
2143 #ifdef CONFIG_XFRM_SUB_POLICY
2144 	struct xfrm_policy *pol;
2145 
2146 	pol = xfrm_policy_lookup_bytype(net, XFRM_POLICY_TYPE_SUB, fl, family,
2147 					dir, if_id);
2148 	if (pol != NULL)
2149 		return pol;
2150 #endif
2151 	return xfrm_policy_lookup_bytype(net, XFRM_POLICY_TYPE_MAIN, fl, family,
2152 					 dir, if_id);
2153 }
2154 
2155 static struct xfrm_policy *xfrm_sk_policy_lookup(const struct sock *sk, int dir,
2156 						 const struct flowi *fl,
2157 						 u16 family, u32 if_id)
2158 {
2159 	struct xfrm_policy *pol;
2160 
2161 	rcu_read_lock();
2162  again:
2163 	pol = rcu_dereference(sk->sk_policy[dir]);
2164 	if (pol != NULL) {
2165 		bool match;
2166 		int err = 0;
2167 
2168 		if (pol->family != family) {
2169 			pol = NULL;
2170 			goto out;
2171 		}
2172 
2173 		match = xfrm_selector_match(&pol->selector, fl, family);
2174 		if (match) {
2175 			if ((sk->sk_mark & pol->mark.m) != pol->mark.v ||
2176 			    pol->if_id != if_id) {
2177 				pol = NULL;
2178 				goto out;
2179 			}
2180 			err = security_xfrm_policy_lookup(pol->security,
2181 						      fl->flowi_secid,
2182 						      dir);
2183 			if (!err) {
2184 				if (!xfrm_pol_hold_rcu(pol))
2185 					goto again;
2186 			} else if (err == -ESRCH) {
2187 				pol = NULL;
2188 			} else {
2189 				pol = ERR_PTR(err);
2190 			}
2191 		} else
2192 			pol = NULL;
2193 	}
2194 out:
2195 	rcu_read_unlock();
2196 	return pol;
2197 }
2198 
2199 static void __xfrm_policy_link(struct xfrm_policy *pol, int dir)
2200 {
2201 	struct net *net = xp_net(pol);
2202 
2203 	list_add(&pol->walk.all, &net->xfrm.policy_all);
2204 	net->xfrm.policy_count[dir]++;
2205 	xfrm_pol_hold(pol);
2206 }
2207 
2208 static struct xfrm_policy *__xfrm_policy_unlink(struct xfrm_policy *pol,
2209 						int dir)
2210 {
2211 	struct net *net = xp_net(pol);
2212 
2213 	if (list_empty(&pol->walk.all))
2214 		return NULL;
2215 
2216 	/* Socket policies are not hashed. */
2217 	if (!hlist_unhashed(&pol->bydst)) {
2218 		hlist_del_rcu(&pol->bydst);
2219 		hlist_del_init(&pol->bydst_inexact_list);
2220 		hlist_del(&pol->byidx);
2221 	}
2222 
2223 	list_del_init(&pol->walk.all);
2224 	net->xfrm.policy_count[dir]--;
2225 
2226 	return pol;
2227 }
2228 
2229 static void xfrm_sk_policy_link(struct xfrm_policy *pol, int dir)
2230 {
2231 	__xfrm_policy_link(pol, XFRM_POLICY_MAX + dir);
2232 }
2233 
2234 static void xfrm_sk_policy_unlink(struct xfrm_policy *pol, int dir)
2235 {
2236 	__xfrm_policy_unlink(pol, XFRM_POLICY_MAX + dir);
2237 }
2238 
2239 int xfrm_policy_delete(struct xfrm_policy *pol, int dir)
2240 {
2241 	struct net *net = xp_net(pol);
2242 
2243 	spin_lock_bh(&net->xfrm.xfrm_policy_lock);
2244 	pol = __xfrm_policy_unlink(pol, dir);
2245 	spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
2246 	if (pol) {
2247 		xfrm_policy_kill(pol);
2248 		return 0;
2249 	}
2250 	return -ENOENT;
2251 }
2252 EXPORT_SYMBOL(xfrm_policy_delete);
2253 
2254 int xfrm_sk_policy_insert(struct sock *sk, int dir, struct xfrm_policy *pol)
2255 {
2256 	struct net *net = sock_net(sk);
2257 	struct xfrm_policy *old_pol;
2258 
2259 #ifdef CONFIG_XFRM_SUB_POLICY
2260 	if (pol && pol->type != XFRM_POLICY_TYPE_MAIN)
2261 		return -EINVAL;
2262 #endif
2263 
2264 	spin_lock_bh(&net->xfrm.xfrm_policy_lock);
2265 	old_pol = rcu_dereference_protected(sk->sk_policy[dir],
2266 				lockdep_is_held(&net->xfrm.xfrm_policy_lock));
2267 	if (pol) {
2268 		pol->curlft.add_time = ktime_get_real_seconds();
2269 		pol->index = xfrm_gen_index(net, XFRM_POLICY_MAX+dir, 0);
2270 		xfrm_sk_policy_link(pol, dir);
2271 	}
2272 	rcu_assign_pointer(sk->sk_policy[dir], pol);
2273 	if (old_pol) {
2274 		if (pol)
2275 			xfrm_policy_requeue(old_pol, pol);
2276 
2277 		/* Unlinking always succeeds. This is the only function
2278 		 * allowed to delete or replace a socket policy.
2279 		 */
2280 		xfrm_sk_policy_unlink(old_pol, dir);
2281 	}
2282 	spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
2283 
2284 	if (old_pol) {
2285 		xfrm_policy_kill(old_pol);
2286 	}
2287 	return 0;
2288 }
2289 
2290 static struct xfrm_policy *clone_policy(const struct xfrm_policy *old, int dir)
2291 {
2292 	struct xfrm_policy *newp = xfrm_policy_alloc(xp_net(old), GFP_ATOMIC);
2293 	struct net *net = xp_net(old);
2294 
2295 	if (newp) {
2296 		newp->selector = old->selector;
2297 		if (security_xfrm_policy_clone(old->security,
2298 					       &newp->security)) {
2299 			kfree(newp);
2300 			return NULL;  /* ENOMEM */
2301 		}
2302 		newp->lft = old->lft;
2303 		newp->curlft = old->curlft;
2304 		newp->mark = old->mark;
2305 		newp->if_id = old->if_id;
2306 		newp->action = old->action;
2307 		newp->flags = old->flags;
2308 		newp->xfrm_nr = old->xfrm_nr;
2309 		newp->index = old->index;
2310 		newp->type = old->type;
2311 		newp->family = old->family;
2312 		memcpy(newp->xfrm_vec, old->xfrm_vec,
2313 		       newp->xfrm_nr*sizeof(struct xfrm_tmpl));
2314 		spin_lock_bh(&net->xfrm.xfrm_policy_lock);
2315 		xfrm_sk_policy_link(newp, dir);
2316 		spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
2317 		xfrm_pol_put(newp);
2318 	}
2319 	return newp;
2320 }
2321 
2322 int __xfrm_sk_clone_policy(struct sock *sk, const struct sock *osk)
2323 {
2324 	const struct xfrm_policy *p;
2325 	struct xfrm_policy *np;
2326 	int i, ret = 0;
2327 
2328 	rcu_read_lock();
2329 	for (i = 0; i < 2; i++) {
2330 		p = rcu_dereference(osk->sk_policy[i]);
2331 		if (p) {
2332 			np = clone_policy(p, i);
2333 			if (unlikely(!np)) {
2334 				ret = -ENOMEM;
2335 				break;
2336 			}
2337 			rcu_assign_pointer(sk->sk_policy[i], np);
2338 		}
2339 	}
2340 	rcu_read_unlock();
2341 	return ret;
2342 }
2343 
2344 static int
2345 xfrm_get_saddr(struct net *net, int oif, xfrm_address_t *local,
2346 	       xfrm_address_t *remote, unsigned short family, u32 mark)
2347 {
2348 	int err;
2349 	const struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
2350 
2351 	if (unlikely(afinfo == NULL))
2352 		return -EINVAL;
2353 	err = afinfo->get_saddr(net, oif, local, remote, mark);
2354 	rcu_read_unlock();
2355 	return err;
2356 }
2357 
2358 /* Resolve list of templates for the flow, given policy. */
2359 
2360 static int
2361 xfrm_tmpl_resolve_one(struct xfrm_policy *policy, const struct flowi *fl,
2362 		      struct xfrm_state **xfrm, unsigned short family)
2363 {
2364 	struct net *net = xp_net(policy);
2365 	int nx;
2366 	int i, error;
2367 	xfrm_address_t *daddr = xfrm_flowi_daddr(fl, family);
2368 	xfrm_address_t *saddr = xfrm_flowi_saddr(fl, family);
2369 	xfrm_address_t tmp;
2370 
2371 	for (nx = 0, i = 0; i < policy->xfrm_nr; i++) {
2372 		struct xfrm_state *x;
2373 		xfrm_address_t *remote = daddr;
2374 		xfrm_address_t *local  = saddr;
2375 		struct xfrm_tmpl *tmpl = &policy->xfrm_vec[i];
2376 
2377 		if (tmpl->mode == XFRM_MODE_TUNNEL ||
2378 		    tmpl->mode == XFRM_MODE_BEET) {
2379 			remote = &tmpl->id.daddr;
2380 			local = &tmpl->saddr;
2381 			if (xfrm_addr_any(local, tmpl->encap_family)) {
2382 				error = xfrm_get_saddr(net, fl->flowi_oif,
2383 						       &tmp, remote,
2384 						       tmpl->encap_family, 0);
2385 				if (error)
2386 					goto fail;
2387 				local = &tmp;
2388 			}
2389 		}
2390 
2391 		x = xfrm_state_find(remote, local, fl, tmpl, policy, &error,
2392 				    family, policy->if_id);
2393 
2394 		if (x && x->km.state == XFRM_STATE_VALID) {
2395 			xfrm[nx++] = x;
2396 			daddr = remote;
2397 			saddr = local;
2398 			continue;
2399 		}
2400 		if (x) {
2401 			error = (x->km.state == XFRM_STATE_ERROR ?
2402 				 -EINVAL : -EAGAIN);
2403 			xfrm_state_put(x);
2404 		} else if (error == -ESRCH) {
2405 			error = -EAGAIN;
2406 		}
2407 
2408 		if (!tmpl->optional)
2409 			goto fail;
2410 	}
2411 	return nx;
2412 
2413 fail:
2414 	for (nx--; nx >= 0; nx--)
2415 		xfrm_state_put(xfrm[nx]);
2416 	return error;
2417 }
2418 
2419 static int
2420 xfrm_tmpl_resolve(struct xfrm_policy **pols, int npols, const struct flowi *fl,
2421 		  struct xfrm_state **xfrm, unsigned short family)
2422 {
2423 	struct xfrm_state *tp[XFRM_MAX_DEPTH];
2424 	struct xfrm_state **tpp = (npols > 1) ? tp : xfrm;
2425 	int cnx = 0;
2426 	int error;
2427 	int ret;
2428 	int i;
2429 
2430 	for (i = 0; i < npols; i++) {
2431 		if (cnx + pols[i]->xfrm_nr >= XFRM_MAX_DEPTH) {
2432 			error = -ENOBUFS;
2433 			goto fail;
2434 		}
2435 
2436 		ret = xfrm_tmpl_resolve_one(pols[i], fl, &tpp[cnx], family);
2437 		if (ret < 0) {
2438 			error = ret;
2439 			goto fail;
2440 		} else
2441 			cnx += ret;
2442 	}
2443 
2444 	/* found states are sorted for outbound processing */
2445 	if (npols > 1)
2446 		xfrm_state_sort(xfrm, tpp, cnx, family);
2447 
2448 	return cnx;
2449 
2450  fail:
2451 	for (cnx--; cnx >= 0; cnx--)
2452 		xfrm_state_put(tpp[cnx]);
2453 	return error;
2454 
2455 }
2456 
2457 static int xfrm_get_tos(const struct flowi *fl, int family)
2458 {
2459 	if (family == AF_INET)
2460 		return IPTOS_RT_MASK & fl->u.ip4.flowi4_tos;
2461 
2462 	return 0;
2463 }
2464 
2465 static inline struct xfrm_dst *xfrm_alloc_dst(struct net *net, int family)
2466 {
2467 	const struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
2468 	struct dst_ops *dst_ops;
2469 	struct xfrm_dst *xdst;
2470 
2471 	if (!afinfo)
2472 		return ERR_PTR(-EINVAL);
2473 
2474 	switch (family) {
2475 	case AF_INET:
2476 		dst_ops = &net->xfrm.xfrm4_dst_ops;
2477 		break;
2478 #if IS_ENABLED(CONFIG_IPV6)
2479 	case AF_INET6:
2480 		dst_ops = &net->xfrm.xfrm6_dst_ops;
2481 		break;
2482 #endif
2483 	default:
2484 		BUG();
2485 	}
2486 	xdst = dst_alloc(dst_ops, NULL, 1, DST_OBSOLETE_NONE, 0);
2487 
2488 	if (likely(xdst)) {
2489 		struct dst_entry *dst = &xdst->u.dst;
2490 
2491 		memset(dst + 1, 0, sizeof(*xdst) - sizeof(*dst));
2492 	} else
2493 		xdst = ERR_PTR(-ENOBUFS);
2494 
2495 	rcu_read_unlock();
2496 
2497 	return xdst;
2498 }
2499 
2500 static void xfrm_init_path(struct xfrm_dst *path, struct dst_entry *dst,
2501 			   int nfheader_len)
2502 {
2503 	if (dst->ops->family == AF_INET6) {
2504 		struct rt6_info *rt = (struct rt6_info *)dst;
2505 		path->path_cookie = rt6_get_cookie(rt);
2506 		path->u.rt6.rt6i_nfheader_len = nfheader_len;
2507 	}
2508 }
2509 
2510 static inline int xfrm_fill_dst(struct xfrm_dst *xdst, struct net_device *dev,
2511 				const struct flowi *fl)
2512 {
2513 	const struct xfrm_policy_afinfo *afinfo =
2514 		xfrm_policy_get_afinfo(xdst->u.dst.ops->family);
2515 	int err;
2516 
2517 	if (!afinfo)
2518 		return -EINVAL;
2519 
2520 	err = afinfo->fill_dst(xdst, dev, fl);
2521 
2522 	rcu_read_unlock();
2523 
2524 	return err;
2525 }
2526 
2527 
2528 /* Allocate chain of dst_entry's, attach known xfrm's, calculate
2529  * all the metrics... Shortly, bundle a bundle.
2530  */
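
/* Illustrative sketch (editorial addition): for nx == 2 resolved states the
 * function below builds, roughly,
 *
 *	xdst0 (u.dst.xfrm = xfrm[0])
 *	    child: xdst1 (u.dst.xfrm = xfrm[1])
 *	        child: dst (the last route that was looked up)
 *
 * Each xdst->route records the route that was current when that level was
 * built, and xdst0->path points at the final underlying route.
 */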
2531 
2532 static struct dst_entry *xfrm_bundle_create(struct xfrm_policy *policy,
2533 					    struct xfrm_state **xfrm,
2534 					    struct xfrm_dst **bundle,
2535 					    int nx,
2536 					    const struct flowi *fl,
2537 					    struct dst_entry *dst)
2538 {
2539 	const struct xfrm_state_afinfo *afinfo;
2540 	const struct xfrm_mode *inner_mode;
2541 	struct net *net = xp_net(policy);
2542 	unsigned long now = jiffies;
2543 	struct net_device *dev;
2544 	struct xfrm_dst *xdst_prev = NULL;
2545 	struct xfrm_dst *xdst0 = NULL;
2546 	int i = 0;
2547 	int err;
2548 	int header_len = 0;
2549 	int nfheader_len = 0;
2550 	int trailer_len = 0;
2551 	int tos;
2552 	int family = policy->selector.family;
2553 	xfrm_address_t saddr, daddr;
2554 
2555 	xfrm_flowi_addr_get(fl, &saddr, &daddr, family);
2556 
2557 	tos = xfrm_get_tos(fl, family);
2558 
2559 	dst_hold(dst);
2560 
2561 	for (; i < nx; i++) {
2562 		struct xfrm_dst *xdst = xfrm_alloc_dst(net, family);
2563 		struct dst_entry *dst1 = &xdst->u.dst;
2564 
2565 		err = PTR_ERR(xdst);
2566 		if (IS_ERR(xdst)) {
2567 			dst_release(dst);
2568 			goto put_states;
2569 		}
2570 
2571 		bundle[i] = xdst;
2572 		if (!xdst_prev)
2573 			xdst0 = xdst;
2574 		else
2575 			/* Ref count is taken during xfrm_alloc_dst();
2576 			 * no need to do dst_clone() on dst1.
2577 			 */
2578 			xfrm_dst_set_child(xdst_prev, &xdst->u.dst);
2579 
2580 		if (xfrm[i]->sel.family == AF_UNSPEC) {
2581 			inner_mode = xfrm_ip2inner_mode(xfrm[i],
2582 							xfrm_af2proto(family));
2583 			if (!inner_mode) {
2584 				err = -EAFNOSUPPORT;
2585 				dst_release(dst);
2586 				goto put_states;
2587 			}
2588 		} else
2589 			inner_mode = &xfrm[i]->inner_mode;
2590 
2591 		xdst->route = dst;
2592 		dst_copy_metrics(dst1, dst);
2593 
2594 		if (xfrm[i]->props.mode != XFRM_MODE_TRANSPORT) {
2595 			__u32 mark = 0;
2596 
2597 			if (xfrm[i]->props.smark.v || xfrm[i]->props.smark.m)
2598 				mark = xfrm_smark_get(fl->flowi_mark, xfrm[i]);
2599 
2600 			family = xfrm[i]->props.family;
2601 			dst = xfrm_dst_lookup(xfrm[i], tos, fl->flowi_oif,
2602 					      &saddr, &daddr, family, mark);
2603 			err = PTR_ERR(dst);
2604 			if (IS_ERR(dst))
2605 				goto put_states;
2606 		} else
2607 			dst_hold(dst);
2608 
2609 		dst1->xfrm = xfrm[i];
2610 		xdst->xfrm_genid = xfrm[i]->genid;
2611 
2612 		dst1->obsolete = DST_OBSOLETE_FORCE_CHK;
2613 		dst1->lastuse = now;
2614 
2615 		dst1->input = dst_discard;
2616 
2617 		rcu_read_lock();
2618 		afinfo = xfrm_state_afinfo_get_rcu(inner_mode->family);
2619 		if (likely(afinfo))
2620 			dst1->output = afinfo->output;
2621 		else
2622 			dst1->output = dst_discard_out;
2623 		rcu_read_unlock();
2624 
2625 		xdst_prev = xdst;
2626 
2627 		header_len += xfrm[i]->props.header_len;
2628 		if (xfrm[i]->type->flags & XFRM_TYPE_NON_FRAGMENT)
2629 			nfheader_len += xfrm[i]->props.header_len;
2630 		trailer_len += xfrm[i]->props.trailer_len;
2631 	}
2632 
2633 	xfrm_dst_set_child(xdst_prev, dst);
2634 	xdst0->path = dst;
2635 
2636 	err = -ENODEV;
2637 	dev = dst->dev;
2638 	if (!dev)
2639 		goto free_dst;
2640 
2641 	xfrm_init_path(xdst0, dst, nfheader_len);
2642 	xfrm_init_pmtu(bundle, nx);
2643 
2644 	for (xdst_prev = xdst0; xdst_prev != (struct xfrm_dst *)dst;
2645 	     xdst_prev = (struct xfrm_dst *) xfrm_dst_child(&xdst_prev->u.dst)) {
2646 		err = xfrm_fill_dst(xdst_prev, dev, fl);
2647 		if (err)
2648 			goto free_dst;
2649 
2650 		xdst_prev->u.dst.header_len = header_len;
2651 		xdst_prev->u.dst.trailer_len = trailer_len;
2652 		header_len -= xdst_prev->u.dst.xfrm->props.header_len;
2653 		trailer_len -= xdst_prev->u.dst.xfrm->props.trailer_len;
2654 	}
2655 
2656 	return &xdst0->u.dst;
2657 
2658 put_states:
2659 	for (; i < nx; i++)
2660 		xfrm_state_put(xfrm[i]);
2661 free_dst:
2662 	if (xdst0)
2663 		dst_release_immediate(&xdst0->u.dst);
2664 
2665 	return ERR_PTR(err);
2666 }
2667 
2668 static int xfrm_expand_policies(const struct flowi *fl, u16 family,
2669 				struct xfrm_policy **pols,
2670 				int *num_pols, int *num_xfrms)
2671 {
2672 	int i;
2673 
2674 	if (*num_pols == 0 || !pols[0]) {
2675 		*num_pols = 0;
2676 		*num_xfrms = 0;
2677 		return 0;
2678 	}
2679 	if (IS_ERR(pols[0]))
2680 		return PTR_ERR(pols[0]);
2681 
2682 	*num_xfrms = pols[0]->xfrm_nr;
2683 
2684 #ifdef CONFIG_XFRM_SUB_POLICY
2685 	if (pols[0] && pols[0]->action == XFRM_POLICY_ALLOW &&
2686 	    pols[0]->type != XFRM_POLICY_TYPE_MAIN) {
2687 		pols[1] = xfrm_policy_lookup_bytype(xp_net(pols[0]),
2688 						    XFRM_POLICY_TYPE_MAIN,
2689 						    fl, family,
2690 						    XFRM_POLICY_OUT,
2691 						    pols[0]->if_id);
2692 		if (pols[1]) {
2693 			if (IS_ERR(pols[1])) {
2694 				xfrm_pols_put(pols, *num_pols);
2695 				return PTR_ERR(pols[1]);
2696 			}
2697 			(*num_pols)++;
2698 			(*num_xfrms) += pols[1]->xfrm_nr;
2699 		}
2700 	}
2701 #endif
2702 	for (i = 0; i < *num_pols; i++) {
2703 		if (pols[i]->action != XFRM_POLICY_ALLOW) {
2704 			*num_xfrms = -1;
2705 			break;
2706 		}
2707 	}
2708 
2709 	return 0;
2710 
2711 }
2712 
2713 static struct xfrm_dst *
2714 xfrm_resolve_and_create_bundle(struct xfrm_policy **pols, int num_pols,
2715 			       const struct flowi *fl, u16 family,
2716 			       struct dst_entry *dst_orig)
2717 {
2718 	struct net *net = xp_net(pols[0]);
2719 	struct xfrm_state *xfrm[XFRM_MAX_DEPTH];
2720 	struct xfrm_dst *bundle[XFRM_MAX_DEPTH];
2721 	struct xfrm_dst *xdst;
2722 	struct dst_entry *dst;
2723 	int err;
2724 
2725 	/* Try to instantiate a bundle */
2726 	err = xfrm_tmpl_resolve(pols, num_pols, fl, xfrm, family);
2727 	if (err <= 0) {
2728 		if (err == 0)
2729 			return NULL;
2730 
2731 		if (err != -EAGAIN)
2732 			XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTPOLERROR);
2733 		return ERR_PTR(err);
2734 	}
2735 
2736 	dst = xfrm_bundle_create(pols[0], xfrm, bundle, err, fl, dst_orig);
2737 	if (IS_ERR(dst)) {
2738 		XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTBUNDLEGENERROR);
2739 		return ERR_CAST(dst);
2740 	}
2741 
2742 	xdst = (struct xfrm_dst *)dst;
2743 	xdst->num_xfrms = err;
2744 	xdst->num_pols = num_pols;
2745 	memcpy(xdst->pols, pols, sizeof(struct xfrm_policy *) * num_pols);
2746 	xdst->policy_genid = atomic_read(&pols[0]->genid);
2747 
2748 	return xdst;
2749 }
2750 
2751 static void xfrm_policy_queue_process(struct timer_list *t)
2752 {
2753 	struct sk_buff *skb;
2754 	struct sock *sk;
2755 	struct dst_entry *dst;
2756 	struct xfrm_policy *pol = from_timer(pol, t, polq.hold_timer);
2757 	struct net *net = xp_net(pol);
2758 	struct xfrm_policy_queue *pq = &pol->polq;
2759 	struct flowi fl;
2760 	struct sk_buff_head list;
2761 
2762 	spin_lock(&pq->hold_queue.lock);
2763 	skb = skb_peek(&pq->hold_queue);
2764 	if (!skb) {
2765 		spin_unlock(&pq->hold_queue.lock);
2766 		goto out;
2767 	}
2768 	dst = skb_dst(skb);
2769 	sk = skb->sk;
2770 	xfrm_decode_session(skb, &fl, dst->ops->family);
2771 	spin_unlock(&pq->hold_queue.lock);
2772 
2773 	dst_hold(xfrm_dst_path(dst));
2774 	dst = xfrm_lookup(net, xfrm_dst_path(dst), &fl, sk, XFRM_LOOKUP_QUEUE);
2775 	if (IS_ERR(dst))
2776 		goto purge_queue;
2777 
2778 	if (dst->flags & DST_XFRM_QUEUE) {
2779 		dst_release(dst);
2780 
2781 		if (pq->timeout >= XFRM_QUEUE_TMO_MAX)
2782 			goto purge_queue;
2783 
2784 		pq->timeout = pq->timeout << 1;
2785 		if (!mod_timer(&pq->hold_timer, jiffies + pq->timeout))
2786 			xfrm_pol_hold(pol);
2787 		goto out;
2788 	}
2789 
2790 	dst_release(dst);
2791 
2792 	__skb_queue_head_init(&list);
2793 
2794 	spin_lock(&pq->hold_queue.lock);
2795 	pq->timeout = 0;
2796 	skb_queue_splice_init(&pq->hold_queue, &list);
2797 	spin_unlock(&pq->hold_queue.lock);
2798 
2799 	while (!skb_queue_empty(&list)) {
2800 		skb = __skb_dequeue(&list);
2801 
2802 		xfrm_decode_session(skb, &fl, skb_dst(skb)->ops->family);
2803 		dst_hold(xfrm_dst_path(skb_dst(skb)));
2804 		dst = xfrm_lookup(net, xfrm_dst_path(skb_dst(skb)), &fl, skb->sk, 0);
2805 		if (IS_ERR(dst)) {
2806 			kfree_skb(skb);
2807 			continue;
2808 		}
2809 
2810 		nf_reset_ct(skb);
2811 		skb_dst_drop(skb);
2812 		skb_dst_set(skb, dst);
2813 
2814 		dst_output(net, skb->sk, skb);
2815 	}
2816 
2817 out:
2818 	xfrm_pol_put(pol);
2819 	return;
2820 
2821 purge_queue:
2822 	pq->timeout = 0;
2823 	skb_queue_purge(&pq->hold_queue);
2824 	xfrm_pol_put(pol);
2825 }
2826 
2827 static int xdst_queue_output(struct net *net, struct sock *sk, struct sk_buff *skb)
2828 {
2829 	unsigned long sched_next;
2830 	struct dst_entry *dst = skb_dst(skb);
2831 	struct xfrm_dst *xdst = (struct xfrm_dst *) dst;
2832 	struct xfrm_policy *pol = xdst->pols[0];
2833 	struct xfrm_policy_queue *pq = &pol->polq;
2834 
2835 	if (unlikely(skb_fclone_busy(sk, skb))) {
2836 		kfree_skb(skb);
2837 		return 0;
2838 	}
2839 
2840 	if (pq->hold_queue.qlen > XFRM_MAX_QUEUE_LEN) {
2841 		kfree_skb(skb);
2842 		return -EAGAIN;
2843 	}
2844 
2845 	skb_dst_force(skb);
2846 
2847 	spin_lock_bh(&pq->hold_queue.lock);
2848 
2849 	if (!pq->timeout)
2850 		pq->timeout = XFRM_QUEUE_TMO_MIN;
2851 
2852 	sched_next = jiffies + pq->timeout;
2853 
2854 	if (del_timer(&pq->hold_timer)) {
2855 		if (time_before(pq->hold_timer.expires, sched_next))
2856 			sched_next = pq->hold_timer.expires;
2857 		xfrm_pol_put(pol);
2858 	}
2859 
2860 	__skb_queue_tail(&pq->hold_queue, skb);
2861 	if (!mod_timer(&pq->hold_timer, sched_next))
2862 		xfrm_pol_hold(pol);
2863 
2864 	spin_unlock_bh(&pq->hold_queue.lock);
2865 
2866 	return 0;
2867 }
2868 
2869 static struct xfrm_dst *xfrm_create_dummy_bundle(struct net *net,
2870 						 struct xfrm_flo *xflo,
2871 						 const struct flowi *fl,
2872 						 int num_xfrms,
2873 						 u16 family)
2874 {
2875 	int err;
2876 	struct net_device *dev;
2877 	struct dst_entry *dst;
2878 	struct dst_entry *dst1;
2879 	struct xfrm_dst *xdst;
2880 
2881 	xdst = xfrm_alloc_dst(net, family);
2882 	if (IS_ERR(xdst))
2883 		return xdst;
2884 
2885 	if (!(xflo->flags & XFRM_LOOKUP_QUEUE) ||
2886 	    net->xfrm.sysctl_larval_drop ||
2887 	    num_xfrms <= 0)
2888 		return xdst;
2889 
2890 	dst = xflo->dst_orig;
2891 	dst1 = &xdst->u.dst;
2892 	dst_hold(dst);
2893 	xdst->route = dst;
2894 
2895 	dst_copy_metrics(dst1, dst);
2896 
2897 	dst1->obsolete = DST_OBSOLETE_FORCE_CHK;
2898 	dst1->flags |= DST_XFRM_QUEUE;
2899 	dst1->lastuse = jiffies;
2900 
2901 	dst1->input = dst_discard;
2902 	dst1->output = xdst_queue_output;
2903 
2904 	dst_hold(dst);
2905 	xfrm_dst_set_child(xdst, dst);
2906 	xdst->path = dst;
2907 
2908 	xfrm_init_path((struct xfrm_dst *)dst1, dst, 0);
2909 
2910 	err = -ENODEV;
2911 	dev = dst->dev;
2912 	if (!dev)
2913 		goto free_dst;
2914 
2915 	err = xfrm_fill_dst(xdst, dev, fl);
2916 	if (err)
2917 		goto free_dst;
2918 
2919 out:
2920 	return xdst;
2921 
2922 free_dst:
2923 	dst_release(dst1);
2924 	xdst = ERR_PTR(err);
2925 	goto out;
2926 }
2927 
2928 static struct xfrm_dst *xfrm_bundle_lookup(struct net *net,
2929 					   const struct flowi *fl,
2930 					   u16 family, u8 dir,
2931 					   struct xfrm_flo *xflo, u32 if_id)
2932 {
2933 	struct xfrm_policy *pols[XFRM_POLICY_TYPE_MAX];
2934 	int num_pols = 0, num_xfrms = 0, err;
2935 	struct xfrm_dst *xdst;
2936 
2937 	/* Resolve the policies to use if we couldn't get them from
2938 	 * the previous cache entry */
2939 	num_pols = 1;
2940 	pols[0] = xfrm_policy_lookup(net, fl, family, dir, if_id);
2941 	err = xfrm_expand_policies(fl, family, pols,
2942 					   &num_pols, &num_xfrms);
2943 	if (err < 0)
2944 		goto inc_error;
2945 	if (num_pols == 0)
2946 		return NULL;
2947 	if (num_xfrms <= 0)
2948 		goto make_dummy_bundle;
2949 
2950 	xdst = xfrm_resolve_and_create_bundle(pols, num_pols, fl, family,
2951 					      xflo->dst_orig);
2952 	if (IS_ERR(xdst)) {
2953 		err = PTR_ERR(xdst);
2954 		if (err == -EREMOTE) {
2955 			xfrm_pols_put(pols, num_pols);
2956 			return NULL;
2957 		}
2958 
2959 		if (err != -EAGAIN)
2960 			goto error;
2961 		goto make_dummy_bundle;
2962 	} else if (xdst == NULL) {
2963 		num_xfrms = 0;
2964 		goto make_dummy_bundle;
2965 	}
2966 
2967 	return xdst;
2968 
2969 make_dummy_bundle:
2970 	/* We found policies, but there are no bundles to instantiate:
2971 	 * either because the policy blocks, has no transformations, or
2972 	 * we could not build a template (no xfrm_states). */
2973 	xdst = xfrm_create_dummy_bundle(net, xflo, fl, num_xfrms, family);
2974 	if (IS_ERR(xdst)) {
2975 		xfrm_pols_put(pols, num_pols);
2976 		return ERR_CAST(xdst);
2977 	}
2978 	xdst->num_pols = num_pols;
2979 	xdst->num_xfrms = num_xfrms;
2980 	memcpy(xdst->pols, pols, sizeof(struct xfrm_policy *) * num_pols);
2981 
2982 	return xdst;
2983 
2984 inc_error:
2985 	XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTPOLERROR);
2986 error:
2987 	xfrm_pols_put(pols, num_pols);
2988 	return ERR_PTR(err);
2989 }
2990 
2991 static struct dst_entry *make_blackhole(struct net *net, u16 family,
2992 					struct dst_entry *dst_orig)
2993 {
2994 	const struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
2995 	struct dst_entry *ret;
2996 
2997 	if (!afinfo) {
2998 		dst_release(dst_orig);
2999 		return ERR_PTR(-EINVAL);
3000 	} else {
3001 		ret = afinfo->blackhole_route(net, dst_orig);
3002 	}
3003 	rcu_read_unlock();
3004 
3005 	return ret;
3006 }
3007 
3008 /* Finds/creates a bundle for the given flow and if_id
3009  *
3010  * At the moment we eat a raw IP route. Mostly to speed up lookups
3011  * on interfaces with disabled IPsec.
3012  *
3013  * xfrm_lookup uses an if_id of 0 by default, and is provided for
3014  * compatibility.
3015  */
3016 struct dst_entry *xfrm_lookup_with_ifid(struct net *net,
3017 					struct dst_entry *dst_orig,
3018 					const struct flowi *fl,
3019 					const struct sock *sk,
3020 					int flags, u32 if_id)
3021 {
3022 	struct xfrm_policy *pols[XFRM_POLICY_TYPE_MAX];
3023 	struct xfrm_dst *xdst;
3024 	struct dst_entry *dst, *route;
3025 	u16 family = dst_orig->ops->family;
3026 	u8 dir = XFRM_POLICY_OUT;
3027 	int i, err, num_pols, num_xfrms = 0, drop_pols = 0;
3028 
3029 	dst = NULL;
3030 	xdst = NULL;
3031 	route = NULL;
3032 
3033 	sk = sk_const_to_full_sk(sk);
3034 	if (sk && sk->sk_policy[XFRM_POLICY_OUT]) {
3035 		num_pols = 1;
3036 		pols[0] = xfrm_sk_policy_lookup(sk, XFRM_POLICY_OUT, fl, family,
3037 						if_id);
3038 		err = xfrm_expand_policies(fl, family, pols,
3039 					   &num_pols, &num_xfrms);
3040 		if (err < 0)
3041 			goto dropdst;
3042 
3043 		if (num_pols) {
3044 			if (num_xfrms <= 0) {
3045 				drop_pols = num_pols;
3046 				goto no_transform;
3047 			}
3048 
3049 			xdst = xfrm_resolve_and_create_bundle(
3050 					pols, num_pols, fl,
3051 					family, dst_orig);
3052 
3053 			if (IS_ERR(xdst)) {
3054 				xfrm_pols_put(pols, num_pols);
3055 				err = PTR_ERR(xdst);
3056 				if (err == -EREMOTE)
3057 					goto nopol;
3058 
3059 				goto dropdst;
3060 			} else if (xdst == NULL) {
3061 				num_xfrms = 0;
3062 				drop_pols = num_pols;
3063 				goto no_transform;
3064 			}
3065 
3066 			route = xdst->route;
3067 		}
3068 	}
3069 
3070 	if (xdst == NULL) {
3071 		struct xfrm_flo xflo;
3072 
3073 		xflo.dst_orig = dst_orig;
3074 		xflo.flags = flags;
3075 
3076 		/* To accelerate a bit...  */
3077 		if ((dst_orig->flags & DST_NOXFRM) ||
3078 		    !net->xfrm.policy_count[XFRM_POLICY_OUT])
3079 			goto nopol;
3080 
3081 		xdst = xfrm_bundle_lookup(net, fl, family, dir, &xflo, if_id);
3082 		if (xdst == NULL)
3083 			goto nopol;
3084 		if (IS_ERR(xdst)) {
3085 			err = PTR_ERR(xdst);
3086 			goto dropdst;
3087 		}
3088 
3089 		num_pols = xdst->num_pols;
3090 		num_xfrms = xdst->num_xfrms;
3091 		memcpy(pols, xdst->pols, sizeof(struct xfrm_policy *) * num_pols);
3092 		route = xdst->route;
3093 	}
3094 
3095 	dst = &xdst->u.dst;
3096 	if (route == NULL && num_xfrms > 0) {
3097 		/* The only case when xfrm_bundle_lookup() returns a
3098 		 * bundle with a null route is when the template could
3099 		 * not be resolved. It means the policies are there, but
3100 		 * the bundle could not be created, since we don't yet
3101 		 * have the xfrm_states. We need to wait for the KM to
3102 		 * negotiate new SAs or bail out with an error. */
3103 		if (net->xfrm.sysctl_larval_drop) {
3104 			XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTNOSTATES);
3105 			err = -EREMOTE;
3106 			goto error;
3107 		}
3108 
3109 		err = -EAGAIN;
3110 
3111 		XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTNOSTATES);
3112 		goto error;
3113 	}
3114 
3115 no_transform:
3116 	if (num_pols == 0)
3117 		goto nopol;
3118 
3119 	if ((flags & XFRM_LOOKUP_ICMP) &&
3120 	    !(pols[0]->flags & XFRM_POLICY_ICMP)) {
3121 		err = -ENOENT;
3122 		goto error;
3123 	}
3124 
3125 	for (i = 0; i < num_pols; i++)
3126 		pols[i]->curlft.use_time = ktime_get_real_seconds();
3127 
3128 	if (num_xfrms < 0) {
3129 		/* Prohibit the flow */
3130 		XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTPOLBLOCK);
3131 		err = -EPERM;
3132 		goto error;
3133 	} else if (num_xfrms > 0) {
3134 		/* Flow transformed */
3135 		dst_release(dst_orig);
3136 	} else {
3137 		/* Flow passes untransformed */
3138 		dst_release(dst);
3139 		dst = dst_orig;
3140 	}
3141 ok:
3142 	xfrm_pols_put(pols, drop_pols);
3143 	if (dst && dst->xfrm &&
3144 	    dst->xfrm->props.mode == XFRM_MODE_TUNNEL)
3145 		dst->flags |= DST_XFRM_TUNNEL;
3146 	return dst;
3147 
3148 nopol:
3149 	if (!(flags & XFRM_LOOKUP_ICMP)) {
3150 		dst = dst_orig;
3151 		goto ok;
3152 	}
3153 	err = -ENOENT;
3154 error:
3155 	dst_release(dst);
3156 dropdst:
3157 	if (!(flags & XFRM_LOOKUP_KEEP_DST_REF))
3158 		dst_release(dst_orig);
3159 	xfrm_pols_put(pols, drop_pols);
3160 	return ERR_PTR(err);
3161 }
3162 EXPORT_SYMBOL(xfrm_lookup_with_ifid);
3163 
3164 /* Main function: finds/creates a bundle for the given flow.
3165  *
3166  * At the moment we eat a raw IP route. Mostly to speed up lookups
3167  * on interfaces with disabled IPsec.
3168  */
3169 struct dst_entry *xfrm_lookup(struct net *net, struct dst_entry *dst_orig,
3170 			      const struct flowi *fl, const struct sock *sk,
3171 			      int flags)
3172 {
3173 	return xfrm_lookup_with_ifid(net, dst_orig, fl, sk, flags, 0);
3174 }
3175 EXPORT_SYMBOL(xfrm_lookup);
3176 
3177 /* Callers of xfrm_lookup_route() must ensure a call to dst_output().
3178  * Otherwise we may send out blackholed packets.
3179  */
3180 struct dst_entry *xfrm_lookup_route(struct net *net, struct dst_entry *dst_orig,
3181 				    const struct flowi *fl,
3182 				    const struct sock *sk, int flags)
3183 {
3184 	struct dst_entry *dst = xfrm_lookup(net, dst_orig, fl, sk,
3185 					    flags | XFRM_LOOKUP_QUEUE |
3186 					    XFRM_LOOKUP_KEEP_DST_REF);
3187 
3188 	if (PTR_ERR(dst) == -EREMOTE)
3189 		return make_blackhole(net, dst_orig->ops->family, dst_orig);
3190 
3191 	if (IS_ERR(dst))
3192 		dst_release(dst_orig);
3193 
3194 	return dst;
3195 }
3196 EXPORT_SYMBOL(xfrm_lookup_route);
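
/* Illustrative sketch (editorial addition): roughly how an output-path
 * caller wraps an already resolved route with xfrm_lookup_route().  The
 * helper name resolve_output_dst() is hypothetical; the real IPv4/IPv6
 * output paths do the equivalent inside their route lookup functions.
 */
static struct dst_entry *resolve_output_dst(struct net *net,
					    struct dst_entry *rt_dst,
					    const struct flowi *fl,
					    const struct sock *sk)
{
	struct dst_entry *dst;

	dst = xfrm_lookup_route(net, rt_dst, fl, sk, 0);
	if (IS_ERR(dst))
		return NULL;	/* rt_dst has already been released */

	/* Per the comment above, the caller must make sure dst_output()
	 * eventually runs on the result, which may be a blackhole route.
	 */
	return dst;
}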
3197 
3198 static inline int
3199 xfrm_secpath_reject(int idx, struct sk_buff *skb, const struct flowi *fl)
3200 {
3201 	struct sec_path *sp = skb_sec_path(skb);
3202 	struct xfrm_state *x;
3203 
3204 	if (!sp || idx < 0 || idx >= sp->len)
3205 		return 0;
3206 	x = sp->xvec[idx];
3207 	if (!x->type->reject)
3208 		return 0;
3209 	return x->type->reject(x, skb, fl);
3210 }
3211 
3212 /* When skb is transformed back to its "native" form, we have to
3213  * check policy restrictions. At the moment we do this in a maximally
3214  * stupid way. Shame on me. :-) Of course, connected sockets must
3215  * have their policy cached at them.
3216  */
3217 
3218 static inline int
3219 xfrm_state_ok(const struct xfrm_tmpl *tmpl, const struct xfrm_state *x,
3220 	      unsigned short family)
3221 {
3222 	if (xfrm_state_kern(x))
3223 		return tmpl->optional && !xfrm_state_addr_cmp(tmpl, x, tmpl->encap_family);
3224 	return	x->id.proto == tmpl->id.proto &&
3225 		(x->id.spi == tmpl->id.spi || !tmpl->id.spi) &&
3226 		(x->props.reqid == tmpl->reqid || !tmpl->reqid) &&
3227 		x->props.mode == tmpl->mode &&
3228 		(tmpl->allalgs || (tmpl->aalgos & (1<<x->props.aalgo)) ||
3229 		 !(xfrm_id_proto_match(tmpl->id.proto, IPSEC_PROTO_ANY))) &&
3230 		!(x->props.mode != XFRM_MODE_TRANSPORT &&
3231 		  xfrm_state_addr_cmp(tmpl, x, family));
3232 }
3233 
3234 /*
3235  * 0 or more than 0 is returned when validation succeeds (either bypass
3236  * because of optional transport mode, or the next index of the matched
3237  * secpath state with the template).
3238  * -1 is returned when no matching template is found.
3239  * Otherwise "-2 - errored_index" is returned.
3240  */
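/* Worked example (editorial addition, hypothetical values): with a secpath
 * of two states { xvec[0] = transport-mode ESP, xvec[1] = tunnel-mode ESP }
 * and start == 0:
 *   - a required template matching xvec[0] returns 1 (the next index);
 *   - a required template matching neither state returns -2 - 1 == -3,
 *     i.e. errored_index 1, since the scan stops at tunnel-mode xvec[1];
 *   - an optional transport-mode template returns start unchanged (bypass).
 */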
3241 static inline int
3242 xfrm_policy_ok(const struct xfrm_tmpl *tmpl, const struct sec_path *sp, int start,
3243 	       unsigned short family)
3244 {
3245 	int idx = start;
3246 
3247 	if (tmpl->optional) {
3248 		if (tmpl->mode == XFRM_MODE_TRANSPORT)
3249 			return start;
3250 	} else
3251 		start = -1;
3252 	for (; idx < sp->len; idx++) {
3253 		if (xfrm_state_ok(tmpl, sp->xvec[idx], family))
3254 			return ++idx;
3255 		if (sp->xvec[idx]->props.mode != XFRM_MODE_TRANSPORT) {
3256 			if (start == -1)
3257 				start = -2-idx;
3258 			break;
3259 		}
3260 	}
3261 	return start;
3262 }
3263 
3264 static void
3265 decode_session4(struct sk_buff *skb, struct flowi *fl, bool reverse)
3266 {
3267 	const struct iphdr *iph = ip_hdr(skb);
3268 	int ihl = iph->ihl;
3269 	u8 *xprth = skb_network_header(skb) + ihl * 4;
3270 	struct flowi4 *fl4 = &fl->u.ip4;
3271 	int oif = 0;
3272 
3273 	if (skb_dst(skb) && skb_dst(skb)->dev)
3274 		oif = skb_dst(skb)->dev->ifindex;
3275 
3276 	memset(fl4, 0, sizeof(struct flowi4));
3277 	fl4->flowi4_mark = skb->mark;
3278 	fl4->flowi4_oif = reverse ? skb->skb_iif : oif;
3279 
3280 	fl4->flowi4_proto = iph->protocol;
3281 	fl4->daddr = reverse ? iph->saddr : iph->daddr;
3282 	fl4->saddr = reverse ? iph->daddr : iph->saddr;
3283 	fl4->flowi4_tos = iph->tos;
3284 
3285 	if (!ip_is_fragment(iph)) {
3286 		switch (iph->protocol) {
3287 		case IPPROTO_UDP:
3288 		case IPPROTO_UDPLITE:
3289 		case IPPROTO_TCP:
3290 		case IPPROTO_SCTP:
3291 		case IPPROTO_DCCP:
3292 			if (xprth + 4 < skb->data ||
3293 			    pskb_may_pull(skb, xprth + 4 - skb->data)) {
3294 				__be16 *ports;
3295 
3296 				xprth = skb_network_header(skb) + ihl * 4;
3297 				ports = (__be16 *)xprth;
3298 
3299 				fl4->fl4_sport = ports[!!reverse];
3300 				fl4->fl4_dport = ports[!reverse];
3301 			}
3302 			break;
3303 		case IPPROTO_ICMP:
3304 			if (xprth + 2 < skb->data ||
3305 			    pskb_may_pull(skb, xprth + 2 - skb->data)) {
3306 				u8 *icmp;
3307 
3308 				xprth = skb_network_header(skb) + ihl * 4;
3309 				icmp = xprth;
3310 
3311 				fl4->fl4_icmp_type = icmp[0];
3312 				fl4->fl4_icmp_code = icmp[1];
3313 			}
3314 			break;
3315 		case IPPROTO_ESP:
3316 			if (xprth + 4 < skb->data ||
3317 			    pskb_may_pull(skb, xprth + 4 - skb->data)) {
3318 				__be32 *ehdr;
3319 
3320 				xprth = skb_network_header(skb) + ihl * 4;
3321 				ehdr = (__be32 *)xprth;
3322 
3323 				fl4->fl4_ipsec_spi = ehdr[0];
3324 			}
3325 			break;
3326 		case IPPROTO_AH:
3327 			if (xprth + 8 < skb->data ||
3328 			    pskb_may_pull(skb, xprth + 8 - skb->data)) {
3329 				__be32 *ah_hdr;
3330 
3331 				xprth = skb_network_header(skb) + ihl * 4;
3332 				ah_hdr = (__be32 *)xprth;
3333 
3334 				fl4->fl4_ipsec_spi = ah_hdr[1];
3335 			}
3336 			break;
3337 		case IPPROTO_COMP:
3338 			if (xprth + 4 < skb->data ||
3339 			    pskb_may_pull(skb, xprth + 4 - skb->data)) {
3340 				__be16 *ipcomp_hdr;
3341 
3342 				xprth = skb_network_header(skb) + ihl * 4;
3343 				ipcomp_hdr = (__be16 *)xprth;
3344 
3345 				fl4->fl4_ipsec_spi = htonl(ntohs(ipcomp_hdr[1]));
3346 			}
3347 			break;
3348 		case IPPROTO_GRE:
3349 			if (xprth + 12 < skb->data ||
3350 			    pskb_may_pull(skb, xprth + 12 - skb->data)) {
3351 				__be16 *greflags;
3352 				__be32 *gre_hdr;
3353 
3354 				xprth = skb_network_header(skb) + ihl * 4;
3355 				greflags = (__be16 *)xprth;
3356 				gre_hdr = (__be32 *)xprth;
3357 
3358 				if (greflags[0] & GRE_KEY) {
3359 					if (greflags[0] & GRE_CSUM)
3360 						gre_hdr++;
3361 					fl4->fl4_gre_key = gre_hdr[1];
3362 				}
3363 			}
3364 			break;
3365 		default:
3366 			fl4->fl4_ipsec_spi = 0;
3367 			break;
3368 		}
3369 	}
3370 }
3371 
3372 #if IS_ENABLED(CONFIG_IPV6)
3373 static void
3374 decode_session6(struct sk_buff *skb, struct flowi *fl, bool reverse)
3375 {
3376 	struct flowi6 *fl6 = &fl->u.ip6;
3377 	int onlyproto = 0;
3378 	const struct ipv6hdr *hdr = ipv6_hdr(skb);
3379 	u32 offset = sizeof(*hdr);
3380 	struct ipv6_opt_hdr *exthdr;
3381 	const unsigned char *nh = skb_network_header(skb);
3382 	u16 nhoff = IP6CB(skb)->nhoff;
3383 	int oif = 0;
3384 	u8 nexthdr;
3385 
3386 	if (!nhoff)
3387 		nhoff = offsetof(struct ipv6hdr, nexthdr);
3388 
3389 	nexthdr = nh[nhoff];
3390 
3391 	if (skb_dst(skb) && skb_dst(skb)->dev)
3392 		oif = skb_dst(skb)->dev->ifindex;
3393 
3394 	memset(fl6, 0, sizeof(struct flowi6));
3395 	fl6->flowi6_mark = skb->mark;
3396 	fl6->flowi6_oif = reverse ? skb->skb_iif : oif;
3397 
3398 	fl6->daddr = reverse ? hdr->saddr : hdr->daddr;
3399 	fl6->saddr = reverse ? hdr->daddr : hdr->saddr;
3400 
3401 	while (nh + offset + sizeof(*exthdr) < skb->data ||
3402 	       pskb_may_pull(skb, nh + offset + sizeof(*exthdr) - skb->data)) {
3403 		nh = skb_network_header(skb);
3404 		exthdr = (struct ipv6_opt_hdr *)(nh + offset);
3405 
3406 		switch (nexthdr) {
3407 		case NEXTHDR_FRAGMENT:
3408 			onlyproto = 1;
3409 			/* fall through */
3410 		case NEXTHDR_ROUTING:
3411 		case NEXTHDR_HOP:
3412 		case NEXTHDR_DEST:
3413 			offset += ipv6_optlen(exthdr);
3414 			nexthdr = exthdr->nexthdr;
3415 			exthdr = (struct ipv6_opt_hdr *)(nh + offset);
3416 			break;
3417 		case IPPROTO_UDP:
3418 		case IPPROTO_UDPLITE:
3419 		case IPPROTO_TCP:
3420 		case IPPROTO_SCTP:
3421 		case IPPROTO_DCCP:
3422 			if (!onlyproto && (nh + offset + 4 < skb->data ||
3423 			     pskb_may_pull(skb, nh + offset + 4 - skb->data))) {
3424 				__be16 *ports;
3425 
3426 				nh = skb_network_header(skb);
3427 				ports = (__be16 *)(nh + offset);
3428 				fl6->fl6_sport = ports[!!reverse];
3429 				fl6->fl6_dport = ports[!reverse];
3430 			}
3431 			fl6->flowi6_proto = nexthdr;
3432 			return;
3433 		case IPPROTO_ICMPV6:
3434 			if (!onlyproto && (nh + offset + 2 < skb->data ||
3435 			    pskb_may_pull(skb, nh + offset + 2 - skb->data))) {
3436 				u8 *icmp;
3437 
3438 				nh = skb_network_header(skb);
3439 				icmp = (u8 *)(nh + offset);
3440 				fl6->fl6_icmp_type = icmp[0];
3441 				fl6->fl6_icmp_code = icmp[1];
3442 			}
3443 			fl6->flowi6_proto = nexthdr;
3444 			return;
3445 #if IS_ENABLED(CONFIG_IPV6_MIP6)
3446 		case IPPROTO_MH:
3447 			offset += ipv6_optlen(exthdr);
3448 			if (!onlyproto && (nh + offset + 3 < skb->data ||
3449 			    pskb_may_pull(skb, nh + offset + 3 - skb->data))) {
3450 				struct ip6_mh *mh;
3451 
3452 				nh = skb_network_header(skb);
3453 				mh = (struct ip6_mh *)(nh + offset);
3454 				fl6->fl6_mh_type = mh->ip6mh_type;
3455 			}
3456 			fl6->flowi6_proto = nexthdr;
3457 			return;
3458 #endif
3459 		/* XXX Why are these headers here? */
3460 		case IPPROTO_AH:
3461 		case IPPROTO_ESP:
3462 		case IPPROTO_COMP:
3463 		default:
3464 			fl6->fl6_ipsec_spi = 0;
3465 			fl6->flowi6_proto = nexthdr;
3466 			return;
3467 		}
3468 	}
3469 }
3470 #endif
3471 
3472 int __xfrm_decode_session(struct sk_buff *skb, struct flowi *fl,
3473 			  unsigned int family, int reverse)
3474 {
3475 	switch (family) {
3476 	case AF_INET:
3477 		decode_session4(skb, fl, reverse);
3478 		break;
3479 #if IS_ENABLED(CONFIG_IPV6)
3480 	case AF_INET6:
3481 		decode_session6(skb, fl, reverse);
3482 		break;
3483 #endif
3484 	default:
3485 		return -EAFNOSUPPORT;
3486 	}
3487 
3488 	return security_xfrm_decode_session(skb, &fl->flowi_secid);
3489 }
3490 EXPORT_SYMBOL(__xfrm_decode_session);
3491 
3492 static inline int secpath_has_nontransport(const struct sec_path *sp, int k, int *idxp)
3493 {
3494 	for (; k < sp->len; k++) {
3495 		if (sp->xvec[k]->props.mode != XFRM_MODE_TRANSPORT) {
3496 			*idxp = k;
3497 			return 1;
3498 		}
3499 	}
3500 
3501 	return 0;
3502 }
3503 
3504 int __xfrm_policy_check(struct sock *sk, int dir, struct sk_buff *skb,
3505 			unsigned short family)
3506 {
3507 	struct net *net = dev_net(skb->dev);
3508 	struct xfrm_policy *pol;
3509 	struct xfrm_policy *pols[XFRM_POLICY_TYPE_MAX];
3510 	int npols = 0;
3511 	int xfrm_nr;
3512 	int pi;
3513 	int reverse;
3514 	struct flowi fl;
3515 	int xerr_idx = -1;
3516 	const struct xfrm_if_cb *ifcb;
3517 	struct sec_path *sp;
3518 	struct xfrm_if *xi;
3519 	u32 if_id = 0;
3520 
3521 	rcu_read_lock();
3522 	ifcb = xfrm_if_get_cb();
3523 
3524 	if (ifcb) {
3525 		xi = ifcb->decode_session(skb, family);
3526 		if (xi) {
3527 			if_id = xi->p.if_id;
3528 			net = xi->net;
3529 		}
3530 	}
3531 	rcu_read_unlock();
3532 
3533 	reverse = dir & ~XFRM_POLICY_MASK;
3534 	dir &= XFRM_POLICY_MASK;
3535 
3536 	if (__xfrm_decode_session(skb, &fl, family, reverse) < 0) {
3537 		XFRM_INC_STATS(net, LINUX_MIB_XFRMINHDRERROR);
3538 		return 0;
3539 	}
3540 
3541 	nf_nat_decode_session(skb, &fl, family);
3542 
3543 	/* First, check the used SAs against their selectors. */
3544 	sp = skb_sec_path(skb);
3545 	if (sp) {
3546 		int i;
3547 
3548 		for (i = sp->len - 1; i >= 0; i--) {
3549 			struct xfrm_state *x = sp->xvec[i];
3550 			if (!xfrm_selector_match(&x->sel, &fl, family)) {
3551 				XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATEMISMATCH);
3552 				return 0;
3553 			}
3554 		}
3555 	}
3556 
3557 	pol = NULL;
3558 	sk = sk_to_full_sk(sk);
3559 	if (sk && sk->sk_policy[dir]) {
3560 		pol = xfrm_sk_policy_lookup(sk, dir, &fl, family, if_id);
3561 		if (IS_ERR(pol)) {
3562 			XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLERROR);
3563 			return 0;
3564 		}
3565 	}
3566 
3567 	if (!pol)
3568 		pol = xfrm_policy_lookup(net, &fl, family, dir, if_id);
3569 
3570 	if (IS_ERR(pol)) {
3571 		XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLERROR);
3572 		return 0;
3573 	}
3574 
3575 	if (!pol) {
3576 		if (sp && secpath_has_nontransport(sp, 0, &xerr_idx)) {
3577 			xfrm_secpath_reject(xerr_idx, skb, &fl);
3578 			XFRM_INC_STATS(net, LINUX_MIB_XFRMINNOPOLS);
3579 			return 0;
3580 		}
3581 		return 1;
3582 	}
3583 
3584 	pol->curlft.use_time = ktime_get_real_seconds();
3585 
3586 	pols[0] = pol;
3587 	npols++;
3588 #ifdef CONFIG_XFRM_SUB_POLICY
3589 	if (pols[0]->type != XFRM_POLICY_TYPE_MAIN) {
3590 		pols[1] = xfrm_policy_lookup_bytype(net, XFRM_POLICY_TYPE_MAIN,
3591 						    &fl, family,
3592 						    XFRM_POLICY_IN, if_id);
3593 		if (pols[1]) {
3594 			if (IS_ERR(pols[1])) {
3595 				XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLERROR);
3596 				return 0;
3597 			}
3598 			pols[1]->curlft.use_time = ktime_get_real_seconds();
3599 			npols++;
3600 		}
3601 	}
3602 #endif
3603 
3604 	if (pol->action == XFRM_POLICY_ALLOW) {
3605 		static struct sec_path dummy;
3606 		struct xfrm_tmpl *tp[XFRM_MAX_DEPTH];
3607 		struct xfrm_tmpl *stp[XFRM_MAX_DEPTH];
3608 		struct xfrm_tmpl **tpp = tp;
3609 		int ti = 0;
3610 		int i, k;
3611 
3612 		sp = skb_sec_path(skb);
3613 		if (!sp)
3614 			sp = &dummy;
3615 
3616 		for (pi = 0; pi < npols; pi++) {
3617 			if (pols[pi] != pol &&
3618 			    pols[pi]->action != XFRM_POLICY_ALLOW) {
3619 				XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLBLOCK);
3620 				goto reject;
3621 			}
3622 			if (ti + pols[pi]->xfrm_nr >= XFRM_MAX_DEPTH) {
3623 				XFRM_INC_STATS(net, LINUX_MIB_XFRMINBUFFERERROR);
3624 				goto reject_error;
3625 			}
3626 			for (i = 0; i < pols[pi]->xfrm_nr; i++)
3627 				tpp[ti++] = &pols[pi]->xfrm_vec[i];
3628 		}
3629 		xfrm_nr = ti;
3630 		if (npols > 1) {
3631 			xfrm_tmpl_sort(stp, tpp, xfrm_nr, family);
3632 			tpp = stp;
3633 		}
3634 
3635 		/* For each tunnel xfrm, find the first matching tmpl.
3636 		 * For each tmpl before that, find corresponding xfrm.
3637 		 * Order is _important_. Later we will implement
3638 		 * some barriers, but at the moment barriers
3639 		 * are implied between each two transformations.
3640 		 */
3641 		for (i = xfrm_nr-1, k = 0; i >= 0; i--) {
3642 			k = xfrm_policy_ok(tpp[i], sp, k, family);
3643 			if (k < 0) {
3644 				if (k < -1)
3645 					/* "-2 - errored_index" returned */
3646 					xerr_idx = -(2+k);
3647 				XFRM_INC_STATS(net, LINUX_MIB_XFRMINTMPLMISMATCH);
3648 				goto reject;
3649 			}
3650 		}
3651 
3652 		if (secpath_has_nontransport(sp, k, &xerr_idx)) {
3653 			XFRM_INC_STATS(net, LINUX_MIB_XFRMINTMPLMISMATCH);
3654 			goto reject;
3655 		}
3656 
3657 		xfrm_pols_put(pols, npols);
3658 		return 1;
3659 	}
3660 	XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLBLOCK);
3661 
3662 reject:
3663 	xfrm_secpath_reject(xerr_idx, skb, &fl);
3664 reject_error:
3665 	xfrm_pols_put(pols, npols);
3666 	return 0;
3667 }
3668 EXPORT_SYMBOL(__xfrm_policy_check);
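
/* Illustrative sketch (editorial addition): receive paths normally reach
 * __xfrm_policy_check() through the xfrm_policy_check()/xfrm4_policy_check()
 * style wrappers in the xfrm header, which short-circuit the common case of
 * no loaded policies.  example_ipv4_rcv() is hypothetical.
 */
static int example_ipv4_rcv(struct sk_buff *skb)
{
	/* Drop the packet unless inbound policy allows it. */
	if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) {
		kfree_skb(skb);
		return NET_RX_DROP;
	}

	/* ... normal protocol input processing would follow here ... */
	return NET_RX_SUCCESS;
}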
3669 
3670 int __xfrm_route_forward(struct sk_buff *skb, unsigned short family)
3671 {
3672 	struct net *net = dev_net(skb->dev);
3673 	struct flowi fl;
3674 	struct dst_entry *dst;
3675 	int res = 1;
3676 
3677 	if (xfrm_decode_session(skb, &fl, family) < 0) {
3678 		XFRM_INC_STATS(net, LINUX_MIB_XFRMFWDHDRERROR);
3679 		return 0;
3680 	}
3681 
3682 	skb_dst_force(skb);
3683 	if (!skb_dst(skb)) {
3684 		XFRM_INC_STATS(net, LINUX_MIB_XFRMFWDHDRERROR);
3685 		return 0;
3686 	}
3687 
3688 	dst = xfrm_lookup(net, skb_dst(skb), &fl, NULL, XFRM_LOOKUP_QUEUE);
3689 	if (IS_ERR(dst)) {
3690 		res = 0;
3691 		dst = NULL;
3692 	}
3693 	skb_dst_set(skb, dst);
3694 	return res;
3695 }
3696 EXPORT_SYMBOL(__xfrm_route_forward);
3697 
3698 /* Optimize later using cookies and generation ids. */
3699 
3700 static struct dst_entry *xfrm_dst_check(struct dst_entry *dst, u32 cookie)
3701 {
3702 	/* Code (such as __xfrm4_bundle_create()) sets dst->obsolete
3703 	 * to DST_OBSOLETE_FORCE_CHK to force all XFRM destinations to
3704 	 * get validated by dst_ops->check on every use.  We do this
3705 	 * because when a normal route referenced by an XFRM dst is
3706 	 * obsoleted we do not go looking around for all the parent
3707 	 * XFRM dsts that reference it so that we can invalidate them.  It
3708 	 * is just too much work.  Instead we make the checks here on
3709 	 * every use.  For example:
3710 	 *
3711 	 *	XFRM dst A --> IPv4 dst X
3712 	 *
3713 	 * X is the "xdst->route" of A (X is also the "dst->path" of A
3714 	 * in this example).  If X is marked obsolete, "A" will not
3715 	 * notice.  That's what we are validating here via the
3716 	 * stale_bundle() check.
3717 	 *
3718 	 * When a dst is removed from the fib tree, DST_OBSOLETE_DEAD will
3719 	 * be marked on it.
3720 	 * This will force stale_bundle() to fail on any xdst bundle with
3721 	 * this dst linked in it.
3722 	 */
3723 	if (dst->obsolete < 0 && !stale_bundle(dst))
3724 		return dst;
3725 
3726 	return NULL;
3727 }
3728 
3729 static int stale_bundle(struct dst_entry *dst)
3730 {
3731 	return !xfrm_bundle_ok((struct xfrm_dst *)dst);
3732 }
3733 
3734 void xfrm_dst_ifdown(struct dst_entry *dst, struct net_device *dev)
3735 {
3736 	while ((dst = xfrm_dst_child(dst)) && dst->xfrm && dst->dev == dev) {
3737 		dst->dev = dev_net(dev)->loopback_dev;
3738 		dev_hold(dst->dev);
3739 		dev_put(dev);
3740 	}
3741 }
3742 EXPORT_SYMBOL(xfrm_dst_ifdown);
3743 
3744 static void xfrm_link_failure(struct sk_buff *skb)
3745 {
3746 	/* Impossible. Such a dst must be popped before it reaches the point of failure. */
3747 }
3748 
3749 static struct dst_entry *xfrm_negative_advice(struct dst_entry *dst)
3750 {
3751 	if (dst) {
3752 		if (dst->obsolete) {
3753 			dst_release(dst);
3754 			dst = NULL;
3755 		}
3756 	}
3757 	return dst;
3758 }
3759 
3760 static void xfrm_init_pmtu(struct xfrm_dst **bundle, int nr)
3761 {
3762 	while (nr--) {
3763 		struct xfrm_dst *xdst = bundle[nr];
3764 		u32 pmtu, route_mtu_cached;
3765 		struct dst_entry *dst;
3766 
3767 		dst = &xdst->u.dst;
3768 		pmtu = dst_mtu(xfrm_dst_child(dst));
3769 		xdst->child_mtu_cached = pmtu;
3770 
3771 		pmtu = xfrm_state_mtu(dst->xfrm, pmtu);
3772 
3773 		route_mtu_cached = dst_mtu(xdst->route);
3774 		xdst->route_mtu_cached = route_mtu_cached;
3775 
3776 		if (pmtu > route_mtu_cached)
3777 			pmtu = route_mtu_cached;
3778 
3779 		dst_metric_set(dst, RTAX_MTU, pmtu);
3780 	}
3781 }
3782 
3783 /* Check that the bundle accepts the flow and its components are
3784  * still valid.
3785  */
3786 
3787 static int xfrm_bundle_ok(struct xfrm_dst *first)
3788 {
3789 	struct xfrm_dst *bundle[XFRM_MAX_DEPTH];
3790 	struct dst_entry *dst = &first->u.dst;
3791 	struct xfrm_dst *xdst;
3792 	int start_from, nr;
3793 	u32 mtu;
3794 
3795 	if (!dst_check(xfrm_dst_path(dst), ((struct xfrm_dst *)dst)->path_cookie) ||
3796 	    (dst->dev && !netif_running(dst->dev)))
3797 		return 0;
3798 
3799 	if (dst->flags & DST_XFRM_QUEUE)
3800 		return 1;
3801 
3802 	start_from = nr = 0;
3803 	do {
3804 		struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
3805 
3806 		if (dst->xfrm->km.state != XFRM_STATE_VALID)
3807 			return 0;
3808 		if (xdst->xfrm_genid != dst->xfrm->genid)
3809 			return 0;
3810 		if (xdst->num_pols > 0 &&
3811 		    xdst->policy_genid != atomic_read(&xdst->pols[0]->genid))
3812 			return 0;
3813 
3814 		bundle[nr++] = xdst;
3815 
3816 		mtu = dst_mtu(xfrm_dst_child(dst));
3817 		if (xdst->child_mtu_cached != mtu) {
3818 			start_from = nr;
3819 			xdst->child_mtu_cached = mtu;
3820 		}
3821 
3822 		if (!dst_check(xdst->route, xdst->route_cookie))
3823 			return 0;
3824 		mtu = dst_mtu(xdst->route);
3825 		if (xdst->route_mtu_cached != mtu) {
3826 			start_from = nr;
3827 			xdst->route_mtu_cached = mtu;
3828 		}
3829 
3830 		dst = xfrm_dst_child(dst);
3831 	} while (dst->xfrm);
3832 
3833 	if (likely(!start_from))
3834 		return 1;
3835 
3836 	xdst = bundle[start_from - 1];
3837 	mtu = xdst->child_mtu_cached;
3838 	while (start_from--) {
3839 		dst = &xdst->u.dst;
3840 
3841 		mtu = xfrm_state_mtu(dst->xfrm, mtu);
3842 		if (mtu > xdst->route_mtu_cached)
3843 			mtu = xdst->route_mtu_cached;
3844 		dst_metric_set(dst, RTAX_MTU, mtu);
3845 		if (!start_from)
3846 			break;
3847 
3848 		xdst = bundle[start_from - 1];
3849 		xdst->child_mtu_cached = mtu;
3850 	}
3851 
3852 	return 1;
3853 }
3854 
3855 static unsigned int xfrm_default_advmss(const struct dst_entry *dst)
3856 {
3857 	return dst_metric_advmss(xfrm_dst_path(dst));
3858 }
3859 
3860 static unsigned int xfrm_mtu(const struct dst_entry *dst)
3861 {
3862 	unsigned int mtu = dst_metric_raw(dst, RTAX_MTU);
3863 
3864 	return mtu ? : dst_mtu(xfrm_dst_path(dst));
3865 }
3866 
3867 static const void *xfrm_get_dst_nexthop(const struct dst_entry *dst,
3868 					const void *daddr)
3869 {
3870 	while (dst->xfrm) {
3871 		const struct xfrm_state *xfrm = dst->xfrm;
3872 
3873 		dst = xfrm_dst_child(dst);
3874 
3875 		if (xfrm->props.mode == XFRM_MODE_TRANSPORT)
3876 			continue;
3877 		if (xfrm->type->flags & XFRM_TYPE_REMOTE_COADDR)
3878 			daddr = xfrm->coaddr;
3879 		else if (!(xfrm->type->flags & XFRM_TYPE_LOCAL_COADDR))
3880 			daddr = &xfrm->id.daddr;
3881 	}
3882 	return daddr;
3883 }
3884 
3885 static struct neighbour *xfrm_neigh_lookup(const struct dst_entry *dst,
3886 					   struct sk_buff *skb,
3887 					   const void *daddr)
3888 {
3889 	const struct dst_entry *path = xfrm_dst_path(dst);
3890 
3891 	if (!skb)
3892 		daddr = xfrm_get_dst_nexthop(dst, daddr);
3893 	return path->ops->neigh_lookup(path, skb, daddr);
3894 }
3895 
3896 static void xfrm_confirm_neigh(const struct dst_entry *dst, const void *daddr)
3897 {
3898 	const struct dst_entry *path = xfrm_dst_path(dst);
3899 
3900 	daddr = xfrm_get_dst_nexthop(dst, daddr);
3901 	path->ops->confirm_neigh(path, daddr);
3902 }
3903 
3904 int xfrm_policy_register_afinfo(const struct xfrm_policy_afinfo *afinfo, int family)
3905 {
3906 	int err = 0;
3907 
3908 	if (WARN_ON(family >= ARRAY_SIZE(xfrm_policy_afinfo)))
3909 		return -EAFNOSUPPORT;
3910 
3911 	spin_lock(&xfrm_policy_afinfo_lock);
3912 	if (unlikely(xfrm_policy_afinfo[family] != NULL))
3913 		err = -EEXIST;
3914 	else {
3915 		struct dst_ops *dst_ops = afinfo->dst_ops;
3916 		if (likely(dst_ops->kmem_cachep == NULL))
3917 			dst_ops->kmem_cachep = xfrm_dst_cache;
3918 		if (likely(dst_ops->check == NULL))
3919 			dst_ops->check = xfrm_dst_check;
3920 		if (likely(dst_ops->default_advmss == NULL))
3921 			dst_ops->default_advmss = xfrm_default_advmss;
3922 		if (likely(dst_ops->mtu == NULL))
3923 			dst_ops->mtu = xfrm_mtu;
3924 		if (likely(dst_ops->negative_advice == NULL))
3925 			dst_ops->negative_advice = xfrm_negative_advice;
3926 		if (likely(dst_ops->link_failure == NULL))
3927 			dst_ops->link_failure = xfrm_link_failure;
3928 		if (likely(dst_ops->neigh_lookup == NULL))
3929 			dst_ops->neigh_lookup = xfrm_neigh_lookup;
3930 		if (likely(!dst_ops->confirm_neigh))
3931 			dst_ops->confirm_neigh = xfrm_confirm_neigh;
3932 		rcu_assign_pointer(xfrm_policy_afinfo[family], afinfo);
3933 	}
3934 	spin_unlock(&xfrm_policy_afinfo_lock);
3935 
3936 	return err;
3937 }
3938 EXPORT_SYMBOL(xfrm_policy_register_afinfo);
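
/*
 * Editor's sketch of the registration pattern (hypothetical mymod_*
 * names; the in-tree callers are xfrm4_policy.c and xfrm6_policy.c, and
 * the exact callback set in struct xfrm_policy_afinfo varies by kernel
 * version):
 */
static struct dst_ops mymod_dst_ops = {
	.family = AF_INET,
	/* kmem_cachep, check, mtu, neigh_lookup, ... left NULL here are
	 * filled in with the xfrm defaults by the function above. */
};

static const struct xfrm_policy_afinfo mymod_policy_afinfo = {
	.dst_ops = &mymod_dst_ops,
	/* plus the AF-specific dst_lookup()/get_saddr()/fill_dst() hooks */
};

static int __init mymod_init(void)
{
	/* returns -EEXIST if another afinfo already claimed AF_INET */
	return xfrm_policy_register_afinfo(&mymod_policy_afinfo, AF_INET);
}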
3939 
3940 void xfrm_policy_unregister_afinfo(const struct xfrm_policy_afinfo *afinfo)
3941 {
3942 	struct dst_ops *dst_ops = afinfo->dst_ops;
3943 	int i;
3944 
3945 	for (i = 0; i < ARRAY_SIZE(xfrm_policy_afinfo); i++) {
3946 		if (xfrm_policy_afinfo[i] != afinfo)
3947 			continue;
3948 		RCU_INIT_POINTER(xfrm_policy_afinfo[i], NULL);
3949 		break;
3950 	}
3951 
3952 	synchronize_rcu();
3953 
3954 	dst_ops->kmem_cachep = NULL;
3955 	dst_ops->check = NULL;
3956 	dst_ops->negative_advice = NULL;
3957 	dst_ops->link_failure = NULL;
3958 }
3959 EXPORT_SYMBOL(xfrm_policy_unregister_afinfo);
3960 
3961 void xfrm_if_register_cb(const struct xfrm_if_cb *ifcb)
3962 {
3963 	spin_lock(&xfrm_if_cb_lock);
3964 	rcu_assign_pointer(xfrm_if_cb, ifcb);
3965 	spin_unlock(&xfrm_if_cb_lock);
3966 }
3967 EXPORT_SYMBOL(xfrm_if_register_cb);
3968 
3969 void xfrm_if_unregister_cb(void)
3970 {
3971 	RCU_INIT_POINTER(xfrm_if_cb, NULL);
3972 	synchronize_rcu();
3973 }
3974 EXPORT_SYMBOL(xfrm_if_unregister_cb);
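
/*
 * Editor's sketch: net/xfrm/xfrm_interface.c is the in-tree user of
 * this pair, roughly:
 *
 *	static const struct xfrm_if_cb xfrm_ifcb = {
 *		.decode_session = xfrmi_decode_session,	// skb -> struct xfrm_if
 *	};
 *
 *	on init:   xfrm_if_register_cb(&xfrm_ifcb);
 *	on unload: xfrm_if_unregister_cb();
 *
 * The synchronize_rcu() above ensures no reader still holds the old
 * callback pointer by the time unregistration returns.
 */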
3975 
3976 #ifdef CONFIG_XFRM_STATISTICS
3977 static int __net_init xfrm_statistics_init(struct net *net)
3978 {
3979 	int rv;
3980 	net->mib.xfrm_statistics = alloc_percpu(struct linux_xfrm_mib);
3981 	if (!net->mib.xfrm_statistics)
3982 		return -ENOMEM;
3983 	rv = xfrm_proc_init(net);
3984 	if (rv < 0)
3985 		free_percpu(net->mib.xfrm_statistics);
3986 	return rv;
3987 }
3988 
3989 static void xfrm_statistics_fini(struct net *net)
3990 {
3991 	xfrm_proc_fini(net);
3992 	free_percpu(net->mib.xfrm_statistics);
3993 }
3994 #else
3995 static int __net_init xfrm_statistics_init(struct net *net)
3996 {
3997 	return 0;
3998 }
3999 
4000 static void xfrm_statistics_fini(struct net *net)
4001 {
4002 }
4003 #endif
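
/*
 * Editor's note: the per-cpu MIB allocated above is bumped through the
 * XFRM_INC_STATS() helper, which compiles to a no-op when
 * CONFIG_XFRM_STATISTICS is off, e.g.:
 *
 *	XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTNOSTATES);
 *
 * xfrm_proc_init() exposes the counters as /proc/net/xfrm_stat.
 */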
4004 
4005 static int __net_init xfrm_policy_init(struct net *net)
4006 {
4007 	unsigned int hmask, sz;
4008 	int dir, err;
4009 
4010 	if (net_eq(net, &init_net)) {
4011 		xfrm_dst_cache = kmem_cache_create("xfrm_dst_cache",
4012 					   sizeof(struct xfrm_dst),
4013 					   0, SLAB_HWCACHE_ALIGN|SLAB_PANIC,
4014 					   NULL);
4015 		err = rhashtable_init(&xfrm_policy_inexact_table,
4016 				      &xfrm_pol_inexact_params);
4017 		BUG_ON(err);
4018 	}
4019 
4020 	hmask = 8 - 1;
4021 	sz = (hmask+1) * sizeof(struct hlist_head);
4022 
4023 	net->xfrm.policy_byidx = xfrm_hash_alloc(sz);
4024 	if (!net->xfrm.policy_byidx)
4025 		goto out_byidx;
4026 	net->xfrm.policy_idx_hmask = hmask;
4027 
4028 	for (dir = 0; dir < XFRM_POLICY_MAX; dir++) {
4029 		struct xfrm_policy_hash *htab;
4030 
4031 		net->xfrm.policy_count[dir] = 0;
4032 		net->xfrm.policy_count[XFRM_POLICY_MAX + dir] = 0;
4033 		INIT_HLIST_HEAD(&net->xfrm.policy_inexact[dir]);
4034 
4035 		htab = &net->xfrm.policy_bydst[dir];
4036 		htab->table = xfrm_hash_alloc(sz);
4037 		if (!htab->table)
4038 			goto out_bydst;
4039 		htab->hmask = hmask;
4040 		htab->dbits4 = 32;
4041 		htab->sbits4 = 32;
4042 		htab->dbits6 = 128;
4043 		htab->sbits6 = 128;
4044 	}
4045 	net->xfrm.policy_hthresh.lbits4 = 32;
4046 	net->xfrm.policy_hthresh.rbits4 = 32;
4047 	net->xfrm.policy_hthresh.lbits6 = 128;
4048 	net->xfrm.policy_hthresh.rbits6 = 128;
4049 
4050 	seqlock_init(&net->xfrm.policy_hthresh.lock);
4051 
4052 	INIT_LIST_HEAD(&net->xfrm.policy_all);
4053 	INIT_LIST_HEAD(&net->xfrm.inexact_bins);
4054 	INIT_WORK(&net->xfrm.policy_hash_work, xfrm_hash_resize);
4055 	INIT_WORK(&net->xfrm.policy_hthresh.work, xfrm_hash_rebuild);
4056 	return 0;
4057 
4058 out_bydst:
4059 	for (dir--; dir >= 0; dir--) {
4060 		struct xfrm_policy_hash *htab;
4061 
4062 		htab = &net->xfrm.policy_bydst[dir];
4063 		xfrm_hash_free(htab->table, sz);
4064 	}
4065 	xfrm_hash_free(net->xfrm.policy_byidx, sz);
4066 out_byidx:
4067 	return -ENOMEM;
4068 }
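
/*
 * Editor's note on the sizing above: hmask = 8 - 1 gives each table an
 * initial (hmask + 1) = 8 buckets, i.e. sz = 8 * sizeof(struct
 * hlist_head) = 64 bytes per direction on 64-bit.  The tables are grown
 * later by xfrm_hash_resize() via policy_hash_work; dbits/sbits record
 * how many destination/source address bits the bydst hash keys on.
 */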
4069 
4070 static void xfrm_policy_fini(struct net *net)
4071 {
4072 	struct xfrm_pol_inexact_bin *b, *t;
4073 	unsigned int sz;
4074 	int dir;
4075 
4076 	flush_work(&net->xfrm.policy_hash_work);
4077 #ifdef CONFIG_XFRM_SUB_POLICY
4078 	xfrm_policy_flush(net, XFRM_POLICY_TYPE_SUB, false);
4079 #endif
4080 	xfrm_policy_flush(net, XFRM_POLICY_TYPE_MAIN, false);
4081 
4082 	WARN_ON(!list_empty(&net->xfrm.policy_all));
4083 
4084 	for (dir = 0; dir < XFRM_POLICY_MAX; dir++) {
4085 		struct xfrm_policy_hash *htab;
4086 
4087 		WARN_ON(!hlist_empty(&net->xfrm.policy_inexact[dir]));
4088 
4089 		htab = &net->xfrm.policy_bydst[dir];
4090 		sz = (htab->hmask + 1) * sizeof(struct hlist_head);
4091 		WARN_ON(!hlist_empty(htab->table));
4092 		xfrm_hash_free(htab->table, sz);
4093 	}
4094 
4095 	sz = (net->xfrm.policy_idx_hmask + 1) * sizeof(struct hlist_head);
4096 	WARN_ON(!hlist_empty(net->xfrm.policy_byidx));
4097 	xfrm_hash_free(net->xfrm.policy_byidx, sz);
4098 
4099 	spin_lock_bh(&net->xfrm.xfrm_policy_lock);
4100 	list_for_each_entry_safe(b, t, &net->xfrm.inexact_bins, inexact_bins)
4101 		__xfrm_policy_inexact_prune_bin(b, true);
4102 	spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
4103 }
4104 
4105 static int __net_init xfrm_net_init(struct net *net)
4106 {
4107 	int rv;
4108 
4109 	/* Initialize the per-net locks here */
4110 	spin_lock_init(&net->xfrm.xfrm_state_lock);
4111 	spin_lock_init(&net->xfrm.xfrm_policy_lock);
4112 	mutex_init(&net->xfrm.xfrm_cfg_mutex);
4113 
4114 	rv = xfrm_statistics_init(net);
4115 	if (rv < 0)
4116 		goto out_statistics;
4117 	rv = xfrm_state_init(net);
4118 	if (rv < 0)
4119 		goto out_state;
4120 	rv = xfrm_policy_init(net);
4121 	if (rv < 0)
4122 		goto out_policy;
4123 	rv = xfrm_sysctl_init(net);
4124 	if (rv < 0)
4125 		goto out_sysctl;
4126 
4127 	return 0;
4128 
4129 out_sysctl:
4130 	xfrm_policy_fini(net);
4131 out_policy:
4132 	xfrm_state_fini(net);
4133 out_state:
4134 	xfrm_statistics_fini(net);
4135 out_statistics:
4136 	return rv;
4137 }
4138 
4139 static void __net_exit xfrm_net_exit(struct net *net)
4140 {
4141 	xfrm_sysctl_fini(net);
4142 	xfrm_policy_fini(net);
4143 	xfrm_state_fini(net);
4144 	xfrm_statistics_fini(net);
4145 }
4146 
4147 static struct pernet_operations __net_initdata xfrm_net_ops = {
4148 	.init = xfrm_net_init,
4149 	.exit = xfrm_net_exit,
4150 };
4151 
4152 void __init xfrm_init(void)
4153 {
4154 	register_pernet_subsys(&xfrm_net_ops);
4155 	xfrm_dev_init();
4156 	seqcount_init(&xfrm_policy_hash_generation);
4157 	xfrm_input_init();
4158 
4159 #ifdef CONFIG_INET_ESPINTCP
4160 	espintcp_init();
4161 #endif
4162 
4163 	RCU_INIT_POINTER(xfrm_if_cb, NULL);
4164 	synchronize_rcu();
4165 }
4166 
4167 #ifdef CONFIG_AUDITSYSCALL
4168 static void xfrm_audit_common_policyinfo(struct xfrm_policy *xp,
4169 					 struct audit_buffer *audit_buf)
4170 {
4171 	struct xfrm_sec_ctx *ctx = xp->security;
4172 	struct xfrm_selector *sel = &xp->selector;
4173 
4174 	if (ctx)
4175 		audit_log_format(audit_buf, " sec_alg=%u sec_doi=%u sec_obj=%s",
4176 				 ctx->ctx_alg, ctx->ctx_doi, ctx->ctx_str);
4177 
4178 	switch (sel->family) {
4179 	case AF_INET:
4180 		audit_log_format(audit_buf, " src=%pI4", &sel->saddr.a4);
4181 		if (sel->prefixlen_s != 32)
4182 			audit_log_format(audit_buf, " src_prefixlen=%d",
4183 					 sel->prefixlen_s);
4184 		audit_log_format(audit_buf, " dst=%pI4", &sel->daddr.a4);
4185 		if (sel->prefixlen_d != 32)
4186 			audit_log_format(audit_buf, " dst_prefixlen=%d",
4187 					 sel->prefixlen_d);
4188 		break;
4189 	case AF_INET6:
4190 		audit_log_format(audit_buf, " src=%pI6", sel->saddr.a6);
4191 		if (sel->prefixlen_s != 128)
4192 			audit_log_format(audit_buf, " src_prefixlen=%d",
4193 					 sel->prefixlen_s);
4194 		audit_log_format(audit_buf, " dst=%pI6", sel->daddr.a6);
4195 		if (sel->prefixlen_d != 128)
4196 			audit_log_format(audit_buf, " dst_prefixlen=%d",
4197 					 sel->prefixlen_d);
4198 		break;
4199 	}
4200 }
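
/*
 * Editor's worked example of the output above: an AF_INET policy with
 * selector 10.1.0.0/16 -> 10.2.0.0/16 and no security context appends
 *
 *	" src=10.1.0.0 src_prefixlen=16 dst=10.2.0.0 dst_prefixlen=16"
 *
 * to the audit record; host (/32 or /128) selectors omit the
 * *_prefixlen fields.
 */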
4201 
4202 void xfrm_audit_policy_add(struct xfrm_policy *xp, int result, bool task_valid)
4203 {
4204 	struct audit_buffer *audit_buf;
4205 
4206 	audit_buf = xfrm_audit_start("SPD-add");
4207 	if (audit_buf == NULL)
4208 		return;
4209 	xfrm_audit_helper_usrinfo(task_valid, audit_buf);
4210 	audit_log_format(audit_buf, " res=%u", result);
4211 	xfrm_audit_common_policyinfo(xp, audit_buf);
4212 	audit_log_end(audit_buf);
4213 }
4214 EXPORT_SYMBOL_GPL(xfrm_audit_policy_add);
4215 
4216 void xfrm_audit_policy_delete(struct xfrm_policy *xp, int result,
4217 			      bool task_valid)
4218 {
4219 	struct audit_buffer *audit_buf;
4220 
4221 	audit_buf = xfrm_audit_start("SPD-delete");
4222 	if (audit_buf == NULL)
4223 		return;
4224 	xfrm_audit_helper_usrinfo(task_valid, audit_buf);
4225 	audit_log_format(audit_buf, " res=%u", result);
4226 	xfrm_audit_common_policyinfo(xp, audit_buf);
4227 	audit_log_end(audit_buf);
4228 }
4229 EXPORT_SYMBOL_GPL(xfrm_audit_policy_delete);
4230 #endif
4231 
4232 #ifdef CONFIG_XFRM_MIGRATE
4233 static bool xfrm_migrate_selector_match(const struct xfrm_selector *sel_cmp,
4234 					const struct xfrm_selector *sel_tgt)
4235 {
4236 	if (sel_cmp->proto == IPSEC_ULPROTO_ANY) {
4237 		if (sel_tgt->family == sel_cmp->family &&
4238 		    xfrm_addr_equal(&sel_tgt->daddr, &sel_cmp->daddr,
4239 				    sel_cmp->family) &&
4240 		    xfrm_addr_equal(&sel_tgt->saddr, &sel_cmp->saddr,
4241 				    sel_cmp->family) &&
4242 		    sel_tgt->prefixlen_d == sel_cmp->prefixlen_d &&
4243 		    sel_tgt->prefixlen_s == sel_cmp->prefixlen_s) {
4244 			return true;
4245 		}
4246 	} else {
4247 		if (memcmp(sel_tgt, sel_cmp, sizeof(*sel_tgt)) == 0) {
4248 			return true;
4249 		}
4250 	}
4251 	return false;
4252 }
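
/*
 * Editor's note: with sel_cmp->proto == IPSEC_ULPROTO_ANY only the
 * family, both addresses and both prefix lengths are compared, so
 * selectors differing in ports or upper-layer protocol still match;
 * for a concrete proto the two selectors must be byte-identical.
 */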
4253 
4254 static struct xfrm_policy *xfrm_migrate_policy_find(const struct xfrm_selector *sel,
4255 						    u8 dir, u8 type, struct net *net)
4256 {
4257 	struct xfrm_policy *pol, *ret = NULL;
4258 	struct hlist_head *chain;
4259 	u32 priority = ~0U;
4260 
4261 	spin_lock_bh(&net->xfrm.xfrm_policy_lock);
4262 	chain = policy_hash_direct(net, &sel->daddr, &sel->saddr, sel->family, dir);
4263 	hlist_for_each_entry(pol, chain, bydst) {
4264 		if (xfrm_migrate_selector_match(sel, &pol->selector) &&
4265 		    pol->type == type) {
4266 			ret = pol;
4267 			priority = ret->priority;
4268 			break;
4269 		}
4270 	}
4271 	chain = &net->xfrm.policy_inexact[dir];
4272 	hlist_for_each_entry(pol, chain, bydst_inexact_list) {
4273 		if ((pol->priority >= priority) && ret)
4274 			break;
4275 
4276 		if (xfrm_migrate_selector_match(sel, &pol->selector) &&
4277 		    pol->type == type) {
4278 			ret = pol;
4279 			break;
4280 		}
4281 	}
4282 
4283 	xfrm_pol_hold(ret);
4284 
4285 	spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
4286 
4287 	return ret;
4288 }
4289 
4290 static int migrate_tmpl_match(const struct xfrm_migrate *m, const struct xfrm_tmpl *t)
4291 {
4292 	int match = 0;
4293 
4294 	if (t->mode == m->mode && t->id.proto == m->proto &&
4295 	    (m->reqid == 0 || t->reqid == m->reqid)) {
4296 		switch (t->mode) {
4297 		case XFRM_MODE_TUNNEL:
4298 		case XFRM_MODE_BEET:
4299 			if (xfrm_addr_equal(&t->id.daddr, &m->old_daddr,
4300 					    m->old_family) &&
4301 			    xfrm_addr_equal(&t->saddr, &m->old_saddr,
4302 					    m->old_family)) {
4303 				match = 1;
4304 			}
4305 			break;
4306 		case XFRM_MODE_TRANSPORT:
4307 			/* In transport mode the template stores no IP
4308 			 * addresses, so matching the mode and protocol
4309 			 * above is sufficient. */
4310 			match = 1;
4311 			break;
4312 		default:
4313 			break;
4314 		}
4315 	}
4316 	return match;
4317 }
4318 
4319 /* update endpoint address(es) of template(s) */
4320 static int xfrm_policy_migrate(struct xfrm_policy *pol,
4321 			       struct xfrm_migrate *m, int num_migrate)
4322 {
4323 	struct xfrm_migrate *mp;
4324 	int i, j, n = 0;
4325 
4326 	write_lock_bh(&pol->lock);
4327 	if (unlikely(pol->walk.dead)) {
4328 		/* target policy has been deleted */
4329 		write_unlock_bh(&pol->lock);
4330 		return -ENOENT;
4331 	}
4332 
4333 	for (i = 0; i < pol->xfrm_nr; i++) {
4334 		for (j = 0, mp = m; j < num_migrate; j++, mp++) {
4335 			if (!migrate_tmpl_match(mp, &pol->xfrm_vec[i]))
4336 				continue;
4337 			n++;
4338 			if (pol->xfrm_vec[i].mode != XFRM_MODE_TUNNEL &&
4339 			    pol->xfrm_vec[i].mode != XFRM_MODE_BEET)
4340 				continue;
4341 			/* update endpoints */
4342 			memcpy(&pol->xfrm_vec[i].id.daddr, &mp->new_daddr,
4343 			       sizeof(pol->xfrm_vec[i].id.daddr));
4344 			memcpy(&pol->xfrm_vec[i].saddr, &mp->new_saddr,
4345 			       sizeof(pol->xfrm_vec[i].saddr));
4346 			pol->xfrm_vec[i].encap_family = mp->new_family;
4347 			/* flush bundles */
4348 			atomic_inc(&pol->genid);
4349 		}
4350 	}
4351 
4352 	write_unlock_bh(&pol->lock);
4353 
4354 	if (!n)
4355 		return -ENODATA;
4356 
4357 	return 0;
4358 }
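
/*
 * Editor's note: the atomic_inc(&pol->genid) above is what "flush
 * bundles" means here; xfrm_bundle_ok() earlier in this file compares
 * xdst->policy_genid against pols[0]->genid and discards any cached
 * bundle built before the migration.
 */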
4359 
4360 static int xfrm_migrate_check(const struct xfrm_migrate *m, int num_migrate)
4361 {
4362 	int i, j;
4363 
4364 	if (num_migrate < 1 || num_migrate > XFRM_MAX_DEPTH)
4365 		return -EINVAL;
4366 
4367 	for (i = 0; i < num_migrate; i++) {
4368 		if (xfrm_addr_any(&m[i].new_daddr, m[i].new_family) ||
4369 		    xfrm_addr_any(&m[i].new_saddr, m[i].new_family))
4370 			return -EINVAL;
4371 
4372 		/* reject duplicated entries */
4373 		for (j = i + 1; j < num_migrate; j++) {
4374 			if (!memcmp(&m[i].old_daddr, &m[j].old_daddr,
4375 				    sizeof(m[i].old_daddr)) &&
4376 			    !memcmp(&m[i].old_saddr, &m[j].old_saddr,
4377 				    sizeof(m[i].old_saddr)) &&
4378 			    m[i].proto == m[j].proto &&
4379 			    m[i].mode == m[j].mode &&
4380 			    m[i].reqid == m[j].reqid &&
4381 			    m[i].old_family == m[j].old_family)
4382 				return -EINVAL;
4383 		}
4384 	}
4385 
4386 	return 0;
4387 }
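
/*
 * Editor's sketch of one entry that passes these checks (hypothetical,
 * documentation addresses): moving an ESP tunnel from 192.0.2.x to
 * 198.51.100.x within AF_INET.
 */
static void example_fill_migrate(struct xfrm_migrate *m)
{
	memset(m, 0, sizeof(*m));
	m->proto = IPPROTO_ESP;
	m->mode = XFRM_MODE_TUNNEL;
	m->reqid = 0;				/* 0: match any reqid */
	m->old_family = m->new_family = AF_INET;
	m->old_daddr.a4 = htonl(0xc0000201);	/* 192.0.2.1 */
	m->old_saddr.a4 = htonl(0xc0000202);	/* 192.0.2.2 */
	m->new_daddr.a4 = htonl(0xc6336401);	/* 198.51.100.1 */
	m->new_saddr.a4 = htonl(0xc6336402);	/* 198.51.100.2 */
	/* zero (any) new_daddr/new_saddr would be rejected above */
}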
4388 
4389 int xfrm_migrate(const struct xfrm_selector *sel, u8 dir, u8 type,
4390 		 struct xfrm_migrate *m, int num_migrate,
4391 		 struct xfrm_kmaddress *k, struct net *net,
4392 		 struct xfrm_encap_tmpl *encap)
4393 {
4394 	int i, err, nx_cur = 0, nx_new = 0;
4395 	struct xfrm_policy *pol = NULL;
4396 	struct xfrm_state *x, *xc;
4397 	struct xfrm_state *x_cur[XFRM_MAX_DEPTH];
4398 	struct xfrm_state *x_new[XFRM_MAX_DEPTH];
4399 	struct xfrm_migrate *mp;
4400 
4401 	/* Stage 0 - sanity checks */
4402 	if ((err = xfrm_migrate_check(m, num_migrate)) < 0)
4403 		goto out;
4404 
4405 	if (dir >= XFRM_POLICY_MAX) {
4406 		err = -EINVAL;
4407 		goto out;
4408 	}
4409 
4410 	/* Stage 1 - find policy */
4411 	if ((pol = xfrm_migrate_policy_find(sel, dir, type, net)) == NULL) {
4412 		err = -ENOENT;
4413 		goto out;
4414 	}
4415 
4416 	/* Stage 2 - find and update state(s) */
4417 	for (i = 0, mp = m; i < num_migrate; i++, mp++) {
4418 		if ((x = xfrm_migrate_state_find(mp, net))) {
4419 			x_cur[nx_cur] = x;
4420 			nx_cur++;
4421 			xc = xfrm_state_migrate(x, mp, encap);
4422 			if (xc) {
4423 				x_new[nx_new] = xc;
4424 				nx_new++;
4425 			} else {
4426 				err = -ENODATA;
4427 				goto restore_state;
4428 			}
4429 		}
4430 	}
4431 
4432 	/* Stage 3 - update policy */
4433 	if ((err = xfrm_policy_migrate(pol, m, num_migrate)) < 0)
4434 		goto restore_state;
4435 
4436 	/* Stage 4 - delete old state(s) */
4437 	if (nx_cur) {
4438 		xfrm_states_put(x_cur, nx_cur);
4439 		xfrm_states_delete(x_cur, nx_cur);
4440 	}
4441 
4442 	/* Stage 5 - announce */
4443 	km_migrate(sel, dir, type, m, num_migrate, k, encap);
4444 
4445 	xfrm_pol_put(pol);
4446 
4447 	return 0;
4448 out:
4449 	return err;
4450 
4451 restore_state:
4452 	if (pol)
4453 		xfrm_pol_put(pol);
4454 	if (nx_cur)
4455 		xfrm_states_put(x_cur, nx_cur);
4456 	if (nx_new)
4457 		xfrm_states_delete(x_new, nx_new);
4458 
4459 	return err;
4460 }
4461 EXPORT_SYMBOL(xfrm_migrate);
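
/*
 * Editor's note: this entry point is driven from userspace by the
 * XFRM_MSG_MIGRATE netlink message (xfrm_user.c) or, with
 * CONFIG_NET_KEY_MIGRATE, the PF_KEY SADB_X_MIGRATE message (af_key.c);
 * stage 5's km_migrate() then notifies the registered key managers.
 */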
4462 #endif
4463