xref: /openbmc/linux/net/xfrm/xfrm_policy.c (revision 06b72824)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * xfrm_policy.c
4  *
5  * Changes:
6  *	Mitsuru KANDA @USAGI
7  * 	Kazunori MIYAZAWA @USAGI
8  * 	Kunihiro Ishiguro <kunihiro@ipinfusion.com>
9  * 		IPv6 support
10  * 	Kazunori MIYAZAWA @USAGI
11  * 	YOSHIFUJI Hideaki
12  * 		Split up af-specific portion
13  *	Derek Atkins <derek@ihtfp.com>		Add the post_input processor
14  *
15  */
16 
17 #include <linux/err.h>
18 #include <linux/slab.h>
19 #include <linux/kmod.h>
20 #include <linux/list.h>
21 #include <linux/spinlock.h>
22 #include <linux/workqueue.h>
23 #include <linux/notifier.h>
24 #include <linux/netdevice.h>
25 #include <linux/netfilter.h>
26 #include <linux/module.h>
27 #include <linux/cache.h>
28 #include <linux/cpu.h>
29 #include <linux/audit.h>
30 #include <linux/rhashtable.h>
31 #include <linux/if_tunnel.h>
32 #include <net/dst.h>
33 #include <net/flow.h>
34 #include <net/xfrm.h>
35 #include <net/ip.h>
36 #if IS_ENABLED(CONFIG_IPV6_MIP6)
37 #include <net/mip6.h>
38 #endif
39 #ifdef CONFIG_XFRM_STATISTICS
40 #include <net/snmp.h>
41 #endif
42 #ifdef CONFIG_INET_ESPINTCP
43 #include <net/espintcp.h>
44 #endif
45 
46 #include "xfrm_hash.h"
47 
48 #define XFRM_QUEUE_TMO_MIN ((unsigned)(HZ/10))
49 #define XFRM_QUEUE_TMO_MAX ((unsigned)(60*HZ))
50 #define XFRM_MAX_QUEUE_LEN	100
51 
52 struct xfrm_flo {
53 	struct dst_entry *dst_orig;
54 	u8 flags;
55 };
56 
57 /* prefixes smaller than this are stored in lists, not trees. */
58 #define INEXACT_PREFIXLEN_IPV4	16
59 #define INEXACT_PREFIXLEN_IPV6	48
60 
61 struct xfrm_pol_inexact_node {
62 	struct rb_node node;
63 	union {
64 		xfrm_address_t addr;
65 		struct rcu_head rcu;
66 	};
67 	u8 prefixlen;
68 
69 	struct rb_root root;
70 
71 	/* the policies matching this node, can be empty list */
72 	struct hlist_head hhead;
73 };
74 
75 /* xfrm inexact policy search tree:
76  * xfrm_pol_inexact_bin = hash(dir,type,family,if_id);
77  *  |
78  * +---- root_d: sorted by daddr:prefix
79  * |                 |
80  * |        xfrm_pol_inexact_node
81  * |                 |
82  * |                 +- root: sorted by saddr/prefix
83  * |                 |              |
84  * |                 |         xfrm_pol_inexact_node
85  * |                 |              |
86  * |                 |              + root: unused
87  * |                 |              |
88  * |                 |              + hhead: saddr:daddr policies
89  * |                 |
90  * |                 +- coarse policies and all any:daddr policies
91  * |
92  * +---- root_s: sorted by saddr:prefix
93  * |                 |
94  * |        xfrm_pol_inexact_node
95  * |                 |
96  * |                 + root: unused
97  * |                 |
98  * |                 + hhead: saddr:any policies
99  * |
100  * +---- coarse policies and all any:any policies
101  *
102  * Lookups return four candidate lists:
103  * 1. any:any list from top-level xfrm_pol_inexact_bin
104  * 2. any:daddr list from daddr tree
105  * 3. saddr:daddr list from 2nd level daddr tree
106  * 4. saddr:any list from saddr tree
107  *
108  * This result set then needs to be searched for the policy with
109  * the lowest priority.  If two results have same prio, youngest one wins.
110  */
111 
112 struct xfrm_pol_inexact_key {
113 	possible_net_t net;
114 	u32 if_id;
115 	u16 family;
116 	u8 dir, type;
117 };
118 
119 struct xfrm_pol_inexact_bin {
120 	struct xfrm_pol_inexact_key k;
121 	struct rhash_head head;
122 	/* list containing '*:*' policies */
123 	struct hlist_head hhead;
124 
125 	seqcount_t count;
126 	/* tree sorted by daddr/prefix */
127 	struct rb_root root_d;
128 
129 	/* tree sorted by saddr/prefix */
130 	struct rb_root root_s;
131 
132 	/* slow path below */
133 	struct list_head inexact_bins;
134 	struct rcu_head rcu;
135 };
136 
137 enum xfrm_pol_inexact_candidate_type {
138 	XFRM_POL_CAND_BOTH,
139 	XFRM_POL_CAND_SADDR,
140 	XFRM_POL_CAND_DADDR,
141 	XFRM_POL_CAND_ANY,
142 
143 	XFRM_POL_CAND_MAX,
144 };
145 
146 struct xfrm_pol_inexact_candidates {
147 	struct hlist_head *res[XFRM_POL_CAND_MAX];
148 };
149 
150 static DEFINE_SPINLOCK(xfrm_if_cb_lock);
151 static struct xfrm_if_cb const __rcu *xfrm_if_cb __read_mostly;
152 
153 static DEFINE_SPINLOCK(xfrm_policy_afinfo_lock);
154 static struct xfrm_policy_afinfo const __rcu *xfrm_policy_afinfo[AF_INET6 + 1]
155 						__read_mostly;
156 
157 static struct kmem_cache *xfrm_dst_cache __ro_after_init;
158 static __read_mostly seqcount_t xfrm_policy_hash_generation;
159 
160 static struct rhashtable xfrm_policy_inexact_table;
161 static const struct rhashtable_params xfrm_pol_inexact_params;
162 
163 static void xfrm_init_pmtu(struct xfrm_dst **bundle, int nr);
164 static int stale_bundle(struct dst_entry *dst);
165 static int xfrm_bundle_ok(struct xfrm_dst *xdst);
166 static void xfrm_policy_queue_process(struct timer_list *t);
167 
168 static void __xfrm_policy_link(struct xfrm_policy *pol, int dir);
169 static struct xfrm_policy *__xfrm_policy_unlink(struct xfrm_policy *pol,
170 						int dir);
171 
172 static struct xfrm_pol_inexact_bin *
173 xfrm_policy_inexact_lookup(struct net *net, u8 type, u16 family, u8 dir,
174 			   u32 if_id);
175 
176 static struct xfrm_pol_inexact_bin *
177 xfrm_policy_inexact_lookup_rcu(struct net *net,
178 			       u8 type, u16 family, u8 dir, u32 if_id);
179 static struct xfrm_policy *
180 xfrm_policy_insert_list(struct hlist_head *chain, struct xfrm_policy *policy,
181 			bool excl);
182 static void xfrm_policy_insert_inexact_list(struct hlist_head *chain,
183 					    struct xfrm_policy *policy);
184 
185 static bool
186 xfrm_policy_find_inexact_candidates(struct xfrm_pol_inexact_candidates *cand,
187 				    struct xfrm_pol_inexact_bin *b,
188 				    const xfrm_address_t *saddr,
189 				    const xfrm_address_t *daddr);
190 
191 static inline bool xfrm_pol_hold_rcu(struct xfrm_policy *policy)
192 {
193 	return refcount_inc_not_zero(&policy->refcnt);
194 }
195 
196 static inline bool
197 __xfrm4_selector_match(const struct xfrm_selector *sel, const struct flowi *fl)
198 {
199 	const struct flowi4 *fl4 = &fl->u.ip4;
200 
201 	return  addr4_match(fl4->daddr, sel->daddr.a4, sel->prefixlen_d) &&
202 		addr4_match(fl4->saddr, sel->saddr.a4, sel->prefixlen_s) &&
203 		!((xfrm_flowi_dport(fl, &fl4->uli) ^ sel->dport) & sel->dport_mask) &&
204 		!((xfrm_flowi_sport(fl, &fl4->uli) ^ sel->sport) & sel->sport_mask) &&
205 		(fl4->flowi4_proto == sel->proto || !sel->proto) &&
206 		(fl4->flowi4_oif == sel->ifindex || !sel->ifindex);
207 }
208 
209 static inline bool
210 __xfrm6_selector_match(const struct xfrm_selector *sel, const struct flowi *fl)
211 {
212 	const struct flowi6 *fl6 = &fl->u.ip6;
213 
214 	return  addr_match(&fl6->daddr, &sel->daddr, sel->prefixlen_d) &&
215 		addr_match(&fl6->saddr, &sel->saddr, sel->prefixlen_s) &&
216 		!((xfrm_flowi_dport(fl, &fl6->uli) ^ sel->dport) & sel->dport_mask) &&
217 		!((xfrm_flowi_sport(fl, &fl6->uli) ^ sel->sport) & sel->sport_mask) &&
218 		(fl6->flowi6_proto == sel->proto || !sel->proto) &&
219 		(fl6->flowi6_oif == sel->ifindex || !sel->ifindex);
220 }
221 
222 bool xfrm_selector_match(const struct xfrm_selector *sel, const struct flowi *fl,
223 			 unsigned short family)
224 {
225 	switch (family) {
226 	case AF_INET:
227 		return __xfrm4_selector_match(sel, fl);
228 	case AF_INET6:
229 		return __xfrm6_selector_match(sel, fl);
230 	}
231 	return false;
232 }
233 
234 static const struct xfrm_policy_afinfo *xfrm_policy_get_afinfo(unsigned short family)
235 {
236 	const struct xfrm_policy_afinfo *afinfo;
237 
238 	if (unlikely(family >= ARRAY_SIZE(xfrm_policy_afinfo)))
239 		return NULL;
240 	rcu_read_lock();
241 	afinfo = rcu_dereference(xfrm_policy_afinfo[family]);
242 	if (unlikely(!afinfo))
243 		rcu_read_unlock();
244 	return afinfo;
245 }
246 
247 /* Called with rcu_read_lock(). */
248 static const struct xfrm_if_cb *xfrm_if_get_cb(void)
249 {
250 	return rcu_dereference(xfrm_if_cb);
251 }
252 
253 struct dst_entry *__xfrm_dst_lookup(struct net *net, int tos, int oif,
254 				    const xfrm_address_t *saddr,
255 				    const xfrm_address_t *daddr,
256 				    int family, u32 mark)
257 {
258 	const struct xfrm_policy_afinfo *afinfo;
259 	struct dst_entry *dst;
260 
261 	afinfo = xfrm_policy_get_afinfo(family);
262 	if (unlikely(afinfo == NULL))
263 		return ERR_PTR(-EAFNOSUPPORT);
264 
265 	dst = afinfo->dst_lookup(net, tos, oif, saddr, daddr, mark);
266 
267 	rcu_read_unlock();
268 
269 	return dst;
270 }
271 EXPORT_SYMBOL(__xfrm_dst_lookup);
272 
273 static inline struct dst_entry *xfrm_dst_lookup(struct xfrm_state *x,
274 						int tos, int oif,
275 						xfrm_address_t *prev_saddr,
276 						xfrm_address_t *prev_daddr,
277 						int family, u32 mark)
278 {
279 	struct net *net = xs_net(x);
280 	xfrm_address_t *saddr = &x->props.saddr;
281 	xfrm_address_t *daddr = &x->id.daddr;
282 	struct dst_entry *dst;
283 
284 	if (x->type->flags & XFRM_TYPE_LOCAL_COADDR) {
285 		saddr = x->coaddr;
286 		daddr = prev_daddr;
287 	}
288 	if (x->type->flags & XFRM_TYPE_REMOTE_COADDR) {
289 		saddr = prev_saddr;
290 		daddr = x->coaddr;
291 	}
292 
293 	dst = __xfrm_dst_lookup(net, tos, oif, saddr, daddr, family, mark);
294 
295 	if (!IS_ERR(dst)) {
296 		if (prev_saddr != saddr)
297 			memcpy(prev_saddr, saddr,  sizeof(*prev_saddr));
298 		if (prev_daddr != daddr)
299 			memcpy(prev_daddr, daddr,  sizeof(*prev_daddr));
300 	}
301 
302 	return dst;
303 }
304 
305 static inline unsigned long make_jiffies(long secs)
306 {
307 	if (secs >= (MAX_SCHEDULE_TIMEOUT-1)/HZ)
308 		return MAX_SCHEDULE_TIMEOUT-1;
309 	else
310 		return secs*HZ;
311 }
312 
313 static void xfrm_policy_timer(struct timer_list *t)
314 {
315 	struct xfrm_policy *xp = from_timer(xp, t, timer);
316 	time64_t now = ktime_get_real_seconds();
317 	time64_t next = TIME64_MAX;
318 	int warn = 0;
319 	int dir;
320 
321 	read_lock(&xp->lock);
322 
323 	if (unlikely(xp->walk.dead))
324 		goto out;
325 
326 	dir = xfrm_policy_id2dir(xp->index);
327 
328 	if (xp->lft.hard_add_expires_seconds) {
329 		time64_t tmo = xp->lft.hard_add_expires_seconds +
330 			xp->curlft.add_time - now;
331 		if (tmo <= 0)
332 			goto expired;
333 		if (tmo < next)
334 			next = tmo;
335 	}
336 	if (xp->lft.hard_use_expires_seconds) {
337 		time64_t tmo = xp->lft.hard_use_expires_seconds +
338 			(xp->curlft.use_time ? : xp->curlft.add_time) - now;
339 		if (tmo <= 0)
340 			goto expired;
341 		if (tmo < next)
342 			next = tmo;
343 	}
344 	if (xp->lft.soft_add_expires_seconds) {
345 		time64_t tmo = xp->lft.soft_add_expires_seconds +
346 			xp->curlft.add_time - now;
347 		if (tmo <= 0) {
348 			warn = 1;
349 			tmo = XFRM_KM_TIMEOUT;
350 		}
351 		if (tmo < next)
352 			next = tmo;
353 	}
354 	if (xp->lft.soft_use_expires_seconds) {
355 		time64_t tmo = xp->lft.soft_use_expires_seconds +
356 			(xp->curlft.use_time ? : xp->curlft.add_time) - now;
357 		if (tmo <= 0) {
358 			warn = 1;
359 			tmo = XFRM_KM_TIMEOUT;
360 		}
361 		if (tmo < next)
362 			next = tmo;
363 	}
364 
365 	if (warn)
366 		km_policy_expired(xp, dir, 0, 0);
367 	if (next != TIME64_MAX &&
368 	    !mod_timer(&xp->timer, jiffies + make_jiffies(next)))
369 		xfrm_pol_hold(xp);
370 
371 out:
372 	read_unlock(&xp->lock);
373 	xfrm_pol_put(xp);
374 	return;
375 
376 expired:
377 	read_unlock(&xp->lock);
378 	if (!xfrm_policy_delete(xp, dir))
379 		km_policy_expired(xp, dir, 1, 0);
380 	xfrm_pol_put(xp);
381 }
382 
383 /* Allocate xfrm_policy. Not used here, it is supposed to be used by pfkeyv2
384  * SPD calls.
385  */
386 
387 struct xfrm_policy *xfrm_policy_alloc(struct net *net, gfp_t gfp)
388 {
389 	struct xfrm_policy *policy;
390 
391 	policy = kzalloc(sizeof(struct xfrm_policy), gfp);
392 
393 	if (policy) {
394 		write_pnet(&policy->xp_net, net);
395 		INIT_LIST_HEAD(&policy->walk.all);
396 		INIT_HLIST_NODE(&policy->bydst_inexact_list);
397 		INIT_HLIST_NODE(&policy->bydst);
398 		INIT_HLIST_NODE(&policy->byidx);
399 		rwlock_init(&policy->lock);
400 		refcount_set(&policy->refcnt, 1);
401 		skb_queue_head_init(&policy->polq.hold_queue);
402 		timer_setup(&policy->timer, xfrm_policy_timer, 0);
403 		timer_setup(&policy->polq.hold_timer,
404 			    xfrm_policy_queue_process, 0);
405 	}
406 	return policy;
407 }
408 EXPORT_SYMBOL(xfrm_policy_alloc);
409 
410 static void xfrm_policy_destroy_rcu(struct rcu_head *head)
411 {
412 	struct xfrm_policy *policy = container_of(head, struct xfrm_policy, rcu);
413 
414 	security_xfrm_policy_free(policy->security);
415 	kfree(policy);
416 }
417 
418 /* Destroy xfrm_policy: descendant resources must be released to this moment. */
419 
420 void xfrm_policy_destroy(struct xfrm_policy *policy)
421 {
422 	BUG_ON(!policy->walk.dead);
423 
424 	if (del_timer(&policy->timer) || del_timer(&policy->polq.hold_timer))
425 		BUG();
426 
427 	call_rcu(&policy->rcu, xfrm_policy_destroy_rcu);
428 }
429 EXPORT_SYMBOL(xfrm_policy_destroy);
430 
431 /* Rule must be locked. Release descendant resources, announce
432  * entry dead. The rule must be unlinked from lists to the moment.
433  */
434 
435 static void xfrm_policy_kill(struct xfrm_policy *policy)
436 {
437 	policy->walk.dead = 1;
438 
439 	atomic_inc(&policy->genid);
440 
441 	if (del_timer(&policy->polq.hold_timer))
442 		xfrm_pol_put(policy);
443 	skb_queue_purge(&policy->polq.hold_queue);
444 
445 	if (del_timer(&policy->timer))
446 		xfrm_pol_put(policy);
447 
448 	xfrm_pol_put(policy);
449 }
450 
451 static unsigned int xfrm_policy_hashmax __read_mostly = 1 * 1024 * 1024;
452 
453 static inline unsigned int idx_hash(struct net *net, u32 index)
454 {
455 	return __idx_hash(index, net->xfrm.policy_idx_hmask);
456 }
457 
458 /* calculate policy hash thresholds */
459 static void __get_hash_thresh(struct net *net,
460 			      unsigned short family, int dir,
461 			      u8 *dbits, u8 *sbits)
462 {
463 	switch (family) {
464 	case AF_INET:
465 		*dbits = net->xfrm.policy_bydst[dir].dbits4;
466 		*sbits = net->xfrm.policy_bydst[dir].sbits4;
467 		break;
468 
469 	case AF_INET6:
470 		*dbits = net->xfrm.policy_bydst[dir].dbits6;
471 		*sbits = net->xfrm.policy_bydst[dir].sbits6;
472 		break;
473 
474 	default:
475 		*dbits = 0;
476 		*sbits = 0;
477 	}
478 }
479 
480 static struct hlist_head *policy_hash_bysel(struct net *net,
481 					    const struct xfrm_selector *sel,
482 					    unsigned short family, int dir)
483 {
484 	unsigned int hmask = net->xfrm.policy_bydst[dir].hmask;
485 	unsigned int hash;
486 	u8 dbits;
487 	u8 sbits;
488 
489 	__get_hash_thresh(net, family, dir, &dbits, &sbits);
490 	hash = __sel_hash(sel, family, hmask, dbits, sbits);
491 
492 	if (hash == hmask + 1)
493 		return NULL;
494 
495 	return rcu_dereference_check(net->xfrm.policy_bydst[dir].table,
496 		     lockdep_is_held(&net->xfrm.xfrm_policy_lock)) + hash;
497 }
498 
499 static struct hlist_head *policy_hash_direct(struct net *net,
500 					     const xfrm_address_t *daddr,
501 					     const xfrm_address_t *saddr,
502 					     unsigned short family, int dir)
503 {
504 	unsigned int hmask = net->xfrm.policy_bydst[dir].hmask;
505 	unsigned int hash;
506 	u8 dbits;
507 	u8 sbits;
508 
509 	__get_hash_thresh(net, family, dir, &dbits, &sbits);
510 	hash = __addr_hash(daddr, saddr, family, hmask, dbits, sbits);
511 
512 	return rcu_dereference_check(net->xfrm.policy_bydst[dir].table,
513 		     lockdep_is_held(&net->xfrm.xfrm_policy_lock)) + hash;
514 }
515 
516 static void xfrm_dst_hash_transfer(struct net *net,
517 				   struct hlist_head *list,
518 				   struct hlist_head *ndsttable,
519 				   unsigned int nhashmask,
520 				   int dir)
521 {
522 	struct hlist_node *tmp, *entry0 = NULL;
523 	struct xfrm_policy *pol;
524 	unsigned int h0 = 0;
525 	u8 dbits;
526 	u8 sbits;
527 
528 redo:
529 	hlist_for_each_entry_safe(pol, tmp, list, bydst) {
530 		unsigned int h;
531 
532 		__get_hash_thresh(net, pol->family, dir, &dbits, &sbits);
533 		h = __addr_hash(&pol->selector.daddr, &pol->selector.saddr,
534 				pol->family, nhashmask, dbits, sbits);
535 		if (!entry0) {
536 			hlist_del_rcu(&pol->bydst);
537 			hlist_add_head_rcu(&pol->bydst, ndsttable + h);
538 			h0 = h;
539 		} else {
540 			if (h != h0)
541 				continue;
542 			hlist_del_rcu(&pol->bydst);
543 			hlist_add_behind_rcu(&pol->bydst, entry0);
544 		}
545 		entry0 = &pol->bydst;
546 	}
547 	if (!hlist_empty(list)) {
548 		entry0 = NULL;
549 		goto redo;
550 	}
551 }
552 
553 static void xfrm_idx_hash_transfer(struct hlist_head *list,
554 				   struct hlist_head *nidxtable,
555 				   unsigned int nhashmask)
556 {
557 	struct hlist_node *tmp;
558 	struct xfrm_policy *pol;
559 
560 	hlist_for_each_entry_safe(pol, tmp, list, byidx) {
561 		unsigned int h;
562 
563 		h = __idx_hash(pol->index, nhashmask);
564 		hlist_add_head(&pol->byidx, nidxtable+h);
565 	}
566 }
567 
568 static unsigned long xfrm_new_hash_mask(unsigned int old_hmask)
569 {
570 	return ((old_hmask + 1) << 1) - 1;
571 }
572 
573 static void xfrm_bydst_resize(struct net *net, int dir)
574 {
575 	unsigned int hmask = net->xfrm.policy_bydst[dir].hmask;
576 	unsigned int nhashmask = xfrm_new_hash_mask(hmask);
577 	unsigned int nsize = (nhashmask + 1) * sizeof(struct hlist_head);
578 	struct hlist_head *ndst = xfrm_hash_alloc(nsize);
579 	struct hlist_head *odst;
580 	int i;
581 
582 	if (!ndst)
583 		return;
584 
585 	spin_lock_bh(&net->xfrm.xfrm_policy_lock);
586 	write_seqcount_begin(&xfrm_policy_hash_generation);
587 
588 	odst = rcu_dereference_protected(net->xfrm.policy_bydst[dir].table,
589 				lockdep_is_held(&net->xfrm.xfrm_policy_lock));
590 
591 	for (i = hmask; i >= 0; i--)
592 		xfrm_dst_hash_transfer(net, odst + i, ndst, nhashmask, dir);
593 
594 	rcu_assign_pointer(net->xfrm.policy_bydst[dir].table, ndst);
595 	net->xfrm.policy_bydst[dir].hmask = nhashmask;
596 
597 	write_seqcount_end(&xfrm_policy_hash_generation);
598 	spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
599 
600 	synchronize_rcu();
601 
602 	xfrm_hash_free(odst, (hmask + 1) * sizeof(struct hlist_head));
603 }
604 
605 static void xfrm_byidx_resize(struct net *net, int total)
606 {
607 	unsigned int hmask = net->xfrm.policy_idx_hmask;
608 	unsigned int nhashmask = xfrm_new_hash_mask(hmask);
609 	unsigned int nsize = (nhashmask + 1) * sizeof(struct hlist_head);
610 	struct hlist_head *oidx = net->xfrm.policy_byidx;
611 	struct hlist_head *nidx = xfrm_hash_alloc(nsize);
612 	int i;
613 
614 	if (!nidx)
615 		return;
616 
617 	spin_lock_bh(&net->xfrm.xfrm_policy_lock);
618 
619 	for (i = hmask; i >= 0; i--)
620 		xfrm_idx_hash_transfer(oidx + i, nidx, nhashmask);
621 
622 	net->xfrm.policy_byidx = nidx;
623 	net->xfrm.policy_idx_hmask = nhashmask;
624 
625 	spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
626 
627 	xfrm_hash_free(oidx, (hmask + 1) * sizeof(struct hlist_head));
628 }
629 
630 static inline int xfrm_bydst_should_resize(struct net *net, int dir, int *total)
631 {
632 	unsigned int cnt = net->xfrm.policy_count[dir];
633 	unsigned int hmask = net->xfrm.policy_bydst[dir].hmask;
634 
635 	if (total)
636 		*total += cnt;
637 
638 	if ((hmask + 1) < xfrm_policy_hashmax &&
639 	    cnt > hmask)
640 		return 1;
641 
642 	return 0;
643 }
644 
645 static inline int xfrm_byidx_should_resize(struct net *net, int total)
646 {
647 	unsigned int hmask = net->xfrm.policy_idx_hmask;
648 
649 	if ((hmask + 1) < xfrm_policy_hashmax &&
650 	    total > hmask)
651 		return 1;
652 
653 	return 0;
654 }
655 
656 void xfrm_spd_getinfo(struct net *net, struct xfrmk_spdinfo *si)
657 {
658 	si->incnt = net->xfrm.policy_count[XFRM_POLICY_IN];
659 	si->outcnt = net->xfrm.policy_count[XFRM_POLICY_OUT];
660 	si->fwdcnt = net->xfrm.policy_count[XFRM_POLICY_FWD];
661 	si->inscnt = net->xfrm.policy_count[XFRM_POLICY_IN+XFRM_POLICY_MAX];
662 	si->outscnt = net->xfrm.policy_count[XFRM_POLICY_OUT+XFRM_POLICY_MAX];
663 	si->fwdscnt = net->xfrm.policy_count[XFRM_POLICY_FWD+XFRM_POLICY_MAX];
664 	si->spdhcnt = net->xfrm.policy_idx_hmask;
665 	si->spdhmcnt = xfrm_policy_hashmax;
666 }
667 EXPORT_SYMBOL(xfrm_spd_getinfo);
668 
669 static DEFINE_MUTEX(hash_resize_mutex);
670 static void xfrm_hash_resize(struct work_struct *work)
671 {
672 	struct net *net = container_of(work, struct net, xfrm.policy_hash_work);
673 	int dir, total;
674 
675 	mutex_lock(&hash_resize_mutex);
676 
677 	total = 0;
678 	for (dir = 0; dir < XFRM_POLICY_MAX; dir++) {
679 		if (xfrm_bydst_should_resize(net, dir, &total))
680 			xfrm_bydst_resize(net, dir);
681 	}
682 	if (xfrm_byidx_should_resize(net, total))
683 		xfrm_byidx_resize(net, total);
684 
685 	mutex_unlock(&hash_resize_mutex);
686 }
687 
688 /* Make sure *pol can be inserted into fastbin.
689  * Useful to check that later insert requests will be sucessful
690  * (provided xfrm_policy_lock is held throughout).
691  */
692 static struct xfrm_pol_inexact_bin *
693 xfrm_policy_inexact_alloc_bin(const struct xfrm_policy *pol, u8 dir)
694 {
695 	struct xfrm_pol_inexact_bin *bin, *prev;
696 	struct xfrm_pol_inexact_key k = {
697 		.family = pol->family,
698 		.type = pol->type,
699 		.dir = dir,
700 		.if_id = pol->if_id,
701 	};
702 	struct net *net = xp_net(pol);
703 
704 	lockdep_assert_held(&net->xfrm.xfrm_policy_lock);
705 
706 	write_pnet(&k.net, net);
707 	bin = rhashtable_lookup_fast(&xfrm_policy_inexact_table, &k,
708 				     xfrm_pol_inexact_params);
709 	if (bin)
710 		return bin;
711 
712 	bin = kzalloc(sizeof(*bin), GFP_ATOMIC);
713 	if (!bin)
714 		return NULL;
715 
716 	bin->k = k;
717 	INIT_HLIST_HEAD(&bin->hhead);
718 	bin->root_d = RB_ROOT;
719 	bin->root_s = RB_ROOT;
720 	seqcount_init(&bin->count);
721 
722 	prev = rhashtable_lookup_get_insert_key(&xfrm_policy_inexact_table,
723 						&bin->k, &bin->head,
724 						xfrm_pol_inexact_params);
725 	if (!prev) {
726 		list_add(&bin->inexact_bins, &net->xfrm.inexact_bins);
727 		return bin;
728 	}
729 
730 	kfree(bin);
731 
732 	return IS_ERR(prev) ? NULL : prev;
733 }
734 
735 static bool xfrm_pol_inexact_addr_use_any_list(const xfrm_address_t *addr,
736 					       int family, u8 prefixlen)
737 {
738 	if (xfrm_addr_any(addr, family))
739 		return true;
740 
741 	if (family == AF_INET6 && prefixlen < INEXACT_PREFIXLEN_IPV6)
742 		return true;
743 
744 	if (family == AF_INET && prefixlen < INEXACT_PREFIXLEN_IPV4)
745 		return true;
746 
747 	return false;
748 }
749 
750 static bool
751 xfrm_policy_inexact_insert_use_any_list(const struct xfrm_policy *policy)
752 {
753 	const xfrm_address_t *addr;
754 	bool saddr_any, daddr_any;
755 	u8 prefixlen;
756 
757 	addr = &policy->selector.saddr;
758 	prefixlen = policy->selector.prefixlen_s;
759 
760 	saddr_any = xfrm_pol_inexact_addr_use_any_list(addr,
761 						       policy->family,
762 						       prefixlen);
763 	addr = &policy->selector.daddr;
764 	prefixlen = policy->selector.prefixlen_d;
765 	daddr_any = xfrm_pol_inexact_addr_use_any_list(addr,
766 						       policy->family,
767 						       prefixlen);
768 	return saddr_any && daddr_any;
769 }
770 
771 static void xfrm_pol_inexact_node_init(struct xfrm_pol_inexact_node *node,
772 				       const xfrm_address_t *addr, u8 prefixlen)
773 {
774 	node->addr = *addr;
775 	node->prefixlen = prefixlen;
776 }
777 
778 static struct xfrm_pol_inexact_node *
779 xfrm_pol_inexact_node_alloc(const xfrm_address_t *addr, u8 prefixlen)
780 {
781 	struct xfrm_pol_inexact_node *node;
782 
783 	node = kzalloc(sizeof(*node), GFP_ATOMIC);
784 	if (node)
785 		xfrm_pol_inexact_node_init(node, addr, prefixlen);
786 
787 	return node;
788 }
789 
790 static int xfrm_policy_addr_delta(const xfrm_address_t *a,
791 				  const xfrm_address_t *b,
792 				  u8 prefixlen, u16 family)
793 {
794 	unsigned int pdw, pbi;
795 	int delta = 0;
796 
797 	switch (family) {
798 	case AF_INET:
799 		if (sizeof(long) == 4 && prefixlen == 0)
800 			return ntohl(a->a4) - ntohl(b->a4);
801 		return (ntohl(a->a4) & ((~0UL << (32 - prefixlen)))) -
802 		       (ntohl(b->a4) & ((~0UL << (32 - prefixlen))));
803 	case AF_INET6:
804 		pdw = prefixlen >> 5;
805 		pbi = prefixlen & 0x1f;
806 
807 		if (pdw) {
808 			delta = memcmp(a->a6, b->a6, pdw << 2);
809 			if (delta)
810 				return delta;
811 		}
812 		if (pbi) {
813 			u32 mask = ~0u << (32 - pbi);
814 
815 			delta = (ntohl(a->a6[pdw]) & mask) -
816 				(ntohl(b->a6[pdw]) & mask);
817 		}
818 		break;
819 	default:
820 		break;
821 	}
822 
823 	return delta;
824 }
825 
826 static void xfrm_policy_inexact_list_reinsert(struct net *net,
827 					      struct xfrm_pol_inexact_node *n,
828 					      u16 family)
829 {
830 	unsigned int matched_s, matched_d;
831 	struct xfrm_policy *policy, *p;
832 
833 	matched_s = 0;
834 	matched_d = 0;
835 
836 	list_for_each_entry_reverse(policy, &net->xfrm.policy_all, walk.all) {
837 		struct hlist_node *newpos = NULL;
838 		bool matches_s, matches_d;
839 
840 		if (!policy->bydst_reinsert)
841 			continue;
842 
843 		WARN_ON_ONCE(policy->family != family);
844 
845 		policy->bydst_reinsert = false;
846 		hlist_for_each_entry(p, &n->hhead, bydst) {
847 			if (policy->priority > p->priority)
848 				newpos = &p->bydst;
849 			else if (policy->priority == p->priority &&
850 				 policy->pos > p->pos)
851 				newpos = &p->bydst;
852 			else
853 				break;
854 		}
855 
856 		if (newpos)
857 			hlist_add_behind_rcu(&policy->bydst, newpos);
858 		else
859 			hlist_add_head_rcu(&policy->bydst, &n->hhead);
860 
861 		/* paranoia checks follow.
862 		 * Check that the reinserted policy matches at least
863 		 * saddr or daddr for current node prefix.
864 		 *
865 		 * Matching both is fine, matching saddr in one policy
866 		 * (but not daddr) and then matching only daddr in another
867 		 * is a bug.
868 		 */
869 		matches_s = xfrm_policy_addr_delta(&policy->selector.saddr,
870 						   &n->addr,
871 						   n->prefixlen,
872 						   family) == 0;
873 		matches_d = xfrm_policy_addr_delta(&policy->selector.daddr,
874 						   &n->addr,
875 						   n->prefixlen,
876 						   family) == 0;
877 		if (matches_s && matches_d)
878 			continue;
879 
880 		WARN_ON_ONCE(!matches_s && !matches_d);
881 		if (matches_s)
882 			matched_s++;
883 		if (matches_d)
884 			matched_d++;
885 		WARN_ON_ONCE(matched_s && matched_d);
886 	}
887 }
888 
889 static void xfrm_policy_inexact_node_reinsert(struct net *net,
890 					      struct xfrm_pol_inexact_node *n,
891 					      struct rb_root *new,
892 					      u16 family)
893 {
894 	struct xfrm_pol_inexact_node *node;
895 	struct rb_node **p, *parent;
896 
897 	/* we should not have another subtree here */
898 	WARN_ON_ONCE(!RB_EMPTY_ROOT(&n->root));
899 restart:
900 	parent = NULL;
901 	p = &new->rb_node;
902 	while (*p) {
903 		u8 prefixlen;
904 		int delta;
905 
906 		parent = *p;
907 		node = rb_entry(*p, struct xfrm_pol_inexact_node, node);
908 
909 		prefixlen = min(node->prefixlen, n->prefixlen);
910 
911 		delta = xfrm_policy_addr_delta(&n->addr, &node->addr,
912 					       prefixlen, family);
913 		if (delta < 0) {
914 			p = &parent->rb_left;
915 		} else if (delta > 0) {
916 			p = &parent->rb_right;
917 		} else {
918 			bool same_prefixlen = node->prefixlen == n->prefixlen;
919 			struct xfrm_policy *tmp;
920 
921 			hlist_for_each_entry(tmp, &n->hhead, bydst) {
922 				tmp->bydst_reinsert = true;
923 				hlist_del_rcu(&tmp->bydst);
924 			}
925 
926 			node->prefixlen = prefixlen;
927 
928 			xfrm_policy_inexact_list_reinsert(net, node, family);
929 
930 			if (same_prefixlen) {
931 				kfree_rcu(n, rcu);
932 				return;
933 			}
934 
935 			rb_erase(*p, new);
936 			kfree_rcu(n, rcu);
937 			n = node;
938 			goto restart;
939 		}
940 	}
941 
942 	rb_link_node_rcu(&n->node, parent, p);
943 	rb_insert_color(&n->node, new);
944 }
945 
946 /* merge nodes v and n */
947 static void xfrm_policy_inexact_node_merge(struct net *net,
948 					   struct xfrm_pol_inexact_node *v,
949 					   struct xfrm_pol_inexact_node *n,
950 					   u16 family)
951 {
952 	struct xfrm_pol_inexact_node *node;
953 	struct xfrm_policy *tmp;
954 	struct rb_node *rnode;
955 
956 	/* To-be-merged node v has a subtree.
957 	 *
958 	 * Dismantle it and insert its nodes to n->root.
959 	 */
960 	while ((rnode = rb_first(&v->root)) != NULL) {
961 		node = rb_entry(rnode, struct xfrm_pol_inexact_node, node);
962 		rb_erase(&node->node, &v->root);
963 		xfrm_policy_inexact_node_reinsert(net, node, &n->root,
964 						  family);
965 	}
966 
967 	hlist_for_each_entry(tmp, &v->hhead, bydst) {
968 		tmp->bydst_reinsert = true;
969 		hlist_del_rcu(&tmp->bydst);
970 	}
971 
972 	xfrm_policy_inexact_list_reinsert(net, n, family);
973 }
974 
975 static struct xfrm_pol_inexact_node *
976 xfrm_policy_inexact_insert_node(struct net *net,
977 				struct rb_root *root,
978 				xfrm_address_t *addr,
979 				u16 family, u8 prefixlen, u8 dir)
980 {
981 	struct xfrm_pol_inexact_node *cached = NULL;
982 	struct rb_node **p, *parent = NULL;
983 	struct xfrm_pol_inexact_node *node;
984 
985 	p = &root->rb_node;
986 	while (*p) {
987 		int delta;
988 
989 		parent = *p;
990 		node = rb_entry(*p, struct xfrm_pol_inexact_node, node);
991 
992 		delta = xfrm_policy_addr_delta(addr, &node->addr,
993 					       node->prefixlen,
994 					       family);
995 		if (delta == 0 && prefixlen >= node->prefixlen) {
996 			WARN_ON_ONCE(cached); /* ipsec policies got lost */
997 			return node;
998 		}
999 
1000 		if (delta < 0)
1001 			p = &parent->rb_left;
1002 		else
1003 			p = &parent->rb_right;
1004 
1005 		if (prefixlen < node->prefixlen) {
1006 			delta = xfrm_policy_addr_delta(addr, &node->addr,
1007 						       prefixlen,
1008 						       family);
1009 			if (delta)
1010 				continue;
1011 
1012 			/* This node is a subnet of the new prefix. It needs
1013 			 * to be removed and re-inserted with the smaller
1014 			 * prefix and all nodes that are now also covered
1015 			 * by the reduced prefixlen.
1016 			 */
1017 			rb_erase(&node->node, root);
1018 
1019 			if (!cached) {
1020 				xfrm_pol_inexact_node_init(node, addr,
1021 							   prefixlen);
1022 				cached = node;
1023 			} else {
1024 				/* This node also falls within the new
1025 				 * prefixlen. Merge the to-be-reinserted
1026 				 * node and this one.
1027 				 */
1028 				xfrm_policy_inexact_node_merge(net, node,
1029 							       cached, family);
1030 				kfree_rcu(node, rcu);
1031 			}
1032 
1033 			/* restart */
1034 			p = &root->rb_node;
1035 			parent = NULL;
1036 		}
1037 	}
1038 
1039 	node = cached;
1040 	if (!node) {
1041 		node = xfrm_pol_inexact_node_alloc(addr, prefixlen);
1042 		if (!node)
1043 			return NULL;
1044 	}
1045 
1046 	rb_link_node_rcu(&node->node, parent, p);
1047 	rb_insert_color(&node->node, root);
1048 
1049 	return node;
1050 }
1051 
1052 static void xfrm_policy_inexact_gc_tree(struct rb_root *r, bool rm)
1053 {
1054 	struct xfrm_pol_inexact_node *node;
1055 	struct rb_node *rn = rb_first(r);
1056 
1057 	while (rn) {
1058 		node = rb_entry(rn, struct xfrm_pol_inexact_node, node);
1059 
1060 		xfrm_policy_inexact_gc_tree(&node->root, rm);
1061 		rn = rb_next(rn);
1062 
1063 		if (!hlist_empty(&node->hhead) || !RB_EMPTY_ROOT(&node->root)) {
1064 			WARN_ON_ONCE(rm);
1065 			continue;
1066 		}
1067 
1068 		rb_erase(&node->node, r);
1069 		kfree_rcu(node, rcu);
1070 	}
1071 }
1072 
1073 static void __xfrm_policy_inexact_prune_bin(struct xfrm_pol_inexact_bin *b, bool net_exit)
1074 {
1075 	write_seqcount_begin(&b->count);
1076 	xfrm_policy_inexact_gc_tree(&b->root_d, net_exit);
1077 	xfrm_policy_inexact_gc_tree(&b->root_s, net_exit);
1078 	write_seqcount_end(&b->count);
1079 
1080 	if (!RB_EMPTY_ROOT(&b->root_d) || !RB_EMPTY_ROOT(&b->root_s) ||
1081 	    !hlist_empty(&b->hhead)) {
1082 		WARN_ON_ONCE(net_exit);
1083 		return;
1084 	}
1085 
1086 	if (rhashtable_remove_fast(&xfrm_policy_inexact_table, &b->head,
1087 				   xfrm_pol_inexact_params) == 0) {
1088 		list_del(&b->inexact_bins);
1089 		kfree_rcu(b, rcu);
1090 	}
1091 }
1092 
1093 static void xfrm_policy_inexact_prune_bin(struct xfrm_pol_inexact_bin *b)
1094 {
1095 	struct net *net = read_pnet(&b->k.net);
1096 
1097 	spin_lock_bh(&net->xfrm.xfrm_policy_lock);
1098 	__xfrm_policy_inexact_prune_bin(b, false);
1099 	spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
1100 }
1101 
1102 static void __xfrm_policy_inexact_flush(struct net *net)
1103 {
1104 	struct xfrm_pol_inexact_bin *bin, *t;
1105 
1106 	lockdep_assert_held(&net->xfrm.xfrm_policy_lock);
1107 
1108 	list_for_each_entry_safe(bin, t, &net->xfrm.inexact_bins, inexact_bins)
1109 		__xfrm_policy_inexact_prune_bin(bin, false);
1110 }
1111 
1112 static struct hlist_head *
1113 xfrm_policy_inexact_alloc_chain(struct xfrm_pol_inexact_bin *bin,
1114 				struct xfrm_policy *policy, u8 dir)
1115 {
1116 	struct xfrm_pol_inexact_node *n;
1117 	struct net *net;
1118 
1119 	net = xp_net(policy);
1120 	lockdep_assert_held(&net->xfrm.xfrm_policy_lock);
1121 
1122 	if (xfrm_policy_inexact_insert_use_any_list(policy))
1123 		return &bin->hhead;
1124 
1125 	if (xfrm_pol_inexact_addr_use_any_list(&policy->selector.daddr,
1126 					       policy->family,
1127 					       policy->selector.prefixlen_d)) {
1128 		write_seqcount_begin(&bin->count);
1129 		n = xfrm_policy_inexact_insert_node(net,
1130 						    &bin->root_s,
1131 						    &policy->selector.saddr,
1132 						    policy->family,
1133 						    policy->selector.prefixlen_s,
1134 						    dir);
1135 		write_seqcount_end(&bin->count);
1136 		if (!n)
1137 			return NULL;
1138 
1139 		return &n->hhead;
1140 	}
1141 
1142 	/* daddr is fixed */
1143 	write_seqcount_begin(&bin->count);
1144 	n = xfrm_policy_inexact_insert_node(net,
1145 					    &bin->root_d,
1146 					    &policy->selector.daddr,
1147 					    policy->family,
1148 					    policy->selector.prefixlen_d, dir);
1149 	write_seqcount_end(&bin->count);
1150 	if (!n)
1151 		return NULL;
1152 
1153 	/* saddr is wildcard */
1154 	if (xfrm_pol_inexact_addr_use_any_list(&policy->selector.saddr,
1155 					       policy->family,
1156 					       policy->selector.prefixlen_s))
1157 		return &n->hhead;
1158 
1159 	write_seqcount_begin(&bin->count);
1160 	n = xfrm_policy_inexact_insert_node(net,
1161 					    &n->root,
1162 					    &policy->selector.saddr,
1163 					    policy->family,
1164 					    policy->selector.prefixlen_s, dir);
1165 	write_seqcount_end(&bin->count);
1166 	if (!n)
1167 		return NULL;
1168 
1169 	return &n->hhead;
1170 }
1171 
1172 static struct xfrm_policy *
1173 xfrm_policy_inexact_insert(struct xfrm_policy *policy, u8 dir, int excl)
1174 {
1175 	struct xfrm_pol_inexact_bin *bin;
1176 	struct xfrm_policy *delpol;
1177 	struct hlist_head *chain;
1178 	struct net *net;
1179 
1180 	bin = xfrm_policy_inexact_alloc_bin(policy, dir);
1181 	if (!bin)
1182 		return ERR_PTR(-ENOMEM);
1183 
1184 	net = xp_net(policy);
1185 	lockdep_assert_held(&net->xfrm.xfrm_policy_lock);
1186 
1187 	chain = xfrm_policy_inexact_alloc_chain(bin, policy, dir);
1188 	if (!chain) {
1189 		__xfrm_policy_inexact_prune_bin(bin, false);
1190 		return ERR_PTR(-ENOMEM);
1191 	}
1192 
1193 	delpol = xfrm_policy_insert_list(chain, policy, excl);
1194 	if (delpol && excl) {
1195 		__xfrm_policy_inexact_prune_bin(bin, false);
1196 		return ERR_PTR(-EEXIST);
1197 	}
1198 
1199 	chain = &net->xfrm.policy_inexact[dir];
1200 	xfrm_policy_insert_inexact_list(chain, policy);
1201 
1202 	if (delpol)
1203 		__xfrm_policy_inexact_prune_bin(bin, false);
1204 
1205 	return delpol;
1206 }
1207 
1208 static void xfrm_hash_rebuild(struct work_struct *work)
1209 {
1210 	struct net *net = container_of(work, struct net,
1211 				       xfrm.policy_hthresh.work);
1212 	unsigned int hmask;
1213 	struct xfrm_policy *pol;
1214 	struct xfrm_policy *policy;
1215 	struct hlist_head *chain;
1216 	struct hlist_head *odst;
1217 	struct hlist_node *newpos;
1218 	int i;
1219 	int dir;
1220 	unsigned seq;
1221 	u8 lbits4, rbits4, lbits6, rbits6;
1222 
1223 	mutex_lock(&hash_resize_mutex);
1224 
1225 	/* read selector prefixlen thresholds */
1226 	do {
1227 		seq = read_seqbegin(&net->xfrm.policy_hthresh.lock);
1228 
1229 		lbits4 = net->xfrm.policy_hthresh.lbits4;
1230 		rbits4 = net->xfrm.policy_hthresh.rbits4;
1231 		lbits6 = net->xfrm.policy_hthresh.lbits6;
1232 		rbits6 = net->xfrm.policy_hthresh.rbits6;
1233 	} while (read_seqretry(&net->xfrm.policy_hthresh.lock, seq));
1234 
1235 	spin_lock_bh(&net->xfrm.xfrm_policy_lock);
1236 	write_seqcount_begin(&xfrm_policy_hash_generation);
1237 
1238 	/* make sure that we can insert the indirect policies again before
1239 	 * we start with destructive action.
1240 	 */
1241 	list_for_each_entry(policy, &net->xfrm.policy_all, walk.all) {
1242 		struct xfrm_pol_inexact_bin *bin;
1243 		u8 dbits, sbits;
1244 
1245 		dir = xfrm_policy_id2dir(policy->index);
1246 		if (policy->walk.dead || dir >= XFRM_POLICY_MAX)
1247 			continue;
1248 
1249 		if ((dir & XFRM_POLICY_MASK) == XFRM_POLICY_OUT) {
1250 			if (policy->family == AF_INET) {
1251 				dbits = rbits4;
1252 				sbits = lbits4;
1253 			} else {
1254 				dbits = rbits6;
1255 				sbits = lbits6;
1256 			}
1257 		} else {
1258 			if (policy->family == AF_INET) {
1259 				dbits = lbits4;
1260 				sbits = rbits4;
1261 			} else {
1262 				dbits = lbits6;
1263 				sbits = rbits6;
1264 			}
1265 		}
1266 
1267 		if (policy->selector.prefixlen_d < dbits ||
1268 		    policy->selector.prefixlen_s < sbits)
1269 			continue;
1270 
1271 		bin = xfrm_policy_inexact_alloc_bin(policy, dir);
1272 		if (!bin)
1273 			goto out_unlock;
1274 
1275 		if (!xfrm_policy_inexact_alloc_chain(bin, policy, dir))
1276 			goto out_unlock;
1277 	}
1278 
1279 	/* reset the bydst and inexact table in all directions */
1280 	for (dir = 0; dir < XFRM_POLICY_MAX; dir++) {
1281 		struct hlist_node *n;
1282 
1283 		hlist_for_each_entry_safe(policy, n,
1284 					  &net->xfrm.policy_inexact[dir],
1285 					  bydst_inexact_list) {
1286 			hlist_del_rcu(&policy->bydst);
1287 			hlist_del_init(&policy->bydst_inexact_list);
1288 		}
1289 
1290 		hmask = net->xfrm.policy_bydst[dir].hmask;
1291 		odst = net->xfrm.policy_bydst[dir].table;
1292 		for (i = hmask; i >= 0; i--) {
1293 			hlist_for_each_entry_safe(policy, n, odst + i, bydst)
1294 				hlist_del_rcu(&policy->bydst);
1295 		}
1296 		if ((dir & XFRM_POLICY_MASK) == XFRM_POLICY_OUT) {
1297 			/* dir out => dst = remote, src = local */
1298 			net->xfrm.policy_bydst[dir].dbits4 = rbits4;
1299 			net->xfrm.policy_bydst[dir].sbits4 = lbits4;
1300 			net->xfrm.policy_bydst[dir].dbits6 = rbits6;
1301 			net->xfrm.policy_bydst[dir].sbits6 = lbits6;
1302 		} else {
1303 			/* dir in/fwd => dst = local, src = remote */
1304 			net->xfrm.policy_bydst[dir].dbits4 = lbits4;
1305 			net->xfrm.policy_bydst[dir].sbits4 = rbits4;
1306 			net->xfrm.policy_bydst[dir].dbits6 = lbits6;
1307 			net->xfrm.policy_bydst[dir].sbits6 = rbits6;
1308 		}
1309 	}
1310 
1311 	/* re-insert all policies by order of creation */
1312 	list_for_each_entry_reverse(policy, &net->xfrm.policy_all, walk.all) {
1313 		if (policy->walk.dead)
1314 			continue;
1315 		dir = xfrm_policy_id2dir(policy->index);
1316 		if (dir >= XFRM_POLICY_MAX) {
1317 			/* skip socket policies */
1318 			continue;
1319 		}
1320 		newpos = NULL;
1321 		chain = policy_hash_bysel(net, &policy->selector,
1322 					  policy->family, dir);
1323 
1324 		if (!chain) {
1325 			void *p = xfrm_policy_inexact_insert(policy, dir, 0);
1326 
1327 			WARN_ONCE(IS_ERR(p), "reinsert: %ld\n", PTR_ERR(p));
1328 			continue;
1329 		}
1330 
1331 		hlist_for_each_entry(pol, chain, bydst) {
1332 			if (policy->priority >= pol->priority)
1333 				newpos = &pol->bydst;
1334 			else
1335 				break;
1336 		}
1337 		if (newpos)
1338 			hlist_add_behind_rcu(&policy->bydst, newpos);
1339 		else
1340 			hlist_add_head_rcu(&policy->bydst, chain);
1341 	}
1342 
1343 out_unlock:
1344 	__xfrm_policy_inexact_flush(net);
1345 	write_seqcount_end(&xfrm_policy_hash_generation);
1346 	spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
1347 
1348 	mutex_unlock(&hash_resize_mutex);
1349 }
1350 
1351 void xfrm_policy_hash_rebuild(struct net *net)
1352 {
1353 	schedule_work(&net->xfrm.policy_hthresh.work);
1354 }
1355 EXPORT_SYMBOL(xfrm_policy_hash_rebuild);
1356 
1357 /* Generate new index... KAME seems to generate them ordered by cost
1358  * of an absolute inpredictability of ordering of rules. This will not pass. */
1359 static u32 xfrm_gen_index(struct net *net, int dir, u32 index)
1360 {
1361 	static u32 idx_generator;
1362 
1363 	for (;;) {
1364 		struct hlist_head *list;
1365 		struct xfrm_policy *p;
1366 		u32 idx;
1367 		int found;
1368 
1369 		if (!index) {
1370 			idx = (idx_generator | dir);
1371 			idx_generator += 8;
1372 		} else {
1373 			idx = index;
1374 			index = 0;
1375 		}
1376 
1377 		if (idx == 0)
1378 			idx = 8;
1379 		list = net->xfrm.policy_byidx + idx_hash(net, idx);
1380 		found = 0;
1381 		hlist_for_each_entry(p, list, byidx) {
1382 			if (p->index == idx) {
1383 				found = 1;
1384 				break;
1385 			}
1386 		}
1387 		if (!found)
1388 			return idx;
1389 	}
1390 }
1391 
1392 static inline int selector_cmp(struct xfrm_selector *s1, struct xfrm_selector *s2)
1393 {
1394 	u32 *p1 = (u32 *) s1;
1395 	u32 *p2 = (u32 *) s2;
1396 	int len = sizeof(struct xfrm_selector) / sizeof(u32);
1397 	int i;
1398 
1399 	for (i = 0; i < len; i++) {
1400 		if (p1[i] != p2[i])
1401 			return 1;
1402 	}
1403 
1404 	return 0;
1405 }
1406 
1407 static void xfrm_policy_requeue(struct xfrm_policy *old,
1408 				struct xfrm_policy *new)
1409 {
1410 	struct xfrm_policy_queue *pq = &old->polq;
1411 	struct sk_buff_head list;
1412 
1413 	if (skb_queue_empty(&pq->hold_queue))
1414 		return;
1415 
1416 	__skb_queue_head_init(&list);
1417 
1418 	spin_lock_bh(&pq->hold_queue.lock);
1419 	skb_queue_splice_init(&pq->hold_queue, &list);
1420 	if (del_timer(&pq->hold_timer))
1421 		xfrm_pol_put(old);
1422 	spin_unlock_bh(&pq->hold_queue.lock);
1423 
1424 	pq = &new->polq;
1425 
1426 	spin_lock_bh(&pq->hold_queue.lock);
1427 	skb_queue_splice(&list, &pq->hold_queue);
1428 	pq->timeout = XFRM_QUEUE_TMO_MIN;
1429 	if (!mod_timer(&pq->hold_timer, jiffies))
1430 		xfrm_pol_hold(new);
1431 	spin_unlock_bh(&pq->hold_queue.lock);
1432 }
1433 
1434 static bool xfrm_policy_mark_match(struct xfrm_policy *policy,
1435 				   struct xfrm_policy *pol)
1436 {
1437 	u32 mark = policy->mark.v & policy->mark.m;
1438 
1439 	if (policy->mark.v == pol->mark.v && policy->mark.m == pol->mark.m)
1440 		return true;
1441 
1442 	if ((mark & pol->mark.m) == pol->mark.v &&
1443 	    policy->priority == pol->priority)
1444 		return true;
1445 
1446 	return false;
1447 }
1448 
1449 static u32 xfrm_pol_bin_key(const void *data, u32 len, u32 seed)
1450 {
1451 	const struct xfrm_pol_inexact_key *k = data;
1452 	u32 a = k->type << 24 | k->dir << 16 | k->family;
1453 
1454 	return jhash_3words(a, k->if_id, net_hash_mix(read_pnet(&k->net)),
1455 			    seed);
1456 }
1457 
1458 static u32 xfrm_pol_bin_obj(const void *data, u32 len, u32 seed)
1459 {
1460 	const struct xfrm_pol_inexact_bin *b = data;
1461 
1462 	return xfrm_pol_bin_key(&b->k, 0, seed);
1463 }
1464 
1465 static int xfrm_pol_bin_cmp(struct rhashtable_compare_arg *arg,
1466 			    const void *ptr)
1467 {
1468 	const struct xfrm_pol_inexact_key *key = arg->key;
1469 	const struct xfrm_pol_inexact_bin *b = ptr;
1470 	int ret;
1471 
1472 	if (!net_eq(read_pnet(&b->k.net), read_pnet(&key->net)))
1473 		return -1;
1474 
1475 	ret = b->k.dir ^ key->dir;
1476 	if (ret)
1477 		return ret;
1478 
1479 	ret = b->k.type ^ key->type;
1480 	if (ret)
1481 		return ret;
1482 
1483 	ret = b->k.family ^ key->family;
1484 	if (ret)
1485 		return ret;
1486 
1487 	return b->k.if_id ^ key->if_id;
1488 }
1489 
1490 static const struct rhashtable_params xfrm_pol_inexact_params = {
1491 	.head_offset		= offsetof(struct xfrm_pol_inexact_bin, head),
1492 	.hashfn			= xfrm_pol_bin_key,
1493 	.obj_hashfn		= xfrm_pol_bin_obj,
1494 	.obj_cmpfn		= xfrm_pol_bin_cmp,
1495 	.automatic_shrinking	= true,
1496 };
1497 
1498 static void xfrm_policy_insert_inexact_list(struct hlist_head *chain,
1499 					    struct xfrm_policy *policy)
1500 {
1501 	struct xfrm_policy *pol, *delpol = NULL;
1502 	struct hlist_node *newpos = NULL;
1503 	int i = 0;
1504 
1505 	hlist_for_each_entry(pol, chain, bydst_inexact_list) {
1506 		if (pol->type == policy->type &&
1507 		    pol->if_id == policy->if_id &&
1508 		    !selector_cmp(&pol->selector, &policy->selector) &&
1509 		    xfrm_policy_mark_match(policy, pol) &&
1510 		    xfrm_sec_ctx_match(pol->security, policy->security) &&
1511 		    !WARN_ON(delpol)) {
1512 			delpol = pol;
1513 			if (policy->priority > pol->priority)
1514 				continue;
1515 		} else if (policy->priority >= pol->priority) {
1516 			newpos = &pol->bydst_inexact_list;
1517 			continue;
1518 		}
1519 		if (delpol)
1520 			break;
1521 	}
1522 
1523 	if (newpos)
1524 		hlist_add_behind_rcu(&policy->bydst_inexact_list, newpos);
1525 	else
1526 		hlist_add_head_rcu(&policy->bydst_inexact_list, chain);
1527 
1528 	hlist_for_each_entry(pol, chain, bydst_inexact_list) {
1529 		pol->pos = i;
1530 		i++;
1531 	}
1532 }
1533 
1534 static struct xfrm_policy *xfrm_policy_insert_list(struct hlist_head *chain,
1535 						   struct xfrm_policy *policy,
1536 						   bool excl)
1537 {
1538 	struct xfrm_policy *pol, *newpos = NULL, *delpol = NULL;
1539 
1540 	hlist_for_each_entry(pol, chain, bydst) {
1541 		if (pol->type == policy->type &&
1542 		    pol->if_id == policy->if_id &&
1543 		    !selector_cmp(&pol->selector, &policy->selector) &&
1544 		    xfrm_policy_mark_match(policy, pol) &&
1545 		    xfrm_sec_ctx_match(pol->security, policy->security) &&
1546 		    !WARN_ON(delpol)) {
1547 			if (excl)
1548 				return ERR_PTR(-EEXIST);
1549 			delpol = pol;
1550 			if (policy->priority > pol->priority)
1551 				continue;
1552 		} else if (policy->priority >= pol->priority) {
1553 			newpos = pol;
1554 			continue;
1555 		}
1556 		if (delpol)
1557 			break;
1558 	}
1559 
1560 	if (newpos)
1561 		hlist_add_behind_rcu(&policy->bydst, &newpos->bydst);
1562 	else
1563 		hlist_add_head_rcu(&policy->bydst, chain);
1564 
1565 	return delpol;
1566 }
1567 
1568 int xfrm_policy_insert(int dir, struct xfrm_policy *policy, int excl)
1569 {
1570 	struct net *net = xp_net(policy);
1571 	struct xfrm_policy *delpol;
1572 	struct hlist_head *chain;
1573 
1574 	spin_lock_bh(&net->xfrm.xfrm_policy_lock);
1575 	chain = policy_hash_bysel(net, &policy->selector, policy->family, dir);
1576 	if (chain)
1577 		delpol = xfrm_policy_insert_list(chain, policy, excl);
1578 	else
1579 		delpol = xfrm_policy_inexact_insert(policy, dir, excl);
1580 
1581 	if (IS_ERR(delpol)) {
1582 		spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
1583 		return PTR_ERR(delpol);
1584 	}
1585 
1586 	__xfrm_policy_link(policy, dir);
1587 
1588 	/* After previous checking, family can either be AF_INET or AF_INET6 */
1589 	if (policy->family == AF_INET)
1590 		rt_genid_bump_ipv4(net);
1591 	else
1592 		rt_genid_bump_ipv6(net);
1593 
1594 	if (delpol) {
1595 		xfrm_policy_requeue(delpol, policy);
1596 		__xfrm_policy_unlink(delpol, dir);
1597 	}
1598 	policy->index = delpol ? delpol->index : xfrm_gen_index(net, dir, policy->index);
1599 	hlist_add_head(&policy->byidx, net->xfrm.policy_byidx+idx_hash(net, policy->index));
1600 	policy->curlft.add_time = ktime_get_real_seconds();
1601 	policy->curlft.use_time = 0;
1602 	if (!mod_timer(&policy->timer, jiffies + HZ))
1603 		xfrm_pol_hold(policy);
1604 	spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
1605 
1606 	if (delpol)
1607 		xfrm_policy_kill(delpol);
1608 	else if (xfrm_bydst_should_resize(net, dir, NULL))
1609 		schedule_work(&net->xfrm.policy_hash_work);
1610 
1611 	return 0;
1612 }
1613 EXPORT_SYMBOL(xfrm_policy_insert);
1614 
1615 static struct xfrm_policy *
1616 __xfrm_policy_bysel_ctx(struct hlist_head *chain, u32 mark, u32 if_id,
1617 			u8 type, int dir,
1618 			struct xfrm_selector *sel,
1619 			struct xfrm_sec_ctx *ctx)
1620 {
1621 	struct xfrm_policy *pol;
1622 
1623 	if (!chain)
1624 		return NULL;
1625 
1626 	hlist_for_each_entry(pol, chain, bydst) {
1627 		if (pol->type == type &&
1628 		    pol->if_id == if_id &&
1629 		    (mark & pol->mark.m) == pol->mark.v &&
1630 		    !selector_cmp(sel, &pol->selector) &&
1631 		    xfrm_sec_ctx_match(ctx, pol->security))
1632 			return pol;
1633 	}
1634 
1635 	return NULL;
1636 }
1637 
1638 struct xfrm_policy *xfrm_policy_bysel_ctx(struct net *net, u32 mark, u32 if_id,
1639 					  u8 type, int dir,
1640 					  struct xfrm_selector *sel,
1641 					  struct xfrm_sec_ctx *ctx, int delete,
1642 					  int *err)
1643 {
1644 	struct xfrm_pol_inexact_bin *bin = NULL;
1645 	struct xfrm_policy *pol, *ret = NULL;
1646 	struct hlist_head *chain;
1647 
1648 	*err = 0;
1649 	spin_lock_bh(&net->xfrm.xfrm_policy_lock);
1650 	chain = policy_hash_bysel(net, sel, sel->family, dir);
1651 	if (!chain) {
1652 		struct xfrm_pol_inexact_candidates cand;
1653 		int i;
1654 
1655 		bin = xfrm_policy_inexact_lookup(net, type,
1656 						 sel->family, dir, if_id);
1657 		if (!bin) {
1658 			spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
1659 			return NULL;
1660 		}
1661 
1662 		if (!xfrm_policy_find_inexact_candidates(&cand, bin,
1663 							 &sel->saddr,
1664 							 &sel->daddr)) {
1665 			spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
1666 			return NULL;
1667 		}
1668 
1669 		pol = NULL;
1670 		for (i = 0; i < ARRAY_SIZE(cand.res); i++) {
1671 			struct xfrm_policy *tmp;
1672 
1673 			tmp = __xfrm_policy_bysel_ctx(cand.res[i], mark,
1674 						      if_id, type, dir,
1675 						      sel, ctx);
1676 			if (!tmp)
1677 				continue;
1678 
1679 			if (!pol || tmp->pos < pol->pos)
1680 				pol = tmp;
1681 		}
1682 	} else {
1683 		pol = __xfrm_policy_bysel_ctx(chain, mark, if_id, type, dir,
1684 					      sel, ctx);
1685 	}
1686 
1687 	if (pol) {
1688 		xfrm_pol_hold(pol);
1689 		if (delete) {
1690 			*err = security_xfrm_policy_delete(pol->security);
1691 			if (*err) {
1692 				spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
1693 				return pol;
1694 			}
1695 			__xfrm_policy_unlink(pol, dir);
1696 		}
1697 		ret = pol;
1698 	}
1699 	spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
1700 
1701 	if (ret && delete)
1702 		xfrm_policy_kill(ret);
1703 	if (bin && delete)
1704 		xfrm_policy_inexact_prune_bin(bin);
1705 	return ret;
1706 }
1707 EXPORT_SYMBOL(xfrm_policy_bysel_ctx);
1708 
1709 struct xfrm_policy *xfrm_policy_byid(struct net *net, u32 mark, u32 if_id,
1710 				     u8 type, int dir, u32 id, int delete,
1711 				     int *err)
1712 {
1713 	struct xfrm_policy *pol, *ret;
1714 	struct hlist_head *chain;
1715 
1716 	*err = -ENOENT;
1717 	if (xfrm_policy_id2dir(id) != dir)
1718 		return NULL;
1719 
1720 	*err = 0;
1721 	spin_lock_bh(&net->xfrm.xfrm_policy_lock);
1722 	chain = net->xfrm.policy_byidx + idx_hash(net, id);
1723 	ret = NULL;
1724 	hlist_for_each_entry(pol, chain, byidx) {
1725 		if (pol->type == type && pol->index == id &&
1726 		    pol->if_id == if_id &&
1727 		    (mark & pol->mark.m) == pol->mark.v) {
1728 			xfrm_pol_hold(pol);
1729 			if (delete) {
1730 				*err = security_xfrm_policy_delete(
1731 								pol->security);
1732 				if (*err) {
1733 					spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
1734 					return pol;
1735 				}
1736 				__xfrm_policy_unlink(pol, dir);
1737 			}
1738 			ret = pol;
1739 			break;
1740 		}
1741 	}
1742 	spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
1743 
1744 	if (ret && delete)
1745 		xfrm_policy_kill(ret);
1746 	return ret;
1747 }
1748 EXPORT_SYMBOL(xfrm_policy_byid);
1749 
1750 #ifdef CONFIG_SECURITY_NETWORK_XFRM
1751 static inline int
1752 xfrm_policy_flush_secctx_check(struct net *net, u8 type, bool task_valid)
1753 {
1754 	struct xfrm_policy *pol;
1755 	int err = 0;
1756 
1757 	list_for_each_entry(pol, &net->xfrm.policy_all, walk.all) {
1758 		if (pol->walk.dead ||
1759 		    xfrm_policy_id2dir(pol->index) >= XFRM_POLICY_MAX ||
1760 		    pol->type != type)
1761 			continue;
1762 
1763 		err = security_xfrm_policy_delete(pol->security);
1764 		if (err) {
1765 			xfrm_audit_policy_delete(pol, 0, task_valid);
1766 			return err;
1767 		}
1768 	}
1769 	return err;
1770 }
1771 #else
1772 static inline int
1773 xfrm_policy_flush_secctx_check(struct net *net, u8 type, bool task_valid)
1774 {
1775 	return 0;
1776 }
1777 #endif
1778 
1779 int xfrm_policy_flush(struct net *net, u8 type, bool task_valid)
1780 {
1781 	int dir, err = 0, cnt = 0;
1782 	struct xfrm_policy *pol;
1783 
1784 	spin_lock_bh(&net->xfrm.xfrm_policy_lock);
1785 
1786 	err = xfrm_policy_flush_secctx_check(net, type, task_valid);
1787 	if (err)
1788 		goto out;
1789 
1790 again:
1791 	list_for_each_entry(pol, &net->xfrm.policy_all, walk.all) {
1792 		dir = xfrm_policy_id2dir(pol->index);
1793 		if (pol->walk.dead ||
1794 		    dir >= XFRM_POLICY_MAX ||
1795 		    pol->type != type)
1796 			continue;
1797 
1798 		__xfrm_policy_unlink(pol, dir);
1799 		spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
1800 		cnt++;
1801 		xfrm_audit_policy_delete(pol, 1, task_valid);
1802 		xfrm_policy_kill(pol);
1803 		spin_lock_bh(&net->xfrm.xfrm_policy_lock);
1804 		goto again;
1805 	}
1806 	if (cnt)
1807 		__xfrm_policy_inexact_flush(net);
1808 	else
1809 		err = -ESRCH;
1810 out:
1811 	spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
1812 	return err;
1813 }
1814 EXPORT_SYMBOL(xfrm_policy_flush);
1815 
1816 int xfrm_policy_walk(struct net *net, struct xfrm_policy_walk *walk,
1817 		     int (*func)(struct xfrm_policy *, int, int, void*),
1818 		     void *data)
1819 {
1820 	struct xfrm_policy *pol;
1821 	struct xfrm_policy_walk_entry *x;
1822 	int error = 0;
1823 
1824 	if (walk->type >= XFRM_POLICY_TYPE_MAX &&
1825 	    walk->type != XFRM_POLICY_TYPE_ANY)
1826 		return -EINVAL;
1827 
1828 	if (list_empty(&walk->walk.all) && walk->seq != 0)
1829 		return 0;
1830 
1831 	spin_lock_bh(&net->xfrm.xfrm_policy_lock);
1832 	if (list_empty(&walk->walk.all))
1833 		x = list_first_entry(&net->xfrm.policy_all, struct xfrm_policy_walk_entry, all);
1834 	else
1835 		x = list_first_entry(&walk->walk.all,
1836 				     struct xfrm_policy_walk_entry, all);
1837 
1838 	list_for_each_entry_from(x, &net->xfrm.policy_all, all) {
1839 		if (x->dead)
1840 			continue;
1841 		pol = container_of(x, struct xfrm_policy, walk);
1842 		if (walk->type != XFRM_POLICY_TYPE_ANY &&
1843 		    walk->type != pol->type)
1844 			continue;
1845 		error = func(pol, xfrm_policy_id2dir(pol->index),
1846 			     walk->seq, data);
1847 		if (error) {
1848 			list_move_tail(&walk->walk.all, &x->all);
1849 			goto out;
1850 		}
1851 		walk->seq++;
1852 	}
1853 	if (walk->seq == 0) {
1854 		error = -ENOENT;
1855 		goto out;
1856 	}
1857 	list_del_init(&walk->walk.all);
1858 out:
1859 	spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
1860 	return error;
1861 }
1862 EXPORT_SYMBOL(xfrm_policy_walk);
1863 
1864 void xfrm_policy_walk_init(struct xfrm_policy_walk *walk, u8 type)
1865 {
1866 	INIT_LIST_HEAD(&walk->walk.all);
1867 	walk->walk.dead = 1;
1868 	walk->type = type;
1869 	walk->seq = 0;
1870 }
1871 EXPORT_SYMBOL(xfrm_policy_walk_init);
1872 
1873 void xfrm_policy_walk_done(struct xfrm_policy_walk *walk, struct net *net)
1874 {
1875 	if (list_empty(&walk->walk.all))
1876 		return;
1877 
1878 	spin_lock_bh(&net->xfrm.xfrm_policy_lock); /*FIXME where is net? */
1879 	list_del(&walk->walk.all);
1880 	spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
1881 }
1882 EXPORT_SYMBOL(xfrm_policy_walk_done);
1883 
1884 /*
1885  * Find policy to apply to this flow.
1886  *
1887  * Returns 0 if policy found, else an -errno.
1888  */
1889 static int xfrm_policy_match(const struct xfrm_policy *pol,
1890 			     const struct flowi *fl,
1891 			     u8 type, u16 family, int dir, u32 if_id)
1892 {
1893 	const struct xfrm_selector *sel = &pol->selector;
1894 	int ret = -ESRCH;
1895 	bool match;
1896 
1897 	if (pol->family != family ||
1898 	    pol->if_id != if_id ||
1899 	    (fl->flowi_mark & pol->mark.m) != pol->mark.v ||
1900 	    pol->type != type)
1901 		return ret;
1902 
1903 	match = xfrm_selector_match(sel, fl, family);
1904 	if (match)
1905 		ret = security_xfrm_policy_lookup(pol->security, fl->flowi_secid,
1906 						  dir);
1907 	return ret;
1908 }
1909 
1910 static struct xfrm_pol_inexact_node *
1911 xfrm_policy_lookup_inexact_addr(const struct rb_root *r,
1912 				seqcount_t *count,
1913 				const xfrm_address_t *addr, u16 family)
1914 {
1915 	const struct rb_node *parent;
1916 	int seq;
1917 
1918 again:
1919 	seq = read_seqcount_begin(count);
1920 
1921 	parent = rcu_dereference_raw(r->rb_node);
1922 	while (parent) {
1923 		struct xfrm_pol_inexact_node *node;
1924 		int delta;
1925 
1926 		node = rb_entry(parent, struct xfrm_pol_inexact_node, node);
1927 
1928 		delta = xfrm_policy_addr_delta(addr, &node->addr,
1929 					       node->prefixlen, family);
1930 		if (delta < 0) {
1931 			parent = rcu_dereference_raw(parent->rb_left);
1932 			continue;
1933 		} else if (delta > 0) {
1934 			parent = rcu_dereference_raw(parent->rb_right);
1935 			continue;
1936 		}
1937 
1938 		return node;
1939 	}
1940 
1941 	if (read_seqcount_retry(count, seq))
1942 		goto again;
1943 
1944 	return NULL;
1945 }
1946 
1947 static bool
1948 xfrm_policy_find_inexact_candidates(struct xfrm_pol_inexact_candidates *cand,
1949 				    struct xfrm_pol_inexact_bin *b,
1950 				    const xfrm_address_t *saddr,
1951 				    const xfrm_address_t *daddr)
1952 {
1953 	struct xfrm_pol_inexact_node *n;
1954 	u16 family;
1955 
1956 	if (!b)
1957 		return false;
1958 
1959 	family = b->k.family;
1960 	memset(cand, 0, sizeof(*cand));
1961 	cand->res[XFRM_POL_CAND_ANY] = &b->hhead;
1962 
1963 	n = xfrm_policy_lookup_inexact_addr(&b->root_d, &b->count, daddr,
1964 					    family);
1965 	if (n) {
1966 		cand->res[XFRM_POL_CAND_DADDR] = &n->hhead;
1967 		n = xfrm_policy_lookup_inexact_addr(&n->root, &b->count, saddr,
1968 						    family);
1969 		if (n)
1970 			cand->res[XFRM_POL_CAND_BOTH] = &n->hhead;
1971 	}
1972 
1973 	n = xfrm_policy_lookup_inexact_addr(&b->root_s, &b->count, saddr,
1974 					    family);
1975 	if (n)
1976 		cand->res[XFRM_POL_CAND_SADDR] = &n->hhead;
1977 
1978 	return true;
1979 }
1980 
1981 static struct xfrm_pol_inexact_bin *
1982 xfrm_policy_inexact_lookup_rcu(struct net *net, u8 type, u16 family,
1983 			       u8 dir, u32 if_id)
1984 {
1985 	struct xfrm_pol_inexact_key k = {
1986 		.family = family,
1987 		.type = type,
1988 		.dir = dir,
1989 		.if_id = if_id,
1990 	};
1991 
1992 	write_pnet(&k.net, net);
1993 
1994 	return rhashtable_lookup(&xfrm_policy_inexact_table, &k,
1995 				 xfrm_pol_inexact_params);
1996 }
1997 
1998 static struct xfrm_pol_inexact_bin *
1999 xfrm_policy_inexact_lookup(struct net *net, u8 type, u16 family,
2000 			   u8 dir, u32 if_id)
2001 {
2002 	struct xfrm_pol_inexact_bin *bin;
2003 
2004 	lockdep_assert_held(&net->xfrm.xfrm_policy_lock);
2005 
2006 	rcu_read_lock();
2007 	bin = xfrm_policy_inexact_lookup_rcu(net, type, family, dir, if_id);
2008 	rcu_read_unlock();
2009 
2010 	return bin;
2011 }
2012 
2013 static struct xfrm_policy *
2014 __xfrm_policy_eval_candidates(struct hlist_head *chain,
2015 			      struct xfrm_policy *prefer,
2016 			      const struct flowi *fl,
2017 			      u8 type, u16 family, int dir, u32 if_id)
2018 {
2019 	u32 priority = prefer ? prefer->priority : ~0u;
2020 	struct xfrm_policy *pol;
2021 
2022 	if (!chain)
2023 		return NULL;
2024 
2025 	hlist_for_each_entry_rcu(pol, chain, bydst) {
2026 		int err;
2027 
2028 		if (pol->priority > priority)
2029 			break;
2030 
2031 		err = xfrm_policy_match(pol, fl, type, family, dir, if_id);
2032 		if (err) {
2033 			if (err != -ESRCH)
2034 				return ERR_PTR(err);
2035 
2036 			continue;
2037 		}
2038 
2039 		if (prefer) {
2040 			/* matches.  Is it older than *prefer? */
2041 			if (pol->priority == priority &&
2042 			    prefer->pos < pol->pos)
2043 				return prefer;
2044 		}
2045 
2046 		return pol;
2047 	}
2048 
2049 	return NULL;
2050 }
2051 
2052 static struct xfrm_policy *
2053 xfrm_policy_eval_candidates(struct xfrm_pol_inexact_candidates *cand,
2054 			    struct xfrm_policy *prefer,
2055 			    const struct flowi *fl,
2056 			    u8 type, u16 family, int dir, u32 if_id)
2057 {
2058 	struct xfrm_policy *tmp;
2059 	int i;
2060 
2061 	for (i = 0; i < ARRAY_SIZE(cand->res); i++) {
2062 		tmp = __xfrm_policy_eval_candidates(cand->res[i],
2063 						    prefer,
2064 						    fl, type, family, dir,
2065 						    if_id);
2066 		if (!tmp)
2067 			continue;
2068 
2069 		if (IS_ERR(tmp))
2070 			return tmp;
2071 		prefer = tmp;
2072 	}
2073 
2074 	return prefer;
2075 }
2076 
2077 static struct xfrm_policy *xfrm_policy_lookup_bytype(struct net *net, u8 type,
2078 						     const struct flowi *fl,
2079 						     u16 family, u8 dir,
2080 						     u32 if_id)
2081 {
2082 	struct xfrm_pol_inexact_candidates cand;
2083 	const xfrm_address_t *daddr, *saddr;
2084 	struct xfrm_pol_inexact_bin *bin;
2085 	struct xfrm_policy *pol, *ret;
2086 	struct hlist_head *chain;
2087 	unsigned int sequence;
2088 	int err;
2089 
2090 	daddr = xfrm_flowi_daddr(fl, family);
2091 	saddr = xfrm_flowi_saddr(fl, family);
2092 	if (unlikely(!daddr || !saddr))
2093 		return NULL;
2094 
2095 	rcu_read_lock();
2096  retry:
2097 	do {
2098 		sequence = read_seqcount_begin(&xfrm_policy_hash_generation);
2099 		chain = policy_hash_direct(net, daddr, saddr, family, dir);
2100 	} while (read_seqcount_retry(&xfrm_policy_hash_generation, sequence));
2101 
2102 	ret = NULL;
2103 	hlist_for_each_entry_rcu(pol, chain, bydst) {
2104 		err = xfrm_policy_match(pol, fl, type, family, dir, if_id);
2105 		if (err) {
2106 			if (err == -ESRCH)
2107 				continue;
2108 			else {
2109 				ret = ERR_PTR(err);
2110 				goto fail;
2111 			}
2112 		} else {
2113 			ret = pol;
2114 			break;
2115 		}
2116 	}
2117 	bin = xfrm_policy_inexact_lookup_rcu(net, type, family, dir, if_id);
2118 	if (!bin || !xfrm_policy_find_inexact_candidates(&cand, bin, saddr,
2119 							 daddr))
2120 		goto skip_inexact;
2121 
2122 	pol = xfrm_policy_eval_candidates(&cand, ret, fl, type,
2123 					  family, dir, if_id);
2124 	if (pol) {
2125 		ret = pol;
2126 		if (IS_ERR(pol))
2127 			goto fail;
2128 	}
2129 
2130 skip_inexact:
2131 	if (read_seqcount_retry(&xfrm_policy_hash_generation, sequence))
2132 		goto retry;
2133 
2134 	if (ret && !xfrm_pol_hold_rcu(ret))
2135 		goto retry;
2136 fail:
2137 	rcu_read_unlock();
2138 
2139 	return ret;
2140 }
2141 
2142 static struct xfrm_policy *xfrm_policy_lookup(struct net *net,
2143 					      const struct flowi *fl,
2144 					      u16 family, u8 dir, u32 if_id)
2145 {
2146 #ifdef CONFIG_XFRM_SUB_POLICY
2147 	struct xfrm_policy *pol;
2148 
2149 	pol = xfrm_policy_lookup_bytype(net, XFRM_POLICY_TYPE_SUB, fl, family,
2150 					dir, if_id);
2151 	if (pol != NULL)
2152 		return pol;
2153 #endif
2154 	return xfrm_policy_lookup_bytype(net, XFRM_POLICY_TYPE_MAIN, fl, family,
2155 					 dir, if_id);
2156 }
2157 
2158 static struct xfrm_policy *xfrm_sk_policy_lookup(const struct sock *sk, int dir,
2159 						 const struct flowi *fl,
2160 						 u16 family, u32 if_id)
2161 {
2162 	struct xfrm_policy *pol;
2163 
2164 	rcu_read_lock();
2165  again:
2166 	pol = rcu_dereference(sk->sk_policy[dir]);
2167 	if (pol != NULL) {
2168 		bool match;
2169 		int err = 0;
2170 
2171 		if (pol->family != family) {
2172 			pol = NULL;
2173 			goto out;
2174 		}
2175 
2176 		match = xfrm_selector_match(&pol->selector, fl, family);
2177 		if (match) {
2178 			if ((sk->sk_mark & pol->mark.m) != pol->mark.v ||
2179 			    pol->if_id != if_id) {
2180 				pol = NULL;
2181 				goto out;
2182 			}
2183 			err = security_xfrm_policy_lookup(pol->security,
2184 						      fl->flowi_secid,
2185 						      dir);
2186 			if (!err) {
2187 				if (!xfrm_pol_hold_rcu(pol))
2188 					goto again;
2189 			} else if (err == -ESRCH) {
2190 				pol = NULL;
2191 			} else {
2192 				pol = ERR_PTR(err);
2193 			}
2194 		} else
2195 			pol = NULL;
2196 	}
2197 out:
2198 	rcu_read_unlock();
2199 	return pol;
2200 }
2201 
2202 static void __xfrm_policy_link(struct xfrm_policy *pol, int dir)
2203 {
2204 	struct net *net = xp_net(pol);
2205 
2206 	list_add(&pol->walk.all, &net->xfrm.policy_all);
2207 	net->xfrm.policy_count[dir]++;
2208 	xfrm_pol_hold(pol);
2209 }
2210 
2211 static struct xfrm_policy *__xfrm_policy_unlink(struct xfrm_policy *pol,
2212 						int dir)
2213 {
2214 	struct net *net = xp_net(pol);
2215 
2216 	if (list_empty(&pol->walk.all))
2217 		return NULL;
2218 
2219 	/* Socket policies are not hashed. */
2220 	if (!hlist_unhashed(&pol->bydst)) {
2221 		hlist_del_rcu(&pol->bydst);
2222 		hlist_del_init(&pol->bydst_inexact_list);
2223 		hlist_del(&pol->byidx);
2224 	}
2225 
2226 	list_del_init(&pol->walk.all);
2227 	net->xfrm.policy_count[dir]--;
2228 
2229 	return pol;
2230 }
2231 
2232 static void xfrm_sk_policy_link(struct xfrm_policy *pol, int dir)
2233 {
2234 	__xfrm_policy_link(pol, XFRM_POLICY_MAX + dir);
2235 }
2236 
2237 static void xfrm_sk_policy_unlink(struct xfrm_policy *pol, int dir)
2238 {
2239 	__xfrm_policy_unlink(pol, XFRM_POLICY_MAX + dir);
2240 }
2241 
2242 int xfrm_policy_delete(struct xfrm_policy *pol, int dir)
2243 {
2244 	struct net *net = xp_net(pol);
2245 
2246 	spin_lock_bh(&net->xfrm.xfrm_policy_lock);
2247 	pol = __xfrm_policy_unlink(pol, dir);
2248 	spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
2249 	if (pol) {
2250 		xfrm_policy_kill(pol);
2251 		return 0;
2252 	}
2253 	return -ENOENT;
2254 }
2255 EXPORT_SYMBOL(xfrm_policy_delete);
2256 
2257 int xfrm_sk_policy_insert(struct sock *sk, int dir, struct xfrm_policy *pol)
2258 {
2259 	struct net *net = sock_net(sk);
2260 	struct xfrm_policy *old_pol;
2261 
2262 #ifdef CONFIG_XFRM_SUB_POLICY
2263 	if (pol && pol->type != XFRM_POLICY_TYPE_MAIN)
2264 		return -EINVAL;
2265 #endif
2266 
2267 	spin_lock_bh(&net->xfrm.xfrm_policy_lock);
2268 	old_pol = rcu_dereference_protected(sk->sk_policy[dir],
2269 				lockdep_is_held(&net->xfrm.xfrm_policy_lock));
2270 	if (pol) {
2271 		pol->curlft.add_time = ktime_get_real_seconds();
2272 		pol->index = xfrm_gen_index(net, XFRM_POLICY_MAX+dir, 0);
2273 		xfrm_sk_policy_link(pol, dir);
2274 	}
2275 	rcu_assign_pointer(sk->sk_policy[dir], pol);
2276 	if (old_pol) {
2277 		if (pol)
2278 			xfrm_policy_requeue(old_pol, pol);
2279 
2280 		/* Unlinking succeeds always. This is the only function
2281 		 * allowed to delete or replace socket policy.
2282 		 */
2283 		xfrm_sk_policy_unlink(old_pol, dir);
2284 	}
2285 	spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
2286 
2287 	if (old_pol) {
2288 		xfrm_policy_kill(old_pol);
2289 	}
2290 	return 0;
2291 }
2292 
2293 static struct xfrm_policy *clone_policy(const struct xfrm_policy *old, int dir)
2294 {
2295 	struct xfrm_policy *newp = xfrm_policy_alloc(xp_net(old), GFP_ATOMIC);
2296 	struct net *net = xp_net(old);
2297 
2298 	if (newp) {
2299 		newp->selector = old->selector;
2300 		if (security_xfrm_policy_clone(old->security,
2301 					       &newp->security)) {
2302 			kfree(newp);
2303 			return NULL;  /* ENOMEM */
2304 		}
2305 		newp->lft = old->lft;
2306 		newp->curlft = old->curlft;
2307 		newp->mark = old->mark;
2308 		newp->if_id = old->if_id;
2309 		newp->action = old->action;
2310 		newp->flags = old->flags;
2311 		newp->xfrm_nr = old->xfrm_nr;
2312 		newp->index = old->index;
2313 		newp->type = old->type;
2314 		newp->family = old->family;
2315 		memcpy(newp->xfrm_vec, old->xfrm_vec,
2316 		       newp->xfrm_nr*sizeof(struct xfrm_tmpl));
2317 		spin_lock_bh(&net->xfrm.xfrm_policy_lock);
2318 		xfrm_sk_policy_link(newp, dir);
2319 		spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
2320 		xfrm_pol_put(newp);
2321 	}
2322 	return newp;
2323 }
2324 
2325 int __xfrm_sk_clone_policy(struct sock *sk, const struct sock *osk)
2326 {
2327 	const struct xfrm_policy *p;
2328 	struct xfrm_policy *np;
2329 	int i, ret = 0;
2330 
2331 	rcu_read_lock();
2332 	for (i = 0; i < 2; i++) {
2333 		p = rcu_dereference(osk->sk_policy[i]);
2334 		if (p) {
2335 			np = clone_policy(p, i);
2336 			if (unlikely(!np)) {
2337 				ret = -ENOMEM;
2338 				break;
2339 			}
2340 			rcu_assign_pointer(sk->sk_policy[i], np);
2341 		}
2342 	}
2343 	rcu_read_unlock();
2344 	return ret;
2345 }
2346 
2347 static int
2348 xfrm_get_saddr(struct net *net, int oif, xfrm_address_t *local,
2349 	       xfrm_address_t *remote, unsigned short family, u32 mark)
2350 {
2351 	int err;
2352 	const struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
2353 
2354 	if (unlikely(afinfo == NULL))
2355 		return -EINVAL;
2356 	err = afinfo->get_saddr(net, oif, local, remote, mark);
2357 	rcu_read_unlock();
2358 	return err;
2359 }
2360 
2361 /* Resolve list of templates for the flow, given policy. */
2362 
2363 static int
2364 xfrm_tmpl_resolve_one(struct xfrm_policy *policy, const struct flowi *fl,
2365 		      struct xfrm_state **xfrm, unsigned short family)
2366 {
2367 	struct net *net = xp_net(policy);
2368 	int nx;
2369 	int i, error;
2370 	xfrm_address_t *daddr = xfrm_flowi_daddr(fl, family);
2371 	xfrm_address_t *saddr = xfrm_flowi_saddr(fl, family);
2372 	xfrm_address_t tmp;
2373 
2374 	for (nx = 0, i = 0; i < policy->xfrm_nr; i++) {
2375 		struct xfrm_state *x;
2376 		xfrm_address_t *remote = daddr;
2377 		xfrm_address_t *local  = saddr;
2378 		struct xfrm_tmpl *tmpl = &policy->xfrm_vec[i];
2379 
2380 		if (tmpl->mode == XFRM_MODE_TUNNEL ||
2381 		    tmpl->mode == XFRM_MODE_BEET) {
2382 			remote = &tmpl->id.daddr;
2383 			local = &tmpl->saddr;
2384 			if (xfrm_addr_any(local, tmpl->encap_family)) {
2385 				error = xfrm_get_saddr(net, fl->flowi_oif,
2386 						       &tmp, remote,
2387 						       tmpl->encap_family, 0);
2388 				if (error)
2389 					goto fail;
2390 				local = &tmp;
2391 			}
2392 		}
2393 
2394 		x = xfrm_state_find(remote, local, fl, tmpl, policy, &error,
2395 				    family, policy->if_id);
2396 
2397 		if (x && x->km.state == XFRM_STATE_VALID) {
2398 			xfrm[nx++] = x;
2399 			daddr = remote;
2400 			saddr = local;
2401 			continue;
2402 		}
2403 		if (x) {
2404 			error = (x->km.state == XFRM_STATE_ERROR ?
2405 				 -EINVAL : -EAGAIN);
2406 			xfrm_state_put(x);
2407 		} else if (error == -ESRCH) {
2408 			error = -EAGAIN;
2409 		}
2410 
2411 		if (!tmpl->optional)
2412 			goto fail;
2413 	}
2414 	return nx;
2415 
2416 fail:
2417 	for (nx--; nx >= 0; nx--)
2418 		xfrm_state_put(xfrm[nx]);
2419 	return error;
2420 }
2421 
2422 static int
2423 xfrm_tmpl_resolve(struct xfrm_policy **pols, int npols, const struct flowi *fl,
2424 		  struct xfrm_state **xfrm, unsigned short family)
2425 {
2426 	struct xfrm_state *tp[XFRM_MAX_DEPTH];
2427 	struct xfrm_state **tpp = (npols > 1) ? tp : xfrm;
2428 	int cnx = 0;
2429 	int error;
2430 	int ret;
2431 	int i;
2432 
2433 	for (i = 0; i < npols; i++) {
2434 		if (cnx + pols[i]->xfrm_nr >= XFRM_MAX_DEPTH) {
2435 			error = -ENOBUFS;
2436 			goto fail;
2437 		}
2438 
2439 		ret = xfrm_tmpl_resolve_one(pols[i], fl, &tpp[cnx], family);
2440 		if (ret < 0) {
2441 			error = ret;
2442 			goto fail;
2443 		} else
2444 			cnx += ret;
2445 	}
2446 
2447 	/* found states are sorted for outbound processing */
2448 	if (npols > 1)
2449 		xfrm_state_sort(xfrm, tpp, cnx, family);
2450 
2451 	return cnx;
2452 
2453  fail:
2454 	for (cnx--; cnx >= 0; cnx--)
2455 		xfrm_state_put(tpp[cnx]);
2456 	return error;
2457 
2458 }
2459 
2460 static int xfrm_get_tos(const struct flowi *fl, int family)
2461 {
2462 	if (family == AF_INET)
2463 		return IPTOS_RT_MASK & fl->u.ip4.flowi4_tos;
2464 
2465 	return 0;
2466 }
2467 
2468 static inline struct xfrm_dst *xfrm_alloc_dst(struct net *net, int family)
2469 {
2470 	const struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
2471 	struct dst_ops *dst_ops;
2472 	struct xfrm_dst *xdst;
2473 
2474 	if (!afinfo)
2475 		return ERR_PTR(-EINVAL);
2476 
2477 	switch (family) {
2478 	case AF_INET:
2479 		dst_ops = &net->xfrm.xfrm4_dst_ops;
2480 		break;
2481 #if IS_ENABLED(CONFIG_IPV6)
2482 	case AF_INET6:
2483 		dst_ops = &net->xfrm.xfrm6_dst_ops;
2484 		break;
2485 #endif
2486 	default:
2487 		BUG();
2488 	}
2489 	xdst = dst_alloc(dst_ops, NULL, 1, DST_OBSOLETE_NONE, 0);
2490 
2491 	if (likely(xdst)) {
2492 		struct dst_entry *dst = &xdst->u.dst;
2493 
2494 		memset(dst + 1, 0, sizeof(*xdst) - sizeof(*dst));
2495 	} else
2496 		xdst = ERR_PTR(-ENOBUFS);
2497 
2498 	rcu_read_unlock();
2499 
2500 	return xdst;
2501 }
2502 
2503 static void xfrm_init_path(struct xfrm_dst *path, struct dst_entry *dst,
2504 			   int nfheader_len)
2505 {
2506 	if (dst->ops->family == AF_INET6) {
2507 		struct rt6_info *rt = (struct rt6_info *)dst;
2508 		path->path_cookie = rt6_get_cookie(rt);
2509 		path->u.rt6.rt6i_nfheader_len = nfheader_len;
2510 	}
2511 }
2512 
2513 static inline int xfrm_fill_dst(struct xfrm_dst *xdst, struct net_device *dev,
2514 				const struct flowi *fl)
2515 {
2516 	const struct xfrm_policy_afinfo *afinfo =
2517 		xfrm_policy_get_afinfo(xdst->u.dst.ops->family);
2518 	int err;
2519 
2520 	if (!afinfo)
2521 		return -EINVAL;
2522 
2523 	err = afinfo->fill_dst(xdst, dev, fl);
2524 
2525 	rcu_read_unlock();
2526 
2527 	return err;
2528 }
2529 
2530 
2531 /* Allocate chain of dst_entry's, attach known xfrm's, calculate
2532  * all the metrics... Shortly, bundle a bundle.
2533  */
2534 
2535 static struct dst_entry *xfrm_bundle_create(struct xfrm_policy *policy,
2536 					    struct xfrm_state **xfrm,
2537 					    struct xfrm_dst **bundle,
2538 					    int nx,
2539 					    const struct flowi *fl,
2540 					    struct dst_entry *dst)
2541 {
2542 	const struct xfrm_state_afinfo *afinfo;
2543 	const struct xfrm_mode *inner_mode;
2544 	struct net *net = xp_net(policy);
2545 	unsigned long now = jiffies;
2546 	struct net_device *dev;
2547 	struct xfrm_dst *xdst_prev = NULL;
2548 	struct xfrm_dst *xdst0 = NULL;
2549 	int i = 0;
2550 	int err;
2551 	int header_len = 0;
2552 	int nfheader_len = 0;
2553 	int trailer_len = 0;
2554 	int tos;
2555 	int family = policy->selector.family;
2556 	xfrm_address_t saddr, daddr;
2557 
2558 	xfrm_flowi_addr_get(fl, &saddr, &daddr, family);
2559 
2560 	tos = xfrm_get_tos(fl, family);
2561 
2562 	dst_hold(dst);
2563 
2564 	for (; i < nx; i++) {
2565 		struct xfrm_dst *xdst = xfrm_alloc_dst(net, family);
2566 		struct dst_entry *dst1 = &xdst->u.dst;
2567 
2568 		err = PTR_ERR(xdst);
2569 		if (IS_ERR(xdst)) {
2570 			dst_release(dst);
2571 			goto put_states;
2572 		}
2573 
2574 		bundle[i] = xdst;
2575 		if (!xdst_prev)
2576 			xdst0 = xdst;
2577 		else
2578 			/* Ref count is taken during xfrm_alloc_dst()
2579 			 * No need to do dst_clone() on dst1
2580 			 */
2581 			xfrm_dst_set_child(xdst_prev, &xdst->u.dst);
2582 
2583 		if (xfrm[i]->sel.family == AF_UNSPEC) {
2584 			inner_mode = xfrm_ip2inner_mode(xfrm[i],
2585 							xfrm_af2proto(family));
2586 			if (!inner_mode) {
2587 				err = -EAFNOSUPPORT;
2588 				dst_release(dst);
2589 				goto put_states;
2590 			}
2591 		} else
2592 			inner_mode = &xfrm[i]->inner_mode;
2593 
2594 		xdst->route = dst;
2595 		dst_copy_metrics(dst1, dst);
2596 
2597 		if (xfrm[i]->props.mode != XFRM_MODE_TRANSPORT) {
2598 			__u32 mark = 0;
2599 
2600 			if (xfrm[i]->props.smark.v || xfrm[i]->props.smark.m)
2601 				mark = xfrm_smark_get(fl->flowi_mark, xfrm[i]);
2602 
2603 			family = xfrm[i]->props.family;
2604 			dst = xfrm_dst_lookup(xfrm[i], tos, fl->flowi_oif,
2605 					      &saddr, &daddr, family, mark);
2606 			err = PTR_ERR(dst);
2607 			if (IS_ERR(dst))
2608 				goto put_states;
2609 		} else
2610 			dst_hold(dst);
2611 
2612 		dst1->xfrm = xfrm[i];
2613 		xdst->xfrm_genid = xfrm[i]->genid;
2614 
2615 		dst1->obsolete = DST_OBSOLETE_FORCE_CHK;
2616 		dst1->flags |= DST_HOST;
2617 		dst1->lastuse = now;
2618 
2619 		dst1->input = dst_discard;
2620 
2621 		rcu_read_lock();
2622 		afinfo = xfrm_state_afinfo_get_rcu(inner_mode->family);
2623 		if (likely(afinfo))
2624 			dst1->output = afinfo->output;
2625 		else
2626 			dst1->output = dst_discard_out;
2627 		rcu_read_unlock();
2628 
2629 		xdst_prev = xdst;
2630 
2631 		header_len += xfrm[i]->props.header_len;
2632 		if (xfrm[i]->type->flags & XFRM_TYPE_NON_FRAGMENT)
2633 			nfheader_len += xfrm[i]->props.header_len;
2634 		trailer_len += xfrm[i]->props.trailer_len;
2635 	}
2636 
2637 	xfrm_dst_set_child(xdst_prev, dst);
2638 	xdst0->path = dst;
2639 
2640 	err = -ENODEV;
2641 	dev = dst->dev;
2642 	if (!dev)
2643 		goto free_dst;
2644 
2645 	xfrm_init_path(xdst0, dst, nfheader_len);
2646 	xfrm_init_pmtu(bundle, nx);
2647 
2648 	for (xdst_prev = xdst0; xdst_prev != (struct xfrm_dst *)dst;
2649 	     xdst_prev = (struct xfrm_dst *) xfrm_dst_child(&xdst_prev->u.dst)) {
2650 		err = xfrm_fill_dst(xdst_prev, dev, fl);
2651 		if (err)
2652 			goto free_dst;
2653 
2654 		xdst_prev->u.dst.header_len = header_len;
2655 		xdst_prev->u.dst.trailer_len = trailer_len;
2656 		header_len -= xdst_prev->u.dst.xfrm->props.header_len;
2657 		trailer_len -= xdst_prev->u.dst.xfrm->props.trailer_len;
2658 	}
2659 
2660 	return &xdst0->u.dst;
2661 
2662 put_states:
2663 	for (; i < nx; i++)
2664 		xfrm_state_put(xfrm[i]);
2665 free_dst:
2666 	if (xdst0)
2667 		dst_release_immediate(&xdst0->u.dst);
2668 
2669 	return ERR_PTR(err);
2670 }
2671 
2672 static int xfrm_expand_policies(const struct flowi *fl, u16 family,
2673 				struct xfrm_policy **pols,
2674 				int *num_pols, int *num_xfrms)
2675 {
2676 	int i;
2677 
2678 	if (*num_pols == 0 || !pols[0]) {
2679 		*num_pols = 0;
2680 		*num_xfrms = 0;
2681 		return 0;
2682 	}
2683 	if (IS_ERR(pols[0]))
2684 		return PTR_ERR(pols[0]);
2685 
2686 	*num_xfrms = pols[0]->xfrm_nr;
2687 
2688 #ifdef CONFIG_XFRM_SUB_POLICY
2689 	if (pols[0] && pols[0]->action == XFRM_POLICY_ALLOW &&
2690 	    pols[0]->type != XFRM_POLICY_TYPE_MAIN) {
2691 		pols[1] = xfrm_policy_lookup_bytype(xp_net(pols[0]),
2692 						    XFRM_POLICY_TYPE_MAIN,
2693 						    fl, family,
2694 						    XFRM_POLICY_OUT,
2695 						    pols[0]->if_id);
2696 		if (pols[1]) {
2697 			if (IS_ERR(pols[1])) {
2698 				xfrm_pols_put(pols, *num_pols);
2699 				return PTR_ERR(pols[1]);
2700 			}
2701 			(*num_pols)++;
2702 			(*num_xfrms) += pols[1]->xfrm_nr;
2703 		}
2704 	}
2705 #endif
2706 	for (i = 0; i < *num_pols; i++) {
2707 		if (pols[i]->action != XFRM_POLICY_ALLOW) {
2708 			*num_xfrms = -1;
2709 			break;
2710 		}
2711 	}
2712 
2713 	return 0;
2714 
2715 }
2716 
2717 static struct xfrm_dst *
2718 xfrm_resolve_and_create_bundle(struct xfrm_policy **pols, int num_pols,
2719 			       const struct flowi *fl, u16 family,
2720 			       struct dst_entry *dst_orig)
2721 {
2722 	struct net *net = xp_net(pols[0]);
2723 	struct xfrm_state *xfrm[XFRM_MAX_DEPTH];
2724 	struct xfrm_dst *bundle[XFRM_MAX_DEPTH];
2725 	struct xfrm_dst *xdst;
2726 	struct dst_entry *dst;
2727 	int err;
2728 
2729 	/* Try to instantiate a bundle */
2730 	err = xfrm_tmpl_resolve(pols, num_pols, fl, xfrm, family);
2731 	if (err <= 0) {
2732 		if (err == 0)
2733 			return NULL;
2734 
2735 		if (err != -EAGAIN)
2736 			XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTPOLERROR);
2737 		return ERR_PTR(err);
2738 	}
2739 
2740 	dst = xfrm_bundle_create(pols[0], xfrm, bundle, err, fl, dst_orig);
2741 	if (IS_ERR(dst)) {
2742 		XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTBUNDLEGENERROR);
2743 		return ERR_CAST(dst);
2744 	}
2745 
2746 	xdst = (struct xfrm_dst *)dst;
2747 	xdst->num_xfrms = err;
2748 	xdst->num_pols = num_pols;
2749 	memcpy(xdst->pols, pols, sizeof(struct xfrm_policy *) * num_pols);
2750 	xdst->policy_genid = atomic_read(&pols[0]->genid);
2751 
2752 	return xdst;
2753 }
2754 
2755 static void xfrm_policy_queue_process(struct timer_list *t)
2756 {
2757 	struct sk_buff *skb;
2758 	struct sock *sk;
2759 	struct dst_entry *dst;
2760 	struct xfrm_policy *pol = from_timer(pol, t, polq.hold_timer);
2761 	struct net *net = xp_net(pol);
2762 	struct xfrm_policy_queue *pq = &pol->polq;
2763 	struct flowi fl;
2764 	struct sk_buff_head list;
2765 
2766 	spin_lock(&pq->hold_queue.lock);
2767 	skb = skb_peek(&pq->hold_queue);
2768 	if (!skb) {
2769 		spin_unlock(&pq->hold_queue.lock);
2770 		goto out;
2771 	}
2772 	dst = skb_dst(skb);
2773 	sk = skb->sk;
2774 	xfrm_decode_session(skb, &fl, dst->ops->family);
2775 	spin_unlock(&pq->hold_queue.lock);
2776 
2777 	dst_hold(xfrm_dst_path(dst));
2778 	dst = xfrm_lookup(net, xfrm_dst_path(dst), &fl, sk, XFRM_LOOKUP_QUEUE);
2779 	if (IS_ERR(dst))
2780 		goto purge_queue;
2781 
2782 	if (dst->flags & DST_XFRM_QUEUE) {
2783 		dst_release(dst);
2784 
2785 		if (pq->timeout >= XFRM_QUEUE_TMO_MAX)
2786 			goto purge_queue;
2787 
2788 		pq->timeout = pq->timeout << 1;
2789 		if (!mod_timer(&pq->hold_timer, jiffies + pq->timeout))
2790 			xfrm_pol_hold(pol);
2791 		goto out;
2792 	}
2793 
2794 	dst_release(dst);
2795 
2796 	__skb_queue_head_init(&list);
2797 
2798 	spin_lock(&pq->hold_queue.lock);
2799 	pq->timeout = 0;
2800 	skb_queue_splice_init(&pq->hold_queue, &list);
2801 	spin_unlock(&pq->hold_queue.lock);
2802 
2803 	while (!skb_queue_empty(&list)) {
2804 		skb = __skb_dequeue(&list);
2805 
2806 		xfrm_decode_session(skb, &fl, skb_dst(skb)->ops->family);
2807 		dst_hold(xfrm_dst_path(skb_dst(skb)));
2808 		dst = xfrm_lookup(net, xfrm_dst_path(skb_dst(skb)), &fl, skb->sk, 0);
2809 		if (IS_ERR(dst)) {
2810 			kfree_skb(skb);
2811 			continue;
2812 		}
2813 
2814 		nf_reset_ct(skb);
2815 		skb_dst_drop(skb);
2816 		skb_dst_set(skb, dst);
2817 
2818 		dst_output(net, skb->sk, skb);
2819 	}
2820 
2821 out:
2822 	xfrm_pol_put(pol);
2823 	return;
2824 
2825 purge_queue:
2826 	pq->timeout = 0;
2827 	skb_queue_purge(&pq->hold_queue);
2828 	xfrm_pol_put(pol);
2829 }
2830 
2831 static int xdst_queue_output(struct net *net, struct sock *sk, struct sk_buff *skb)
2832 {
2833 	unsigned long sched_next;
2834 	struct dst_entry *dst = skb_dst(skb);
2835 	struct xfrm_dst *xdst = (struct xfrm_dst *) dst;
2836 	struct xfrm_policy *pol = xdst->pols[0];
2837 	struct xfrm_policy_queue *pq = &pol->polq;
2838 
2839 	if (unlikely(skb_fclone_busy(sk, skb))) {
2840 		kfree_skb(skb);
2841 		return 0;
2842 	}
2843 
2844 	if (pq->hold_queue.qlen > XFRM_MAX_QUEUE_LEN) {
2845 		kfree_skb(skb);
2846 		return -EAGAIN;
2847 	}
2848 
2849 	skb_dst_force(skb);
2850 
2851 	spin_lock_bh(&pq->hold_queue.lock);
2852 
2853 	if (!pq->timeout)
2854 		pq->timeout = XFRM_QUEUE_TMO_MIN;
2855 
2856 	sched_next = jiffies + pq->timeout;
2857 
2858 	if (del_timer(&pq->hold_timer)) {
2859 		if (time_before(pq->hold_timer.expires, sched_next))
2860 			sched_next = pq->hold_timer.expires;
2861 		xfrm_pol_put(pol);
2862 	}
2863 
2864 	__skb_queue_tail(&pq->hold_queue, skb);
2865 	if (!mod_timer(&pq->hold_timer, sched_next))
2866 		xfrm_pol_hold(pol);
2867 
2868 	spin_unlock_bh(&pq->hold_queue.lock);
2869 
2870 	return 0;
2871 }
2872 
2873 static struct xfrm_dst *xfrm_create_dummy_bundle(struct net *net,
2874 						 struct xfrm_flo *xflo,
2875 						 const struct flowi *fl,
2876 						 int num_xfrms,
2877 						 u16 family)
2878 {
2879 	int err;
2880 	struct net_device *dev;
2881 	struct dst_entry *dst;
2882 	struct dst_entry *dst1;
2883 	struct xfrm_dst *xdst;
2884 
2885 	xdst = xfrm_alloc_dst(net, family);
2886 	if (IS_ERR(xdst))
2887 		return xdst;
2888 
2889 	if (!(xflo->flags & XFRM_LOOKUP_QUEUE) ||
2890 	    net->xfrm.sysctl_larval_drop ||
2891 	    num_xfrms <= 0)
2892 		return xdst;
2893 
2894 	dst = xflo->dst_orig;
2895 	dst1 = &xdst->u.dst;
2896 	dst_hold(dst);
2897 	xdst->route = dst;
2898 
2899 	dst_copy_metrics(dst1, dst);
2900 
2901 	dst1->obsolete = DST_OBSOLETE_FORCE_CHK;
2902 	dst1->flags |= DST_HOST | DST_XFRM_QUEUE;
2903 	dst1->lastuse = jiffies;
2904 
2905 	dst1->input = dst_discard;
2906 	dst1->output = xdst_queue_output;
2907 
2908 	dst_hold(dst);
2909 	xfrm_dst_set_child(xdst, dst);
2910 	xdst->path = dst;
2911 
2912 	xfrm_init_path((struct xfrm_dst *)dst1, dst, 0);
2913 
2914 	err = -ENODEV;
2915 	dev = dst->dev;
2916 	if (!dev)
2917 		goto free_dst;
2918 
2919 	err = xfrm_fill_dst(xdst, dev, fl);
2920 	if (err)
2921 		goto free_dst;
2922 
2923 out:
2924 	return xdst;
2925 
2926 free_dst:
2927 	dst_release(dst1);
2928 	xdst = ERR_PTR(err);
2929 	goto out;
2930 }
2931 
2932 static struct xfrm_dst *xfrm_bundle_lookup(struct net *net,
2933 					   const struct flowi *fl,
2934 					   u16 family, u8 dir,
2935 					   struct xfrm_flo *xflo, u32 if_id)
2936 {
2937 	struct xfrm_policy *pols[XFRM_POLICY_TYPE_MAX];
2938 	int num_pols = 0, num_xfrms = 0, err;
2939 	struct xfrm_dst *xdst;
2940 
2941 	/* Resolve policies to use if we couldn't get them from
2942 	 * previous cache entry */
2943 	num_pols = 1;
2944 	pols[0] = xfrm_policy_lookup(net, fl, family, dir, if_id);
2945 	err = xfrm_expand_policies(fl, family, pols,
2946 					   &num_pols, &num_xfrms);
2947 	if (err < 0)
2948 		goto inc_error;
2949 	if (num_pols == 0)
2950 		return NULL;
2951 	if (num_xfrms <= 0)
2952 		goto make_dummy_bundle;
2953 
2954 	xdst = xfrm_resolve_and_create_bundle(pols, num_pols, fl, family,
2955 					      xflo->dst_orig);
2956 	if (IS_ERR(xdst)) {
2957 		err = PTR_ERR(xdst);
2958 		if (err == -EREMOTE) {
2959 			xfrm_pols_put(pols, num_pols);
2960 			return NULL;
2961 		}
2962 
2963 		if (err != -EAGAIN)
2964 			goto error;
2965 		goto make_dummy_bundle;
2966 	} else if (xdst == NULL) {
2967 		num_xfrms = 0;
2968 		goto make_dummy_bundle;
2969 	}
2970 
2971 	return xdst;
2972 
2973 make_dummy_bundle:
2974 	/* We found policies, but there's no bundles to instantiate:
2975 	 * either because the policy blocks, has no transformations or
2976 	 * we could not build template (no xfrm_states).*/
2977 	xdst = xfrm_create_dummy_bundle(net, xflo, fl, num_xfrms, family);
2978 	if (IS_ERR(xdst)) {
2979 		xfrm_pols_put(pols, num_pols);
2980 		return ERR_CAST(xdst);
2981 	}
2982 	xdst->num_pols = num_pols;
2983 	xdst->num_xfrms = num_xfrms;
2984 	memcpy(xdst->pols, pols, sizeof(struct xfrm_policy *) * num_pols);
2985 
2986 	return xdst;
2987 
2988 inc_error:
2989 	XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTPOLERROR);
2990 error:
2991 	xfrm_pols_put(pols, num_pols);
2992 	return ERR_PTR(err);
2993 }
2994 
2995 static struct dst_entry *make_blackhole(struct net *net, u16 family,
2996 					struct dst_entry *dst_orig)
2997 {
2998 	const struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
2999 	struct dst_entry *ret;
3000 
3001 	if (!afinfo) {
3002 		dst_release(dst_orig);
3003 		return ERR_PTR(-EINVAL);
3004 	} else {
3005 		ret = afinfo->blackhole_route(net, dst_orig);
3006 	}
3007 	rcu_read_unlock();
3008 
3009 	return ret;
3010 }
3011 
3012 /* Finds/creates a bundle for given flow and if_id
3013  *
3014  * At the moment we eat a raw IP route. Mostly to speed up lookups
3015  * on interfaces with disabled IPsec.
3016  *
3017  * xfrm_lookup uses an if_id of 0 by default, and is provided for
3018  * compatibility
3019  */
3020 struct dst_entry *xfrm_lookup_with_ifid(struct net *net,
3021 					struct dst_entry *dst_orig,
3022 					const struct flowi *fl,
3023 					const struct sock *sk,
3024 					int flags, u32 if_id)
3025 {
3026 	struct xfrm_policy *pols[XFRM_POLICY_TYPE_MAX];
3027 	struct xfrm_dst *xdst;
3028 	struct dst_entry *dst, *route;
3029 	u16 family = dst_orig->ops->family;
3030 	u8 dir = XFRM_POLICY_OUT;
3031 	int i, err, num_pols, num_xfrms = 0, drop_pols = 0;
3032 
3033 	dst = NULL;
3034 	xdst = NULL;
3035 	route = NULL;
3036 
3037 	sk = sk_const_to_full_sk(sk);
3038 	if (sk && sk->sk_policy[XFRM_POLICY_OUT]) {
3039 		num_pols = 1;
3040 		pols[0] = xfrm_sk_policy_lookup(sk, XFRM_POLICY_OUT, fl, family,
3041 						if_id);
3042 		err = xfrm_expand_policies(fl, family, pols,
3043 					   &num_pols, &num_xfrms);
3044 		if (err < 0)
3045 			goto dropdst;
3046 
3047 		if (num_pols) {
3048 			if (num_xfrms <= 0) {
3049 				drop_pols = num_pols;
3050 				goto no_transform;
3051 			}
3052 
3053 			xdst = xfrm_resolve_and_create_bundle(
3054 					pols, num_pols, fl,
3055 					family, dst_orig);
3056 
3057 			if (IS_ERR(xdst)) {
3058 				xfrm_pols_put(pols, num_pols);
3059 				err = PTR_ERR(xdst);
3060 				if (err == -EREMOTE)
3061 					goto nopol;
3062 
3063 				goto dropdst;
3064 			} else if (xdst == NULL) {
3065 				num_xfrms = 0;
3066 				drop_pols = num_pols;
3067 				goto no_transform;
3068 			}
3069 
3070 			route = xdst->route;
3071 		}
3072 	}
3073 
3074 	if (xdst == NULL) {
3075 		struct xfrm_flo xflo;
3076 
3077 		xflo.dst_orig = dst_orig;
3078 		xflo.flags = flags;
3079 
3080 		/* To accelerate a bit...  */
3081 		if ((dst_orig->flags & DST_NOXFRM) ||
3082 		    !net->xfrm.policy_count[XFRM_POLICY_OUT])
3083 			goto nopol;
3084 
3085 		xdst = xfrm_bundle_lookup(net, fl, family, dir, &xflo, if_id);
3086 		if (xdst == NULL)
3087 			goto nopol;
3088 		if (IS_ERR(xdst)) {
3089 			err = PTR_ERR(xdst);
3090 			goto dropdst;
3091 		}
3092 
3093 		num_pols = xdst->num_pols;
3094 		num_xfrms = xdst->num_xfrms;
3095 		memcpy(pols, xdst->pols, sizeof(struct xfrm_policy *) * num_pols);
3096 		route = xdst->route;
3097 	}
3098 
3099 	dst = &xdst->u.dst;
3100 	if (route == NULL && num_xfrms > 0) {
3101 		/* The only case when xfrm_bundle_lookup() returns a
3102 		 * bundle with null route, is when the template could
3103 		 * not be resolved. It means policies are there, but
3104 		 * bundle could not be created, since we don't yet
3105 		 * have the xfrm_state's. We need to wait for KM to
3106 		 * negotiate new SA's or bail out with error.*/
3107 		if (net->xfrm.sysctl_larval_drop) {
3108 			XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTNOSTATES);
3109 			err = -EREMOTE;
3110 			goto error;
3111 		}
3112 
3113 		err = -EAGAIN;
3114 
3115 		XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTNOSTATES);
3116 		goto error;
3117 	}
3118 
3119 no_transform:
3120 	if (num_pols == 0)
3121 		goto nopol;
3122 
3123 	if ((flags & XFRM_LOOKUP_ICMP) &&
3124 	    !(pols[0]->flags & XFRM_POLICY_ICMP)) {
3125 		err = -ENOENT;
3126 		goto error;
3127 	}
3128 
3129 	for (i = 0; i < num_pols; i++)
3130 		pols[i]->curlft.use_time = ktime_get_real_seconds();
3131 
3132 	if (num_xfrms < 0) {
3133 		/* Prohibit the flow */
3134 		XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTPOLBLOCK);
3135 		err = -EPERM;
3136 		goto error;
3137 	} else if (num_xfrms > 0) {
3138 		/* Flow transformed */
3139 		dst_release(dst_orig);
3140 	} else {
3141 		/* Flow passes untransformed */
3142 		dst_release(dst);
3143 		dst = dst_orig;
3144 	}
3145 ok:
3146 	xfrm_pols_put(pols, drop_pols);
3147 	if (dst && dst->xfrm &&
3148 	    dst->xfrm->props.mode == XFRM_MODE_TUNNEL)
3149 		dst->flags |= DST_XFRM_TUNNEL;
3150 	return dst;
3151 
3152 nopol:
3153 	if (!(flags & XFRM_LOOKUP_ICMP)) {
3154 		dst = dst_orig;
3155 		goto ok;
3156 	}
3157 	err = -ENOENT;
3158 error:
3159 	dst_release(dst);
3160 dropdst:
3161 	if (!(flags & XFRM_LOOKUP_KEEP_DST_REF))
3162 		dst_release(dst_orig);
3163 	xfrm_pols_put(pols, drop_pols);
3164 	return ERR_PTR(err);
3165 }
3166 EXPORT_SYMBOL(xfrm_lookup_with_ifid);
3167 
3168 /* Main function: finds/creates a bundle for given flow.
3169  *
3170  * At the moment we eat a raw IP route. Mostly to speed up lookups
3171  * on interfaces with disabled IPsec.
3172  */
3173 struct dst_entry *xfrm_lookup(struct net *net, struct dst_entry *dst_orig,
3174 			      const struct flowi *fl, const struct sock *sk,
3175 			      int flags)
3176 {
3177 	return xfrm_lookup_with_ifid(net, dst_orig, fl, sk, flags, 0);
3178 }
3179 EXPORT_SYMBOL(xfrm_lookup);
3180 
3181 /* Callers of xfrm_lookup_route() must ensure a call to dst_output().
3182  * Otherwise we may send out blackholed packets.
3183  */
3184 struct dst_entry *xfrm_lookup_route(struct net *net, struct dst_entry *dst_orig,
3185 				    const struct flowi *fl,
3186 				    const struct sock *sk, int flags)
3187 {
3188 	struct dst_entry *dst = xfrm_lookup(net, dst_orig, fl, sk,
3189 					    flags | XFRM_LOOKUP_QUEUE |
3190 					    XFRM_LOOKUP_KEEP_DST_REF);
3191 
3192 	if (PTR_ERR(dst) == -EREMOTE)
3193 		return make_blackhole(net, dst_orig->ops->family, dst_orig);
3194 
3195 	if (IS_ERR(dst))
3196 		dst_release(dst_orig);
3197 
3198 	return dst;
3199 }
3200 EXPORT_SYMBOL(xfrm_lookup_route);
3201 
3202 static inline int
3203 xfrm_secpath_reject(int idx, struct sk_buff *skb, const struct flowi *fl)
3204 {
3205 	struct sec_path *sp = skb_sec_path(skb);
3206 	struct xfrm_state *x;
3207 
3208 	if (!sp || idx < 0 || idx >= sp->len)
3209 		return 0;
3210 	x = sp->xvec[idx];
3211 	if (!x->type->reject)
3212 		return 0;
3213 	return x->type->reject(x, skb, fl);
3214 }
3215 
3216 /* When skb is transformed back to its "native" form, we have to
3217  * check policy restrictions. At the moment we make this in maximally
3218  * stupid way. Shame on me. :-) Of course, connected sockets must
3219  * have policy cached at them.
3220  */
3221 
3222 static inline int
3223 xfrm_state_ok(const struct xfrm_tmpl *tmpl, const struct xfrm_state *x,
3224 	      unsigned short family)
3225 {
3226 	if (xfrm_state_kern(x))
3227 		return tmpl->optional && !xfrm_state_addr_cmp(tmpl, x, tmpl->encap_family);
3228 	return	x->id.proto == tmpl->id.proto &&
3229 		(x->id.spi == tmpl->id.spi || !tmpl->id.spi) &&
3230 		(x->props.reqid == tmpl->reqid || !tmpl->reqid) &&
3231 		x->props.mode == tmpl->mode &&
3232 		(tmpl->allalgs || (tmpl->aalgos & (1<<x->props.aalgo)) ||
3233 		 !(xfrm_id_proto_match(tmpl->id.proto, IPSEC_PROTO_ANY))) &&
3234 		!(x->props.mode != XFRM_MODE_TRANSPORT &&
3235 		  xfrm_state_addr_cmp(tmpl, x, family));
3236 }
3237 
3238 /*
3239  * 0 or more than 0 is returned when validation is succeeded (either bypass
3240  * because of optional transport mode, or next index of the mathced secpath
3241  * state with the template.
3242  * -1 is returned when no matching template is found.
3243  * Otherwise "-2 - errored_index" is returned.
3244  */
3245 static inline int
3246 xfrm_policy_ok(const struct xfrm_tmpl *tmpl, const struct sec_path *sp, int start,
3247 	       unsigned short family)
3248 {
3249 	int idx = start;
3250 
3251 	if (tmpl->optional) {
3252 		if (tmpl->mode == XFRM_MODE_TRANSPORT)
3253 			return start;
3254 	} else
3255 		start = -1;
3256 	for (; idx < sp->len; idx++) {
3257 		if (xfrm_state_ok(tmpl, sp->xvec[idx], family))
3258 			return ++idx;
3259 		if (sp->xvec[idx]->props.mode != XFRM_MODE_TRANSPORT) {
3260 			if (start == -1)
3261 				start = -2-idx;
3262 			break;
3263 		}
3264 	}
3265 	return start;
3266 }
3267 
3268 static void
3269 decode_session4(struct sk_buff *skb, struct flowi *fl, bool reverse)
3270 {
3271 	const struct iphdr *iph = ip_hdr(skb);
3272 	int ihl = iph->ihl;
3273 	u8 *xprth = skb_network_header(skb) + ihl * 4;
3274 	struct flowi4 *fl4 = &fl->u.ip4;
3275 	int oif = 0;
3276 
3277 	if (skb_dst(skb) && skb_dst(skb)->dev)
3278 		oif = skb_dst(skb)->dev->ifindex;
3279 
3280 	memset(fl4, 0, sizeof(struct flowi4));
3281 	fl4->flowi4_mark = skb->mark;
3282 	fl4->flowi4_oif = reverse ? skb->skb_iif : oif;
3283 
3284 	fl4->flowi4_proto = iph->protocol;
3285 	fl4->daddr = reverse ? iph->saddr : iph->daddr;
3286 	fl4->saddr = reverse ? iph->daddr : iph->saddr;
3287 	fl4->flowi4_tos = iph->tos;
3288 
3289 	if (!ip_is_fragment(iph)) {
3290 		switch (iph->protocol) {
3291 		case IPPROTO_UDP:
3292 		case IPPROTO_UDPLITE:
3293 		case IPPROTO_TCP:
3294 		case IPPROTO_SCTP:
3295 		case IPPROTO_DCCP:
3296 			if (xprth + 4 < skb->data ||
3297 			    pskb_may_pull(skb, xprth + 4 - skb->data)) {
3298 				__be16 *ports;
3299 
3300 				xprth = skb_network_header(skb) + ihl * 4;
3301 				ports = (__be16 *)xprth;
3302 
3303 				fl4->fl4_sport = ports[!!reverse];
3304 				fl4->fl4_dport = ports[!reverse];
3305 			}
3306 			break;
3307 		case IPPROTO_ICMP:
3308 			if (xprth + 2 < skb->data ||
3309 			    pskb_may_pull(skb, xprth + 2 - skb->data)) {
3310 				u8 *icmp;
3311 
3312 				xprth = skb_network_header(skb) + ihl * 4;
3313 				icmp = xprth;
3314 
3315 				fl4->fl4_icmp_type = icmp[0];
3316 				fl4->fl4_icmp_code = icmp[1];
3317 			}
3318 			break;
3319 		case IPPROTO_ESP:
3320 			if (xprth + 4 < skb->data ||
3321 			    pskb_may_pull(skb, xprth + 4 - skb->data)) {
3322 				__be32 *ehdr;
3323 
3324 				xprth = skb_network_header(skb) + ihl * 4;
3325 				ehdr = (__be32 *)xprth;
3326 
3327 				fl4->fl4_ipsec_spi = ehdr[0];
3328 			}
3329 			break;
3330 		case IPPROTO_AH:
3331 			if (xprth + 8 < skb->data ||
3332 			    pskb_may_pull(skb, xprth + 8 - skb->data)) {
3333 				__be32 *ah_hdr;
3334 
3335 				xprth = skb_network_header(skb) + ihl * 4;
3336 				ah_hdr = (__be32 *)xprth;
3337 
3338 				fl4->fl4_ipsec_spi = ah_hdr[1];
3339 			}
3340 			break;
3341 		case IPPROTO_COMP:
3342 			if (xprth + 4 < skb->data ||
3343 			    pskb_may_pull(skb, xprth + 4 - skb->data)) {
3344 				__be16 *ipcomp_hdr;
3345 
3346 				xprth = skb_network_header(skb) + ihl * 4;
3347 				ipcomp_hdr = (__be16 *)xprth;
3348 
3349 				fl4->fl4_ipsec_spi = htonl(ntohs(ipcomp_hdr[1]));
3350 			}
3351 			break;
3352 		case IPPROTO_GRE:
3353 			if (xprth + 12 < skb->data ||
3354 			    pskb_may_pull(skb, xprth + 12 - skb->data)) {
3355 				__be16 *greflags;
3356 				__be32 *gre_hdr;
3357 
3358 				xprth = skb_network_header(skb) + ihl * 4;
3359 				greflags = (__be16 *)xprth;
3360 				gre_hdr = (__be32 *)xprth;
3361 
3362 				if (greflags[0] & GRE_KEY) {
3363 					if (greflags[0] & GRE_CSUM)
3364 						gre_hdr++;
3365 					fl4->fl4_gre_key = gre_hdr[1];
3366 				}
3367 			}
3368 			break;
3369 		default:
3370 			fl4->fl4_ipsec_spi = 0;
3371 			break;
3372 		}
3373 	}
3374 }
3375 
3376 #if IS_ENABLED(CONFIG_IPV6)
3377 static void
3378 decode_session6(struct sk_buff *skb, struct flowi *fl, bool reverse)
3379 {
3380 	struct flowi6 *fl6 = &fl->u.ip6;
3381 	int onlyproto = 0;
3382 	const struct ipv6hdr *hdr = ipv6_hdr(skb);
3383 	u32 offset = sizeof(*hdr);
3384 	struct ipv6_opt_hdr *exthdr;
3385 	const unsigned char *nh = skb_network_header(skb);
3386 	u16 nhoff = IP6CB(skb)->nhoff;
3387 	int oif = 0;
3388 	u8 nexthdr;
3389 
3390 	if (!nhoff)
3391 		nhoff = offsetof(struct ipv6hdr, nexthdr);
3392 
3393 	nexthdr = nh[nhoff];
3394 
3395 	if (skb_dst(skb) && skb_dst(skb)->dev)
3396 		oif = skb_dst(skb)->dev->ifindex;
3397 
3398 	memset(fl6, 0, sizeof(struct flowi6));
3399 	fl6->flowi6_mark = skb->mark;
3400 	fl6->flowi6_oif = reverse ? skb->skb_iif : oif;
3401 
3402 	fl6->daddr = reverse ? hdr->saddr : hdr->daddr;
3403 	fl6->saddr = reverse ? hdr->daddr : hdr->saddr;
3404 
3405 	while (nh + offset + sizeof(*exthdr) < skb->data ||
3406 	       pskb_may_pull(skb, nh + offset + sizeof(*exthdr) - skb->data)) {
3407 		nh = skb_network_header(skb);
3408 		exthdr = (struct ipv6_opt_hdr *)(nh + offset);
3409 
3410 		switch (nexthdr) {
3411 		case NEXTHDR_FRAGMENT:
3412 			onlyproto = 1;
3413 			/* fall through */
3414 		case NEXTHDR_ROUTING:
3415 		case NEXTHDR_HOP:
3416 		case NEXTHDR_DEST:
3417 			offset += ipv6_optlen(exthdr);
3418 			nexthdr = exthdr->nexthdr;
3419 			exthdr = (struct ipv6_opt_hdr *)(nh + offset);
3420 			break;
3421 		case IPPROTO_UDP:
3422 		case IPPROTO_UDPLITE:
3423 		case IPPROTO_TCP:
3424 		case IPPROTO_SCTP:
3425 		case IPPROTO_DCCP:
3426 			if (!onlyproto && (nh + offset + 4 < skb->data ||
3427 			     pskb_may_pull(skb, nh + offset + 4 - skb->data))) {
3428 				__be16 *ports;
3429 
3430 				nh = skb_network_header(skb);
3431 				ports = (__be16 *)(nh + offset);
3432 				fl6->fl6_sport = ports[!!reverse];
3433 				fl6->fl6_dport = ports[!reverse];
3434 			}
3435 			fl6->flowi6_proto = nexthdr;
3436 			return;
3437 		case IPPROTO_ICMPV6:
3438 			if (!onlyproto && (nh + offset + 2 < skb->data ||
3439 			    pskb_may_pull(skb, nh + offset + 2 - skb->data))) {
3440 				u8 *icmp;
3441 
3442 				nh = skb_network_header(skb);
3443 				icmp = (u8 *)(nh + offset);
3444 				fl6->fl6_icmp_type = icmp[0];
3445 				fl6->fl6_icmp_code = icmp[1];
3446 			}
3447 			fl6->flowi6_proto = nexthdr;
3448 			return;
3449 #if IS_ENABLED(CONFIG_IPV6_MIP6)
3450 		case IPPROTO_MH:
3451 			offset += ipv6_optlen(exthdr);
3452 			if (!onlyproto && (nh + offset + 3 < skb->data ||
3453 			    pskb_may_pull(skb, nh + offset + 3 - skb->data))) {
3454 				struct ip6_mh *mh;
3455 
3456 				nh = skb_network_header(skb);
3457 				mh = (struct ip6_mh *)(nh + offset);
3458 				fl6->fl6_mh_type = mh->ip6mh_type;
3459 			}
3460 			fl6->flowi6_proto = nexthdr;
3461 			return;
3462 #endif
3463 		/* XXX Why are there these headers? */
3464 		case IPPROTO_AH:
3465 		case IPPROTO_ESP:
3466 		case IPPROTO_COMP:
3467 		default:
3468 			fl6->fl6_ipsec_spi = 0;
3469 			fl6->flowi6_proto = nexthdr;
3470 			return;
3471 		}
3472 	}
3473 }
3474 #endif
3475 
3476 int __xfrm_decode_session(struct sk_buff *skb, struct flowi *fl,
3477 			  unsigned int family, int reverse)
3478 {
3479 	switch (family) {
3480 	case AF_INET:
3481 		decode_session4(skb, fl, reverse);
3482 		break;
3483 #if IS_ENABLED(CONFIG_IPV6)
3484 	case AF_INET6:
3485 		decode_session6(skb, fl, reverse);
3486 		break;
3487 #endif
3488 	default:
3489 		return -EAFNOSUPPORT;
3490 	}
3491 
3492 	return security_xfrm_decode_session(skb, &fl->flowi_secid);
3493 }
3494 EXPORT_SYMBOL(__xfrm_decode_session);
3495 
3496 static inline int secpath_has_nontransport(const struct sec_path *sp, int k, int *idxp)
3497 {
3498 	for (; k < sp->len; k++) {
3499 		if (sp->xvec[k]->props.mode != XFRM_MODE_TRANSPORT) {
3500 			*idxp = k;
3501 			return 1;
3502 		}
3503 	}
3504 
3505 	return 0;
3506 }
3507 
3508 int __xfrm_policy_check(struct sock *sk, int dir, struct sk_buff *skb,
3509 			unsigned short family)
3510 {
3511 	struct net *net = dev_net(skb->dev);
3512 	struct xfrm_policy *pol;
3513 	struct xfrm_policy *pols[XFRM_POLICY_TYPE_MAX];
3514 	int npols = 0;
3515 	int xfrm_nr;
3516 	int pi;
3517 	int reverse;
3518 	struct flowi fl;
3519 	int xerr_idx = -1;
3520 	const struct xfrm_if_cb *ifcb;
3521 	struct sec_path *sp;
3522 	struct xfrm_if *xi;
3523 	u32 if_id = 0;
3524 
3525 	rcu_read_lock();
3526 	ifcb = xfrm_if_get_cb();
3527 
3528 	if (ifcb) {
3529 		xi = ifcb->decode_session(skb, family);
3530 		if (xi) {
3531 			if_id = xi->p.if_id;
3532 			net = xi->net;
3533 		}
3534 	}
3535 	rcu_read_unlock();
3536 
3537 	reverse = dir & ~XFRM_POLICY_MASK;
3538 	dir &= XFRM_POLICY_MASK;
3539 
3540 	if (__xfrm_decode_session(skb, &fl, family, reverse) < 0) {
3541 		XFRM_INC_STATS(net, LINUX_MIB_XFRMINHDRERROR);
3542 		return 0;
3543 	}
3544 
3545 	nf_nat_decode_session(skb, &fl, family);
3546 
3547 	/* First, check used SA against their selectors. */
3548 	sp = skb_sec_path(skb);
3549 	if (sp) {
3550 		int i;
3551 
3552 		for (i = sp->len - 1; i >= 0; i--) {
3553 			struct xfrm_state *x = sp->xvec[i];
3554 			if (!xfrm_selector_match(&x->sel, &fl, family)) {
3555 				XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATEMISMATCH);
3556 				return 0;
3557 			}
3558 		}
3559 	}
3560 
3561 	pol = NULL;
3562 	sk = sk_to_full_sk(sk);
3563 	if (sk && sk->sk_policy[dir]) {
3564 		pol = xfrm_sk_policy_lookup(sk, dir, &fl, family, if_id);
3565 		if (IS_ERR(pol)) {
3566 			XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLERROR);
3567 			return 0;
3568 		}
3569 	}
3570 
3571 	if (!pol)
3572 		pol = xfrm_policy_lookup(net, &fl, family, dir, if_id);
3573 
3574 	if (IS_ERR(pol)) {
3575 		XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLERROR);
3576 		return 0;
3577 	}
3578 
3579 	if (!pol) {
3580 		if (sp && secpath_has_nontransport(sp, 0, &xerr_idx)) {
3581 			xfrm_secpath_reject(xerr_idx, skb, &fl);
3582 			XFRM_INC_STATS(net, LINUX_MIB_XFRMINNOPOLS);
3583 			return 0;
3584 		}
3585 		return 1;
3586 	}
3587 
3588 	pol->curlft.use_time = ktime_get_real_seconds();
3589 
3590 	pols[0] = pol;
3591 	npols++;
3592 #ifdef CONFIG_XFRM_SUB_POLICY
3593 	if (pols[0]->type != XFRM_POLICY_TYPE_MAIN) {
3594 		pols[1] = xfrm_policy_lookup_bytype(net, XFRM_POLICY_TYPE_MAIN,
3595 						    &fl, family,
3596 						    XFRM_POLICY_IN, if_id);
3597 		if (pols[1]) {
3598 			if (IS_ERR(pols[1])) {
3599 				XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLERROR);
3600 				return 0;
3601 			}
3602 			pols[1]->curlft.use_time = ktime_get_real_seconds();
3603 			npols++;
3604 		}
3605 	}
3606 #endif
3607 
3608 	if (pol->action == XFRM_POLICY_ALLOW) {
3609 		static struct sec_path dummy;
3610 		struct xfrm_tmpl *tp[XFRM_MAX_DEPTH];
3611 		struct xfrm_tmpl *stp[XFRM_MAX_DEPTH];
3612 		struct xfrm_tmpl **tpp = tp;
3613 		int ti = 0;
3614 		int i, k;
3615 
3616 		sp = skb_sec_path(skb);
3617 		if (!sp)
3618 			sp = &dummy;
3619 
3620 		for (pi = 0; pi < npols; pi++) {
3621 			if (pols[pi] != pol &&
3622 			    pols[pi]->action != XFRM_POLICY_ALLOW) {
3623 				XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLBLOCK);
3624 				goto reject;
3625 			}
3626 			if (ti + pols[pi]->xfrm_nr >= XFRM_MAX_DEPTH) {
3627 				XFRM_INC_STATS(net, LINUX_MIB_XFRMINBUFFERERROR);
3628 				goto reject_error;
3629 			}
3630 			for (i = 0; i < pols[pi]->xfrm_nr; i++)
3631 				tpp[ti++] = &pols[pi]->xfrm_vec[i];
3632 		}
3633 		xfrm_nr = ti;
3634 		if (npols > 1) {
3635 			xfrm_tmpl_sort(stp, tpp, xfrm_nr, family);
3636 			tpp = stp;
3637 		}
3638 
3639 		/* For each tunnel xfrm, find the first matching tmpl.
3640 		 * For each tmpl before that, find corresponding xfrm.
3641 		 * Order is _important_. Later we will implement
3642 		 * some barriers, but at the moment barriers
3643 		 * are implied between each two transformations.
3644 		 */
3645 		for (i = xfrm_nr-1, k = 0; i >= 0; i--) {
3646 			k = xfrm_policy_ok(tpp[i], sp, k, family);
3647 			if (k < 0) {
3648 				if (k < -1)
3649 					/* "-2 - errored_index" returned */
3650 					xerr_idx = -(2+k);
3651 				XFRM_INC_STATS(net, LINUX_MIB_XFRMINTMPLMISMATCH);
3652 				goto reject;
3653 			}
3654 		}
3655 
3656 		if (secpath_has_nontransport(sp, k, &xerr_idx)) {
3657 			XFRM_INC_STATS(net, LINUX_MIB_XFRMINTMPLMISMATCH);
3658 			goto reject;
3659 		}
3660 
3661 		xfrm_pols_put(pols, npols);
3662 		return 1;
3663 	}
3664 	XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLBLOCK);
3665 
3666 reject:
3667 	xfrm_secpath_reject(xerr_idx, skb, &fl);
3668 reject_error:
3669 	xfrm_pols_put(pols, npols);
3670 	return 0;
3671 }
3672 EXPORT_SYMBOL(__xfrm_policy_check);
3673 
3674 int __xfrm_route_forward(struct sk_buff *skb, unsigned short family)
3675 {
3676 	struct net *net = dev_net(skb->dev);
3677 	struct flowi fl;
3678 	struct dst_entry *dst;
3679 	int res = 1;
3680 
3681 	if (xfrm_decode_session(skb, &fl, family) < 0) {
3682 		XFRM_INC_STATS(net, LINUX_MIB_XFRMFWDHDRERROR);
3683 		return 0;
3684 	}
3685 
3686 	skb_dst_force(skb);
3687 	if (!skb_dst(skb)) {
3688 		XFRM_INC_STATS(net, LINUX_MIB_XFRMFWDHDRERROR);
3689 		return 0;
3690 	}
3691 
3692 	dst = xfrm_lookup(net, skb_dst(skb), &fl, NULL, XFRM_LOOKUP_QUEUE);
3693 	if (IS_ERR(dst)) {
3694 		res = 0;
3695 		dst = NULL;
3696 	}
3697 	skb_dst_set(skb, dst);
3698 	return res;
3699 }
3700 EXPORT_SYMBOL(__xfrm_route_forward);
3701 
3702 /* Optimize later using cookies and generation ids. */
3703 
3704 static struct dst_entry *xfrm_dst_check(struct dst_entry *dst, u32 cookie)
3705 {
3706 	/* Code (such as __xfrm4_bundle_create()) sets dst->obsolete
3707 	 * to DST_OBSOLETE_FORCE_CHK to force all XFRM destinations to
3708 	 * get validated by dst_ops->check on every use.  We do this
3709 	 * because when a normal route referenced by an XFRM dst is
3710 	 * obsoleted we do not go looking around for all parent
3711 	 * referencing XFRM dsts so that we can invalidate them.  It
3712 	 * is just too much work.  Instead we make the checks here on
3713 	 * every use.  For example:
3714 	 *
3715 	 *	XFRM dst A --> IPv4 dst X
3716 	 *
3717 	 * X is the "xdst->route" of A (X is also the "dst->path" of A
3718 	 * in this example).  If X is marked obsolete, "A" will not
3719 	 * notice.  That's what we are validating here via the
3720 	 * stale_bundle() check.
3721 	 *
3722 	 * When a dst is removed from the fib tree, DST_OBSOLETE_DEAD will
3723 	 * be marked on it.
3724 	 * This will force stale_bundle() to fail on any xdst bundle with
3725 	 * this dst linked in it.
3726 	 */
3727 	if (dst->obsolete < 0 && !stale_bundle(dst))
3728 		return dst;
3729 
3730 	return NULL;
3731 }
3732 
3733 static int stale_bundle(struct dst_entry *dst)
3734 {
3735 	return !xfrm_bundle_ok((struct xfrm_dst *)dst);
3736 }
3737 
3738 void xfrm_dst_ifdown(struct dst_entry *dst, struct net_device *dev)
3739 {
3740 	while ((dst = xfrm_dst_child(dst)) && dst->xfrm && dst->dev == dev) {
3741 		dst->dev = dev_net(dev)->loopback_dev;
3742 		dev_hold(dst->dev);
3743 		dev_put(dev);
3744 	}
3745 }
3746 EXPORT_SYMBOL(xfrm_dst_ifdown);
3747 
3748 static void xfrm_link_failure(struct sk_buff *skb)
3749 {
3750 	/* Impossible. Such dst must be popped before reaches point of failure. */
3751 }
3752 
3753 static struct dst_entry *xfrm_negative_advice(struct dst_entry *dst)
3754 {
3755 	if (dst) {
3756 		if (dst->obsolete) {
3757 			dst_release(dst);
3758 			dst = NULL;
3759 		}
3760 	}
3761 	return dst;
3762 }
3763 
3764 static void xfrm_init_pmtu(struct xfrm_dst **bundle, int nr)
3765 {
3766 	while (nr--) {
3767 		struct xfrm_dst *xdst = bundle[nr];
3768 		u32 pmtu, route_mtu_cached;
3769 		struct dst_entry *dst;
3770 
3771 		dst = &xdst->u.dst;
3772 		pmtu = dst_mtu(xfrm_dst_child(dst));
3773 		xdst->child_mtu_cached = pmtu;
3774 
3775 		pmtu = xfrm_state_mtu(dst->xfrm, pmtu);
3776 
3777 		route_mtu_cached = dst_mtu(xdst->route);
3778 		xdst->route_mtu_cached = route_mtu_cached;
3779 
3780 		if (pmtu > route_mtu_cached)
3781 			pmtu = route_mtu_cached;
3782 
3783 		dst_metric_set(dst, RTAX_MTU, pmtu);
3784 	}
3785 }
3786 
3787 /* Check that the bundle accepts the flow and its components are
3788  * still valid.
3789  */
3790 
3791 static int xfrm_bundle_ok(struct xfrm_dst *first)
3792 {
3793 	struct xfrm_dst *bundle[XFRM_MAX_DEPTH];
3794 	struct dst_entry *dst = &first->u.dst;
3795 	struct xfrm_dst *xdst;
3796 	int start_from, nr;
3797 	u32 mtu;
3798 
3799 	if (!dst_check(xfrm_dst_path(dst), ((struct xfrm_dst *)dst)->path_cookie) ||
3800 	    (dst->dev && !netif_running(dst->dev)))
3801 		return 0;
3802 
3803 	if (dst->flags & DST_XFRM_QUEUE)
3804 		return 1;
3805 
3806 	start_from = nr = 0;
3807 	do {
3808 		struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
3809 
3810 		if (dst->xfrm->km.state != XFRM_STATE_VALID)
3811 			return 0;
3812 		if (xdst->xfrm_genid != dst->xfrm->genid)
3813 			return 0;
3814 		if (xdst->num_pols > 0 &&
3815 		    xdst->policy_genid != atomic_read(&xdst->pols[0]->genid))
3816 			return 0;
3817 
3818 		bundle[nr++] = xdst;
3819 
3820 		mtu = dst_mtu(xfrm_dst_child(dst));
3821 		if (xdst->child_mtu_cached != mtu) {
3822 			start_from = nr;
3823 			xdst->child_mtu_cached = mtu;
3824 		}
3825 
3826 		if (!dst_check(xdst->route, xdst->route_cookie))
3827 			return 0;
3828 		mtu = dst_mtu(xdst->route);
3829 		if (xdst->route_mtu_cached != mtu) {
3830 			start_from = nr;
3831 			xdst->route_mtu_cached = mtu;
3832 		}
3833 
3834 		dst = xfrm_dst_child(dst);
3835 	} while (dst->xfrm);
3836 
3837 	if (likely(!start_from))
3838 		return 1;
3839 
3840 	xdst = bundle[start_from - 1];
3841 	mtu = xdst->child_mtu_cached;
3842 	while (start_from--) {
3843 		dst = &xdst->u.dst;
3844 
3845 		mtu = xfrm_state_mtu(dst->xfrm, mtu);
3846 		if (mtu > xdst->route_mtu_cached)
3847 			mtu = xdst->route_mtu_cached;
3848 		dst_metric_set(dst, RTAX_MTU, mtu);
3849 		if (!start_from)
3850 			break;
3851 
3852 		xdst = bundle[start_from - 1];
3853 		xdst->child_mtu_cached = mtu;
3854 	}
3855 
3856 	return 1;
3857 }
3858 
3859 static unsigned int xfrm_default_advmss(const struct dst_entry *dst)
3860 {
3861 	return dst_metric_advmss(xfrm_dst_path(dst));
3862 }
3863 
3864 static unsigned int xfrm_mtu(const struct dst_entry *dst)
3865 {
3866 	unsigned int mtu = dst_metric_raw(dst, RTAX_MTU);
3867 
3868 	return mtu ? : dst_mtu(xfrm_dst_path(dst));
3869 }
3870 
3871 static const void *xfrm_get_dst_nexthop(const struct dst_entry *dst,
3872 					const void *daddr)
3873 {
3874 	while (dst->xfrm) {
3875 		const struct xfrm_state *xfrm = dst->xfrm;
3876 
3877 		dst = xfrm_dst_child(dst);
3878 
3879 		if (xfrm->props.mode == XFRM_MODE_TRANSPORT)
3880 			continue;
3881 		if (xfrm->type->flags & XFRM_TYPE_REMOTE_COADDR)
3882 			daddr = xfrm->coaddr;
3883 		else if (!(xfrm->type->flags & XFRM_TYPE_LOCAL_COADDR))
3884 			daddr = &xfrm->id.daddr;
3885 	}
3886 	return daddr;
3887 }
3888 
3889 static struct neighbour *xfrm_neigh_lookup(const struct dst_entry *dst,
3890 					   struct sk_buff *skb,
3891 					   const void *daddr)
3892 {
3893 	const struct dst_entry *path = xfrm_dst_path(dst);
3894 
3895 	if (!skb)
3896 		daddr = xfrm_get_dst_nexthop(dst, daddr);
3897 	return path->ops->neigh_lookup(path, skb, daddr);
3898 }
3899 
3900 static void xfrm_confirm_neigh(const struct dst_entry *dst, const void *daddr)
3901 {
3902 	const struct dst_entry *path = xfrm_dst_path(dst);
3903 
3904 	daddr = xfrm_get_dst_nexthop(dst, daddr);
3905 	path->ops->confirm_neigh(path, daddr);
3906 }
3907 
3908 int xfrm_policy_register_afinfo(const struct xfrm_policy_afinfo *afinfo, int family)
3909 {
3910 	int err = 0;
3911 
3912 	if (WARN_ON(family >= ARRAY_SIZE(xfrm_policy_afinfo)))
3913 		return -EAFNOSUPPORT;
3914 
3915 	spin_lock(&xfrm_policy_afinfo_lock);
3916 	if (unlikely(xfrm_policy_afinfo[family] != NULL))
3917 		err = -EEXIST;
3918 	else {
3919 		struct dst_ops *dst_ops = afinfo->dst_ops;
3920 		if (likely(dst_ops->kmem_cachep == NULL))
3921 			dst_ops->kmem_cachep = xfrm_dst_cache;
3922 		if (likely(dst_ops->check == NULL))
3923 			dst_ops->check = xfrm_dst_check;
3924 		if (likely(dst_ops->default_advmss == NULL))
3925 			dst_ops->default_advmss = xfrm_default_advmss;
3926 		if (likely(dst_ops->mtu == NULL))
3927 			dst_ops->mtu = xfrm_mtu;
3928 		if (likely(dst_ops->negative_advice == NULL))
3929 			dst_ops->negative_advice = xfrm_negative_advice;
3930 		if (likely(dst_ops->link_failure == NULL))
3931 			dst_ops->link_failure = xfrm_link_failure;
3932 		if (likely(dst_ops->neigh_lookup == NULL))
3933 			dst_ops->neigh_lookup = xfrm_neigh_lookup;
3934 		if (likely(!dst_ops->confirm_neigh))
3935 			dst_ops->confirm_neigh = xfrm_confirm_neigh;
3936 		rcu_assign_pointer(xfrm_policy_afinfo[family], afinfo);
3937 	}
3938 	spin_unlock(&xfrm_policy_afinfo_lock);
3939 
3940 	return err;
3941 }
3942 EXPORT_SYMBOL(xfrm_policy_register_afinfo);
3943 
3944 void xfrm_policy_unregister_afinfo(const struct xfrm_policy_afinfo *afinfo)
3945 {
3946 	struct dst_ops *dst_ops = afinfo->dst_ops;
3947 	int i;
3948 
3949 	for (i = 0; i < ARRAY_SIZE(xfrm_policy_afinfo); i++) {
3950 		if (xfrm_policy_afinfo[i] != afinfo)
3951 			continue;
3952 		RCU_INIT_POINTER(xfrm_policy_afinfo[i], NULL);
3953 		break;
3954 	}
3955 
3956 	synchronize_rcu();
3957 
3958 	dst_ops->kmem_cachep = NULL;
3959 	dst_ops->check = NULL;
3960 	dst_ops->negative_advice = NULL;
3961 	dst_ops->link_failure = NULL;
3962 }
3963 EXPORT_SYMBOL(xfrm_policy_unregister_afinfo);
3964 
3965 void xfrm_if_register_cb(const struct xfrm_if_cb *ifcb)
3966 {
3967 	spin_lock(&xfrm_if_cb_lock);
3968 	rcu_assign_pointer(xfrm_if_cb, ifcb);
3969 	spin_unlock(&xfrm_if_cb_lock);
3970 }
3971 EXPORT_SYMBOL(xfrm_if_register_cb);
3972 
3973 void xfrm_if_unregister_cb(void)
3974 {
3975 	RCU_INIT_POINTER(xfrm_if_cb, NULL);
3976 	synchronize_rcu();
3977 }
3978 EXPORT_SYMBOL(xfrm_if_unregister_cb);
3979 
3980 #ifdef CONFIG_XFRM_STATISTICS
3981 static int __net_init xfrm_statistics_init(struct net *net)
3982 {
3983 	int rv;
3984 	net->mib.xfrm_statistics = alloc_percpu(struct linux_xfrm_mib);
3985 	if (!net->mib.xfrm_statistics)
3986 		return -ENOMEM;
3987 	rv = xfrm_proc_init(net);
3988 	if (rv < 0)
3989 		free_percpu(net->mib.xfrm_statistics);
3990 	return rv;
3991 }
3992 
3993 static void xfrm_statistics_fini(struct net *net)
3994 {
3995 	xfrm_proc_fini(net);
3996 	free_percpu(net->mib.xfrm_statistics);
3997 }
3998 #else
3999 static int __net_init xfrm_statistics_init(struct net *net)
4000 {
4001 	return 0;
4002 }
4003 
4004 static void xfrm_statistics_fini(struct net *net)
4005 {
4006 }
4007 #endif
4008 
4009 static int __net_init xfrm_policy_init(struct net *net)
4010 {
4011 	unsigned int hmask, sz;
4012 	int dir, err;
4013 
4014 	if (net_eq(net, &init_net)) {
4015 		xfrm_dst_cache = kmem_cache_create("xfrm_dst_cache",
4016 					   sizeof(struct xfrm_dst),
4017 					   0, SLAB_HWCACHE_ALIGN|SLAB_PANIC,
4018 					   NULL);
4019 		err = rhashtable_init(&xfrm_policy_inexact_table,
4020 				      &xfrm_pol_inexact_params);
4021 		BUG_ON(err);
4022 	}
4023 
4024 	hmask = 8 - 1;
4025 	sz = (hmask+1) * sizeof(struct hlist_head);
4026 
4027 	net->xfrm.policy_byidx = xfrm_hash_alloc(sz);
4028 	if (!net->xfrm.policy_byidx)
4029 		goto out_byidx;
4030 	net->xfrm.policy_idx_hmask = hmask;
4031 
4032 	for (dir = 0; dir < XFRM_POLICY_MAX; dir++) {
4033 		struct xfrm_policy_hash *htab;
4034 
4035 		net->xfrm.policy_count[dir] = 0;
4036 		net->xfrm.policy_count[XFRM_POLICY_MAX + dir] = 0;
4037 		INIT_HLIST_HEAD(&net->xfrm.policy_inexact[dir]);
4038 
4039 		htab = &net->xfrm.policy_bydst[dir];
4040 		htab->table = xfrm_hash_alloc(sz);
4041 		if (!htab->table)
4042 			goto out_bydst;
4043 		htab->hmask = hmask;
4044 		htab->dbits4 = 32;
4045 		htab->sbits4 = 32;
4046 		htab->dbits6 = 128;
4047 		htab->sbits6 = 128;
4048 	}
4049 	net->xfrm.policy_hthresh.lbits4 = 32;
4050 	net->xfrm.policy_hthresh.rbits4 = 32;
4051 	net->xfrm.policy_hthresh.lbits6 = 128;
4052 	net->xfrm.policy_hthresh.rbits6 = 128;
4053 
4054 	seqlock_init(&net->xfrm.policy_hthresh.lock);
4055 
4056 	INIT_LIST_HEAD(&net->xfrm.policy_all);
4057 	INIT_LIST_HEAD(&net->xfrm.inexact_bins);
4058 	INIT_WORK(&net->xfrm.policy_hash_work, xfrm_hash_resize);
4059 	INIT_WORK(&net->xfrm.policy_hthresh.work, xfrm_hash_rebuild);
4060 	return 0;
4061 
4062 out_bydst:
4063 	for (dir--; dir >= 0; dir--) {
4064 		struct xfrm_policy_hash *htab;
4065 
4066 		htab = &net->xfrm.policy_bydst[dir];
4067 		xfrm_hash_free(htab->table, sz);
4068 	}
4069 	xfrm_hash_free(net->xfrm.policy_byidx, sz);
4070 out_byidx:
4071 	return -ENOMEM;
4072 }
4073 
4074 static void xfrm_policy_fini(struct net *net)
4075 {
4076 	struct xfrm_pol_inexact_bin *b, *t;
4077 	unsigned int sz;
4078 	int dir;
4079 
4080 	flush_work(&net->xfrm.policy_hash_work);
4081 #ifdef CONFIG_XFRM_SUB_POLICY
4082 	xfrm_policy_flush(net, XFRM_POLICY_TYPE_SUB, false);
4083 #endif
4084 	xfrm_policy_flush(net, XFRM_POLICY_TYPE_MAIN, false);
4085 
4086 	WARN_ON(!list_empty(&net->xfrm.policy_all));
4087 
4088 	for (dir = 0; dir < XFRM_POLICY_MAX; dir++) {
4089 		struct xfrm_policy_hash *htab;
4090 
4091 		WARN_ON(!hlist_empty(&net->xfrm.policy_inexact[dir]));
4092 
4093 		htab = &net->xfrm.policy_bydst[dir];
4094 		sz = (htab->hmask + 1) * sizeof(struct hlist_head);
4095 		WARN_ON(!hlist_empty(htab->table));
4096 		xfrm_hash_free(htab->table, sz);
4097 	}
4098 
4099 	sz = (net->xfrm.policy_idx_hmask + 1) * sizeof(struct hlist_head);
4100 	WARN_ON(!hlist_empty(net->xfrm.policy_byidx));
4101 	xfrm_hash_free(net->xfrm.policy_byidx, sz);
4102 
4103 	spin_lock_bh(&net->xfrm.xfrm_policy_lock);
4104 	list_for_each_entry_safe(b, t, &net->xfrm.inexact_bins, inexact_bins)
4105 		__xfrm_policy_inexact_prune_bin(b, true);
4106 	spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
4107 }
4108 
4109 static int __net_init xfrm_net_init(struct net *net)
4110 {
4111 	int rv;
4112 
4113 	/* Initialize the per-net locks here */
4114 	spin_lock_init(&net->xfrm.xfrm_state_lock);
4115 	spin_lock_init(&net->xfrm.xfrm_policy_lock);
4116 	mutex_init(&net->xfrm.xfrm_cfg_mutex);
4117 
4118 	rv = xfrm_statistics_init(net);
4119 	if (rv < 0)
4120 		goto out_statistics;
4121 	rv = xfrm_state_init(net);
4122 	if (rv < 0)
4123 		goto out_state;
4124 	rv = xfrm_policy_init(net);
4125 	if (rv < 0)
4126 		goto out_policy;
4127 	rv = xfrm_sysctl_init(net);
4128 	if (rv < 0)
4129 		goto out_sysctl;
4130 
4131 	return 0;
4132 
4133 out_sysctl:
4134 	xfrm_policy_fini(net);
4135 out_policy:
4136 	xfrm_state_fini(net);
4137 out_state:
4138 	xfrm_statistics_fini(net);
4139 out_statistics:
4140 	return rv;
4141 }
4142 
4143 static void __net_exit xfrm_net_exit(struct net *net)
4144 {
4145 	xfrm_sysctl_fini(net);
4146 	xfrm_policy_fini(net);
4147 	xfrm_state_fini(net);
4148 	xfrm_statistics_fini(net);
4149 }
4150 
4151 static struct pernet_operations __net_initdata xfrm_net_ops = {
4152 	.init = xfrm_net_init,
4153 	.exit = xfrm_net_exit,
4154 };
4155 
4156 void __init xfrm_init(void)
4157 {
4158 	register_pernet_subsys(&xfrm_net_ops);
4159 	xfrm_dev_init();
4160 	seqcount_init(&xfrm_policy_hash_generation);
4161 	xfrm_input_init();
4162 
4163 #ifdef CONFIG_INET_ESPINTCP
4164 	espintcp_init();
4165 #endif
4166 
4167 	RCU_INIT_POINTER(xfrm_if_cb, NULL);
4168 	synchronize_rcu();
4169 }
4170 
4171 #ifdef CONFIG_AUDITSYSCALL
4172 static void xfrm_audit_common_policyinfo(struct xfrm_policy *xp,
4173 					 struct audit_buffer *audit_buf)
4174 {
4175 	struct xfrm_sec_ctx *ctx = xp->security;
4176 	struct xfrm_selector *sel = &xp->selector;
4177 
4178 	if (ctx)
4179 		audit_log_format(audit_buf, " sec_alg=%u sec_doi=%u sec_obj=%s",
4180 				 ctx->ctx_alg, ctx->ctx_doi, ctx->ctx_str);
4181 
4182 	switch (sel->family) {
4183 	case AF_INET:
4184 		audit_log_format(audit_buf, " src=%pI4", &sel->saddr.a4);
4185 		if (sel->prefixlen_s != 32)
4186 			audit_log_format(audit_buf, " src_prefixlen=%d",
4187 					 sel->prefixlen_s);
4188 		audit_log_format(audit_buf, " dst=%pI4", &sel->daddr.a4);
4189 		if (sel->prefixlen_d != 32)
4190 			audit_log_format(audit_buf, " dst_prefixlen=%d",
4191 					 sel->prefixlen_d);
4192 		break;
4193 	case AF_INET6:
4194 		audit_log_format(audit_buf, " src=%pI6", sel->saddr.a6);
4195 		if (sel->prefixlen_s != 128)
4196 			audit_log_format(audit_buf, " src_prefixlen=%d",
4197 					 sel->prefixlen_s);
4198 		audit_log_format(audit_buf, " dst=%pI6", sel->daddr.a6);
4199 		if (sel->prefixlen_d != 128)
4200 			audit_log_format(audit_buf, " dst_prefixlen=%d",
4201 					 sel->prefixlen_d);
4202 		break;
4203 	}
4204 }
4205 
4206 void xfrm_audit_policy_add(struct xfrm_policy *xp, int result, bool task_valid)
4207 {
4208 	struct audit_buffer *audit_buf;
4209 
4210 	audit_buf = xfrm_audit_start("SPD-add");
4211 	if (audit_buf == NULL)
4212 		return;
4213 	xfrm_audit_helper_usrinfo(task_valid, audit_buf);
4214 	audit_log_format(audit_buf, " res=%u", result);
4215 	xfrm_audit_common_policyinfo(xp, audit_buf);
4216 	audit_log_end(audit_buf);
4217 }
4218 EXPORT_SYMBOL_GPL(xfrm_audit_policy_add);
4219 
4220 void xfrm_audit_policy_delete(struct xfrm_policy *xp, int result,
4221 			      bool task_valid)
4222 {
4223 	struct audit_buffer *audit_buf;
4224 
4225 	audit_buf = xfrm_audit_start("SPD-delete");
4226 	if (audit_buf == NULL)
4227 		return;
4228 	xfrm_audit_helper_usrinfo(task_valid, audit_buf);
4229 	audit_log_format(audit_buf, " res=%u", result);
4230 	xfrm_audit_common_policyinfo(xp, audit_buf);
4231 	audit_log_end(audit_buf);
4232 }
4233 EXPORT_SYMBOL_GPL(xfrm_audit_policy_delete);
4234 #endif
4235 
4236 #ifdef CONFIG_XFRM_MIGRATE
4237 static bool xfrm_migrate_selector_match(const struct xfrm_selector *sel_cmp,
4238 					const struct xfrm_selector *sel_tgt)
4239 {
4240 	if (sel_cmp->proto == IPSEC_ULPROTO_ANY) {
4241 		if (sel_tgt->family == sel_cmp->family &&
4242 		    xfrm_addr_equal(&sel_tgt->daddr, &sel_cmp->daddr,
4243 				    sel_cmp->family) &&
4244 		    xfrm_addr_equal(&sel_tgt->saddr, &sel_cmp->saddr,
4245 				    sel_cmp->family) &&
4246 		    sel_tgt->prefixlen_d == sel_cmp->prefixlen_d &&
4247 		    sel_tgt->prefixlen_s == sel_cmp->prefixlen_s) {
4248 			return true;
4249 		}
4250 	} else {
4251 		if (memcmp(sel_tgt, sel_cmp, sizeof(*sel_tgt)) == 0) {
4252 			return true;
4253 		}
4254 	}
4255 	return false;
4256 }
4257 
4258 static struct xfrm_policy *xfrm_migrate_policy_find(const struct xfrm_selector *sel,
4259 						    u8 dir, u8 type, struct net *net)
4260 {
4261 	struct xfrm_policy *pol, *ret = NULL;
4262 	struct hlist_head *chain;
4263 	u32 priority = ~0U;
4264 
4265 	spin_lock_bh(&net->xfrm.xfrm_policy_lock);
4266 	chain = policy_hash_direct(net, &sel->daddr, &sel->saddr, sel->family, dir);
4267 	hlist_for_each_entry(pol, chain, bydst) {
4268 		if (xfrm_migrate_selector_match(sel, &pol->selector) &&
4269 		    pol->type == type) {
4270 			ret = pol;
4271 			priority = ret->priority;
4272 			break;
4273 		}
4274 	}
4275 	chain = &net->xfrm.policy_inexact[dir];
4276 	hlist_for_each_entry(pol, chain, bydst_inexact_list) {
4277 		if ((pol->priority >= priority) && ret)
4278 			break;
4279 
4280 		if (xfrm_migrate_selector_match(sel, &pol->selector) &&
4281 		    pol->type == type) {
4282 			ret = pol;
4283 			break;
4284 		}
4285 	}
4286 
4287 	xfrm_pol_hold(ret);
4288 
4289 	spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
4290 
4291 	return ret;
4292 }
4293 
4294 static int migrate_tmpl_match(const struct xfrm_migrate *m, const struct xfrm_tmpl *t)
4295 {
4296 	int match = 0;
4297 
4298 	if (t->mode == m->mode && t->id.proto == m->proto &&
4299 	    (m->reqid == 0 || t->reqid == m->reqid)) {
4300 		switch (t->mode) {
4301 		case XFRM_MODE_TUNNEL:
4302 		case XFRM_MODE_BEET:
4303 			if (xfrm_addr_equal(&t->id.daddr, &m->old_daddr,
4304 					    m->old_family) &&
4305 			    xfrm_addr_equal(&t->saddr, &m->old_saddr,
4306 					    m->old_family)) {
4307 				match = 1;
4308 			}
4309 			break;
4310 		case XFRM_MODE_TRANSPORT:
4311 			/* in case of transport mode, template does not store
4312 			   any IP addresses, hence we just compare mode and
4313 			   protocol */
4314 			match = 1;
4315 			break;
4316 		default:
4317 			break;
4318 		}
4319 	}
4320 	return match;
4321 }
4322 
4323 /* update endpoint address(es) of template(s) */
4324 static int xfrm_policy_migrate(struct xfrm_policy *pol,
4325 			       struct xfrm_migrate *m, int num_migrate)
4326 {
4327 	struct xfrm_migrate *mp;
4328 	int i, j, n = 0;
4329 
4330 	write_lock_bh(&pol->lock);
4331 	if (unlikely(pol->walk.dead)) {
4332 		/* target policy has been deleted */
4333 		write_unlock_bh(&pol->lock);
4334 		return -ENOENT;
4335 	}
4336 
4337 	for (i = 0; i < pol->xfrm_nr; i++) {
4338 		for (j = 0, mp = m; j < num_migrate; j++, mp++) {
4339 			if (!migrate_tmpl_match(mp, &pol->xfrm_vec[i]))
4340 				continue;
4341 			n++;
4342 			if (pol->xfrm_vec[i].mode != XFRM_MODE_TUNNEL &&
4343 			    pol->xfrm_vec[i].mode != XFRM_MODE_BEET)
4344 				continue;
4345 			/* update endpoints */
4346 			memcpy(&pol->xfrm_vec[i].id.daddr, &mp->new_daddr,
4347 			       sizeof(pol->xfrm_vec[i].id.daddr));
4348 			memcpy(&pol->xfrm_vec[i].saddr, &mp->new_saddr,
4349 			       sizeof(pol->xfrm_vec[i].saddr));
4350 			pol->xfrm_vec[i].encap_family = mp->new_family;
4351 			/* flush bundles */
4352 			atomic_inc(&pol->genid);
4353 		}
4354 	}
4355 
4356 	write_unlock_bh(&pol->lock);
4357 
4358 	if (!n)
4359 		return -ENODATA;
4360 
4361 	return 0;
4362 }
4363 
4364 static int xfrm_migrate_check(const struct xfrm_migrate *m, int num_migrate)
4365 {
4366 	int i, j;
4367 
4368 	if (num_migrate < 1 || num_migrate > XFRM_MAX_DEPTH)
4369 		return -EINVAL;
4370 
4371 	for (i = 0; i < num_migrate; i++) {
4372 		if (xfrm_addr_any(&m[i].new_daddr, m[i].new_family) ||
4373 		    xfrm_addr_any(&m[i].new_saddr, m[i].new_family))
4374 			return -EINVAL;
4375 
4376 		/* check if there is any duplicated entry */
4377 		for (j = i + 1; j < num_migrate; j++) {
4378 			if (!memcmp(&m[i].old_daddr, &m[j].old_daddr,
4379 				    sizeof(m[i].old_daddr)) &&
4380 			    !memcmp(&m[i].old_saddr, &m[j].old_saddr,
4381 				    sizeof(m[i].old_saddr)) &&
4382 			    m[i].proto == m[j].proto &&
4383 			    m[i].mode == m[j].mode &&
4384 			    m[i].reqid == m[j].reqid &&
4385 			    m[i].old_family == m[j].old_family)
4386 				return -EINVAL;
4387 		}
4388 	}
4389 
4390 	return 0;
4391 }
4392 
4393 int xfrm_migrate(const struct xfrm_selector *sel, u8 dir, u8 type,
4394 		 struct xfrm_migrate *m, int num_migrate,
4395 		 struct xfrm_kmaddress *k, struct net *net,
4396 		 struct xfrm_encap_tmpl *encap)
4397 {
4398 	int i, err, nx_cur = 0, nx_new = 0;
4399 	struct xfrm_policy *pol = NULL;
4400 	struct xfrm_state *x, *xc;
4401 	struct xfrm_state *x_cur[XFRM_MAX_DEPTH];
4402 	struct xfrm_state *x_new[XFRM_MAX_DEPTH];
4403 	struct xfrm_migrate *mp;
4404 
4405 	/* Stage 0 - sanity checks */
4406 	if ((err = xfrm_migrate_check(m, num_migrate)) < 0)
4407 		goto out;
4408 
4409 	if (dir >= XFRM_POLICY_MAX) {
4410 		err = -EINVAL;
4411 		goto out;
4412 	}
4413 
4414 	/* Stage 1 - find policy */
4415 	if ((pol = xfrm_migrate_policy_find(sel, dir, type, net)) == NULL) {
4416 		err = -ENOENT;
4417 		goto out;
4418 	}
4419 
4420 	/* Stage 2 - find and update state(s) */
4421 	for (i = 0, mp = m; i < num_migrate; i++, mp++) {
4422 		if ((x = xfrm_migrate_state_find(mp, net))) {
4423 			x_cur[nx_cur] = x;
4424 			nx_cur++;
4425 			xc = xfrm_state_migrate(x, mp, encap);
4426 			if (xc) {
4427 				x_new[nx_new] = xc;
4428 				nx_new++;
4429 			} else {
4430 				err = -ENODATA;
4431 				goto restore_state;
4432 			}
4433 		}
4434 	}
4435 
4436 	/* Stage 3 - update policy */
4437 	if ((err = xfrm_policy_migrate(pol, m, num_migrate)) < 0)
4438 		goto restore_state;
4439 
4440 	/* Stage 4 - delete old state(s) */
4441 	if (nx_cur) {
4442 		xfrm_states_put(x_cur, nx_cur);
4443 		xfrm_states_delete(x_cur, nx_cur);
4444 	}
4445 
4446 	/* Stage 5 - announce */
4447 	km_migrate(sel, dir, type, m, num_migrate, k, encap);
4448 
4449 	xfrm_pol_put(pol);
4450 
4451 	return 0;
4452 out:
4453 	return err;
4454 
4455 restore_state:
4456 	if (pol)
4457 		xfrm_pol_put(pol);
4458 	if (nx_cur)
4459 		xfrm_states_put(x_cur, nx_cur);
4460 	if (nx_new)
4461 		xfrm_states_delete(x_new, nx_new);
4462 
4463 	return err;
4464 }
4465 EXPORT_SYMBOL(xfrm_migrate);
4466 #endif
4467