// SPDX-License-Identifier: GPL-2.0-only
/*
 * xfrm_policy.c
 *
 * Changes:
 *	Mitsuru KANDA @USAGI
 *	Kazunori MIYAZAWA @USAGI
 *	Kunihiro Ishiguro <kunihiro@ipinfusion.com>
 *		IPv6 support
 *	Kazunori MIYAZAWA @USAGI
 *	YOSHIFUJI Hideaki
 *		Split up af-specific portion
 *	Derek Atkins <derek@ihtfp.com>		Add the post_input processor
 *
 */

#include <linux/err.h>
#include <linux/slab.h>
#include <linux/kmod.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <linux/notifier.h>
#include <linux/netdevice.h>
#include <linux/netfilter.h>
#include <linux/module.h>
#include <linux/cache.h>
#include <linux/cpu.h>
#include <linux/audit.h>
#include <linux/rhashtable.h>
#include <linux/if_tunnel.h>
#include <net/dst.h>
#include <net/flow.h>
#include <net/inet_ecn.h>
#include <net/xfrm.h>
#include <net/ip.h>
#include <net/gre.h>
#if IS_ENABLED(CONFIG_IPV6_MIP6)
#include <net/mip6.h>
#endif
#ifdef CONFIG_XFRM_STATISTICS
#include <net/snmp.h>
#endif
#ifdef CONFIG_XFRM_ESPINTCP
#include <net/espintcp.h>
#endif

#include "xfrm_hash.h"

#define XFRM_QUEUE_TMO_MIN ((unsigned)(HZ/10))
#define XFRM_QUEUE_TMO_MAX ((unsigned)(60*HZ))
#define XFRM_MAX_QUEUE_LEN	100

struct xfrm_flo {
	struct dst_entry *dst_orig;
	u8 flags;
};

/* prefixes smaller than this are stored in lists, not trees. */
#define INEXACT_PREFIXLEN_IPV4	16
#define INEXACT_PREFIXLEN_IPV6	48

struct xfrm_pol_inexact_node {
	struct rb_node node;
	union {
		xfrm_address_t addr;
		struct rcu_head rcu;
	};
	u8 prefixlen;

	struct rb_root root;

	/* the policies matching this node; may be an empty list */
	struct hlist_head hhead;
};

/* xfrm inexact policy search tree:
 * xfrm_pol_inexact_bin = hash(dir,type,family,if_id);
 *  |
 * +---- root_d: sorted by daddr:prefix
 * |                 |
 * |        xfrm_pol_inexact_node
 * |                 |
 * |                 +- root: sorted by saddr/prefix
 * |                 |              |
 * |                 |         xfrm_pol_inexact_node
 * |                 |              |
 * |                 |              + root: unused
 * |                 |              |
 * |                 |              + hhead: saddr:daddr policies
 * |                 |
 * |                 +- coarse policies and all any:daddr policies
 * |
 * +---- root_s: sorted by saddr:prefix
 * |                 |
 * |        xfrm_pol_inexact_node
 * |                 |
 * |                 + root: unused
 * |                 |
 * |                 + hhead: saddr:any policies
 * |
 * +---- coarse policies and all any:any policies
 *
 * Lookups return four candidate lists:
 * 1. any:any list from top-level xfrm_pol_inexact_bin
 * 2. any:daddr list from daddr tree
 * 3. saddr:daddr list from 2nd level daddr tree
 * 4. saddr:any list from saddr tree
 *
 * This result set then needs to be searched for the policy with
 * the lowest priority.  If two candidates have the same priority,
 * the youngest one wins.
 */
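
/* Editorial example of the tree above (illustrative only; the policies
 * and addresses are assumed, not from the original source). Four IPv4
 * policies sharing one bin (same dir/type/family/if_id):
 *
 *   A: src any            -> dst 10.1.0.0/24  (any:daddr, root_d node hhead)
 *   B: src 192.168.1.0/24 -> dst 10.1.0.0/24  (saddr:daddr, subtree hhead)
 *   C: src 192.168.1.0/24 -> dst any          (saddr:any, root_s node hhead)
 *   D: src any            -> dst any          (any:any, top-level hhead)
 *
 * A lookup for 192.168.1.5 -> 10.1.0.7 collects all four lists as
 * candidates and then picks the lowest-priority (then youngest) match.
 */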

struct xfrm_pol_inexact_key {
	possible_net_t net;
	u32 if_id;
	u16 family;
	u8 dir, type;
};

struct xfrm_pol_inexact_bin {
	struct xfrm_pol_inexact_key k;
	struct rhash_head head;
	/* list containing '*:*' policies */
	struct hlist_head hhead;

	seqcount_spinlock_t count;
	/* tree sorted by daddr/prefix */
	struct rb_root root_d;

	/* tree sorted by saddr/prefix */
	struct rb_root root_s;

	/* slow path below */
	struct list_head inexact_bins;
	struct rcu_head rcu;
};

enum xfrm_pol_inexact_candidate_type {
	XFRM_POL_CAND_BOTH,
	XFRM_POL_CAND_SADDR,
	XFRM_POL_CAND_DADDR,
	XFRM_POL_CAND_ANY,

	XFRM_POL_CAND_MAX,
};

struct xfrm_pol_inexact_candidates {
	struct hlist_head *res[XFRM_POL_CAND_MAX];
};

static DEFINE_SPINLOCK(xfrm_if_cb_lock);
static struct xfrm_if_cb const __rcu *xfrm_if_cb __read_mostly;

static DEFINE_SPINLOCK(xfrm_policy_afinfo_lock);
static struct xfrm_policy_afinfo const __rcu *xfrm_policy_afinfo[AF_INET6 + 1]
						__read_mostly;

static struct kmem_cache *xfrm_dst_cache __ro_after_init;

static struct rhashtable xfrm_policy_inexact_table;
static const struct rhashtable_params xfrm_pol_inexact_params;

static void xfrm_init_pmtu(struct xfrm_dst **bundle, int nr);
static int stale_bundle(struct dst_entry *dst);
static int xfrm_bundle_ok(struct xfrm_dst *xdst);
static void xfrm_policy_queue_process(struct timer_list *t);

static void __xfrm_policy_link(struct xfrm_policy *pol, int dir);
static struct xfrm_policy *__xfrm_policy_unlink(struct xfrm_policy *pol,
						int dir);

static struct xfrm_pol_inexact_bin *
xfrm_policy_inexact_lookup(struct net *net, u8 type, u16 family, u8 dir,
			   u32 if_id);

static struct xfrm_pol_inexact_bin *
xfrm_policy_inexact_lookup_rcu(struct net *net,
			       u8 type, u16 family, u8 dir, u32 if_id);
static struct xfrm_policy *
xfrm_policy_insert_list(struct hlist_head *chain, struct xfrm_policy *policy,
			bool excl);
static void xfrm_policy_insert_inexact_list(struct hlist_head *chain,
					    struct xfrm_policy *policy);

static bool
xfrm_policy_find_inexact_candidates(struct xfrm_pol_inexact_candidates *cand,
				    struct xfrm_pol_inexact_bin *b,
				    const xfrm_address_t *saddr,
				    const xfrm_address_t *daddr);

static inline bool xfrm_pol_hold_rcu(struct xfrm_policy *policy)
{
	return refcount_inc_not_zero(&policy->refcnt);
}

static inline bool
__xfrm4_selector_match(const struct xfrm_selector *sel, const struct flowi *fl)
{
	const struct flowi4 *fl4 = &fl->u.ip4;

	return  addr4_match(fl4->daddr, sel->daddr.a4, sel->prefixlen_d) &&
		addr4_match(fl4->saddr, sel->saddr.a4, sel->prefixlen_s) &&
		!((xfrm_flowi_dport(fl, &fl4->uli) ^ sel->dport) & sel->dport_mask) &&
		!((xfrm_flowi_sport(fl, &fl4->uli) ^ sel->sport) & sel->sport_mask) &&
		(fl4->flowi4_proto == sel->proto || !sel->proto) &&
		(fl4->flowi4_oif == sel->ifindex || !sel->ifindex);
}
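
/* Worked example of the port test above (editorial, illustrative values):
 * with sel->dport = htons(80) and sel->dport_mask = htons(0xffff), the
 * XOR is zero only when the flow's destination port is 80; with
 * dport_mask = 0 the AND discards all bits, so any port matches.
 */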

static inline bool
__xfrm6_selector_match(const struct xfrm_selector *sel, const struct flowi *fl)
{
	const struct flowi6 *fl6 = &fl->u.ip6;

	return  addr_match(&fl6->daddr, &sel->daddr, sel->prefixlen_d) &&
		addr_match(&fl6->saddr, &sel->saddr, sel->prefixlen_s) &&
		!((xfrm_flowi_dport(fl, &fl6->uli) ^ sel->dport) & sel->dport_mask) &&
		!((xfrm_flowi_sport(fl, &fl6->uli) ^ sel->sport) & sel->sport_mask) &&
		(fl6->flowi6_proto == sel->proto || !sel->proto) &&
		(fl6->flowi6_oif == sel->ifindex || !sel->ifindex);
}

bool xfrm_selector_match(const struct xfrm_selector *sel, const struct flowi *fl,
			 unsigned short family)
{
	switch (family) {
	case AF_INET:
		return __xfrm4_selector_match(sel, fl);
	case AF_INET6:
		return __xfrm6_selector_match(sel, fl);
	}
	return false;
}

static const struct xfrm_policy_afinfo *xfrm_policy_get_afinfo(unsigned short family)
{
	const struct xfrm_policy_afinfo *afinfo;

	if (unlikely(family >= ARRAY_SIZE(xfrm_policy_afinfo)))
		return NULL;
	rcu_read_lock();
	afinfo = rcu_dereference(xfrm_policy_afinfo[family]);
	if (unlikely(!afinfo))
		rcu_read_unlock();
	return afinfo;
}

/* Called with rcu_read_lock(). */
static const struct xfrm_if_cb *xfrm_if_get_cb(void)
{
	return rcu_dereference(xfrm_if_cb);
}

struct dst_entry *__xfrm_dst_lookup(struct net *net, int tos, int oif,
				    const xfrm_address_t *saddr,
				    const xfrm_address_t *daddr,
				    int family, u32 mark)
{
	const struct xfrm_policy_afinfo *afinfo;
	struct dst_entry *dst;

	afinfo = xfrm_policy_get_afinfo(family);
	if (unlikely(afinfo == NULL))
		return ERR_PTR(-EAFNOSUPPORT);

	dst = afinfo->dst_lookup(net, tos, oif, saddr, daddr, mark);

	rcu_read_unlock();

	return dst;
}
EXPORT_SYMBOL(__xfrm_dst_lookup);

static inline struct dst_entry *xfrm_dst_lookup(struct xfrm_state *x,
						int tos, int oif,
						xfrm_address_t *prev_saddr,
						xfrm_address_t *prev_daddr,
						int family, u32 mark)
{
	struct net *net = xs_net(x);
	xfrm_address_t *saddr = &x->props.saddr;
	xfrm_address_t *daddr = &x->id.daddr;
	struct dst_entry *dst;

	if (x->type->flags & XFRM_TYPE_LOCAL_COADDR) {
		saddr = x->coaddr;
		daddr = prev_daddr;
	}
	if (x->type->flags & XFRM_TYPE_REMOTE_COADDR) {
		saddr = prev_saddr;
		daddr = x->coaddr;
	}

	dst = __xfrm_dst_lookup(net, tos, oif, saddr, daddr, family, mark);

	if (!IS_ERR(dst)) {
		if (prev_saddr != saddr)
			memcpy(prev_saddr, saddr,  sizeof(*prev_saddr));
		if (prev_daddr != daddr)
			memcpy(prev_daddr, daddr,  sizeof(*prev_daddr));
	}

	return dst;
}

static inline unsigned long make_jiffies(long secs)
{
	if (secs >= (MAX_SCHEDULE_TIMEOUT-1)/HZ)
		return MAX_SCHEDULE_TIMEOUT-1;
	else
		return secs*HZ;
}
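
/* E.g. with HZ == 250, make_jiffies(4) yields 1000 jiffies; very large
 * values are clamped to MAX_SCHEDULE_TIMEOUT - 1 so secs * HZ cannot
 * exceed the timer interface's limit. (Editorial note, illustrative.)
 */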

static void xfrm_policy_timer(struct timer_list *t)
{
	struct xfrm_policy *xp = from_timer(xp, t, timer);
	time64_t now = ktime_get_real_seconds();
	time64_t next = TIME64_MAX;
	int warn = 0;
	int dir;

	read_lock(&xp->lock);

	if (unlikely(xp->walk.dead))
		goto out;

	dir = xfrm_policy_id2dir(xp->index);

	if (xp->lft.hard_add_expires_seconds) {
		time64_t tmo = xp->lft.hard_add_expires_seconds +
			xp->curlft.add_time - now;
		if (tmo <= 0)
			goto expired;
		if (tmo < next)
			next = tmo;
	}
	if (xp->lft.hard_use_expires_seconds) {
		time64_t tmo = xp->lft.hard_use_expires_seconds +
			(xp->curlft.use_time ? : xp->curlft.add_time) - now;
		if (tmo <= 0)
			goto expired;
		if (tmo < next)
			next = tmo;
	}
	if (xp->lft.soft_add_expires_seconds) {
		time64_t tmo = xp->lft.soft_add_expires_seconds +
			xp->curlft.add_time - now;
		if (tmo <= 0) {
			warn = 1;
			tmo = XFRM_KM_TIMEOUT;
		}
		if (tmo < next)
			next = tmo;
	}
	if (xp->lft.soft_use_expires_seconds) {
		time64_t tmo = xp->lft.soft_use_expires_seconds +
			(xp->curlft.use_time ? : xp->curlft.add_time) - now;
		if (tmo <= 0) {
			warn = 1;
			tmo = XFRM_KM_TIMEOUT;
		}
		if (tmo < next)
			next = tmo;
	}

	if (warn)
		km_policy_expired(xp, dir, 0, 0);
	if (next != TIME64_MAX &&
	    !mod_timer(&xp->timer, jiffies + make_jiffies(next)))
		xfrm_pol_hold(xp);

out:
	read_unlock(&xp->lock);
	xfrm_pol_put(xp);
	return;

expired:
	read_unlock(&xp->lock);
	if (!xfrm_policy_delete(xp, dir))
		km_policy_expired(xp, dir, 1, 0);
	xfrm_pol_put(xp);
}
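
/* Illustrative timeline for the timer above (assumed lifetime values):
 * a policy added at t = 0 with soft_add_expires_seconds = 60 and
 * hard_add_expires_seconds = 120 triggers km_policy_expired(xp, dir, 0, 0)
 * at t = 60 (soft warning, timer re-armed via XFRM_KM_TIMEOUT) and is
 * deleted with a hard expire notification at t = 120.
 */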

/* Allocate xfrm_policy. Not used here; it is intended for use by pfkeyv2
 * SPD calls.
 */

struct xfrm_policy *xfrm_policy_alloc(struct net *net, gfp_t gfp)
{
	struct xfrm_policy *policy;

	policy = kzalloc(sizeof(struct xfrm_policy), gfp);

	if (policy) {
		write_pnet(&policy->xp_net, net);
		INIT_LIST_HEAD(&policy->walk.all);
		INIT_HLIST_NODE(&policy->bydst_inexact_list);
		INIT_HLIST_NODE(&policy->bydst);
		INIT_HLIST_NODE(&policy->byidx);
		rwlock_init(&policy->lock);
		refcount_set(&policy->refcnt, 1);
		skb_queue_head_init(&policy->polq.hold_queue);
		timer_setup(&policy->timer, xfrm_policy_timer, 0);
		timer_setup(&policy->polq.hold_timer,
			    xfrm_policy_queue_process, 0);
	}
	return policy;
}
EXPORT_SYMBOL(xfrm_policy_alloc);

static void xfrm_policy_destroy_rcu(struct rcu_head *head)
{
	struct xfrm_policy *policy = container_of(head, struct xfrm_policy, rcu);

	security_xfrm_policy_free(policy->security);
	kfree(policy);
}

/* Destroy xfrm_policy: descendant resources must have been released by
 * this point.
 */

void xfrm_policy_destroy(struct xfrm_policy *policy)
{
	BUG_ON(!policy->walk.dead);

	if (del_timer(&policy->timer) || del_timer(&policy->polq.hold_timer))
		BUG();

	call_rcu(&policy->rcu, xfrm_policy_destroy_rcu);
}
EXPORT_SYMBOL(xfrm_policy_destroy);

/* Rule must be locked. Release descendant resources and announce the
 * entry dead. The rule must already be unlinked from all lists.
 */

static void xfrm_policy_kill(struct xfrm_policy *policy)
{
	write_lock_bh(&policy->lock);
	policy->walk.dead = 1;
	write_unlock_bh(&policy->lock);

	atomic_inc(&policy->genid);

	if (del_timer(&policy->polq.hold_timer))
		xfrm_pol_put(policy);
	skb_queue_purge(&policy->polq.hold_queue);

	if (del_timer(&policy->timer))
		xfrm_pol_put(policy);

	xfrm_pol_put(policy);
}
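
/* Reference accounting note (editorial): each armed timer holds a policy
 * reference, so a successful del_timer() above drops that reference; the
 * final xfrm_pol_put() appears to release the reference the policy lists
 * held before the entry was unlinked by the caller.
 */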

static unsigned int xfrm_policy_hashmax __read_mostly = 1 * 1024 * 1024;

static inline unsigned int idx_hash(struct net *net, u32 index)
{
	return __idx_hash(index, net->xfrm.policy_idx_hmask);
}

/* calculate policy hash thresholds */
static void __get_hash_thresh(struct net *net,
			      unsigned short family, int dir,
			      u8 *dbits, u8 *sbits)
{
	switch (family) {
	case AF_INET:
		*dbits = net->xfrm.policy_bydst[dir].dbits4;
		*sbits = net->xfrm.policy_bydst[dir].sbits4;
		break;

	case AF_INET6:
		*dbits = net->xfrm.policy_bydst[dir].dbits6;
		*sbits = net->xfrm.policy_bydst[dir].sbits6;
		break;

	default:
		*dbits = 0;
		*sbits = 0;
	}
}

static struct hlist_head *policy_hash_bysel(struct net *net,
					    const struct xfrm_selector *sel,
					    unsigned short family, int dir)
{
	unsigned int hmask = net->xfrm.policy_bydst[dir].hmask;
	unsigned int hash;
	u8 dbits;
	u8 sbits;

	__get_hash_thresh(net, family, dir, &dbits, &sbits);
	hash = __sel_hash(sel, family, hmask, dbits, sbits);

	if (hash == hmask + 1)
		return NULL;

	return rcu_dereference_check(net->xfrm.policy_bydst[dir].table,
		     lockdep_is_held(&net->xfrm.xfrm_policy_lock)) + hash;
}

static struct hlist_head *policy_hash_direct(struct net *net,
					     const xfrm_address_t *daddr,
					     const xfrm_address_t *saddr,
					     unsigned short family, int dir)
{
	unsigned int hmask = net->xfrm.policy_bydst[dir].hmask;
	unsigned int hash;
	u8 dbits;
	u8 sbits;

	__get_hash_thresh(net, family, dir, &dbits, &sbits);
	hash = __addr_hash(daddr, saddr, family, hmask, dbits, sbits);

	return rcu_dereference_check(net->xfrm.policy_bydst[dir].table,
		     lockdep_is_held(&net->xfrm.xfrm_policy_lock)) + hash;
}

static void xfrm_dst_hash_transfer(struct net *net,
				   struct hlist_head *list,
				   struct hlist_head *ndsttable,
				   unsigned int nhashmask,
				   int dir)
{
	struct hlist_node *tmp, *entry0 = NULL;
	struct xfrm_policy *pol;
	unsigned int h0 = 0;
	u8 dbits;
	u8 sbits;

redo:
	hlist_for_each_entry_safe(pol, tmp, list, bydst) {
		unsigned int h;

		__get_hash_thresh(net, pol->family, dir, &dbits, &sbits);
		h = __addr_hash(&pol->selector.daddr, &pol->selector.saddr,
				pol->family, nhashmask, dbits, sbits);
		if (!entry0) {
			hlist_del_rcu(&pol->bydst);
			hlist_add_head_rcu(&pol->bydst, ndsttable + h);
			h0 = h;
		} else {
			if (h != h0)
				continue;
			hlist_del_rcu(&pol->bydst);
			hlist_add_behind_rcu(&pol->bydst, entry0);
		}
		entry0 = &pol->bydst;
	}
	if (!hlist_empty(list)) {
		entry0 = NULL;
		goto redo;
	}
}

static void xfrm_idx_hash_transfer(struct hlist_head *list,
				   struct hlist_head *nidxtable,
				   unsigned int nhashmask)
{
	struct hlist_node *tmp;
	struct xfrm_policy *pol;

	hlist_for_each_entry_safe(pol, tmp, list, byidx) {
		unsigned int h;

		h = __idx_hash(pol->index, nhashmask);
		hlist_add_head(&pol->byidx, nidxtable+h);
	}
}

static unsigned long xfrm_new_hash_mask(unsigned int old_hmask)
{
	return ((old_hmask + 1) << 1) - 1;
}
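
/* E.g. old_hmask 15 (16 buckets) yields 31 (32 buckets): each resize
 * doubles the table. (Editorial note, illustrative.)
 */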

static void xfrm_bydst_resize(struct net *net, int dir)
{
	unsigned int hmask = net->xfrm.policy_bydst[dir].hmask;
	unsigned int nhashmask = xfrm_new_hash_mask(hmask);
	unsigned int nsize = (nhashmask + 1) * sizeof(struct hlist_head);
	struct hlist_head *ndst = xfrm_hash_alloc(nsize);
	struct hlist_head *odst;
	int i;

	if (!ndst)
		return;

	spin_lock_bh(&net->xfrm.xfrm_policy_lock);
	write_seqcount_begin(&net->xfrm.xfrm_policy_hash_generation);

	odst = rcu_dereference_protected(net->xfrm.policy_bydst[dir].table,
				lockdep_is_held(&net->xfrm.xfrm_policy_lock));

	for (i = hmask; i >= 0; i--)
		xfrm_dst_hash_transfer(net, odst + i, ndst, nhashmask, dir);

	rcu_assign_pointer(net->xfrm.policy_bydst[dir].table, ndst);
	net->xfrm.policy_bydst[dir].hmask = nhashmask;

	write_seqcount_end(&net->xfrm.xfrm_policy_hash_generation);
	spin_unlock_bh(&net->xfrm.xfrm_policy_lock);

	synchronize_rcu();

	xfrm_hash_free(odst, (hmask + 1) * sizeof(struct hlist_head));
}

static void xfrm_byidx_resize(struct net *net, int total)
{
	unsigned int hmask = net->xfrm.policy_idx_hmask;
	unsigned int nhashmask = xfrm_new_hash_mask(hmask);
	unsigned int nsize = (nhashmask + 1) * sizeof(struct hlist_head);
	struct hlist_head *oidx = net->xfrm.policy_byidx;
	struct hlist_head *nidx = xfrm_hash_alloc(nsize);
	int i;

	if (!nidx)
		return;

	spin_lock_bh(&net->xfrm.xfrm_policy_lock);

	for (i = hmask; i >= 0; i--)
		xfrm_idx_hash_transfer(oidx + i, nidx, nhashmask);

	net->xfrm.policy_byidx = nidx;
	net->xfrm.policy_idx_hmask = nhashmask;

	spin_unlock_bh(&net->xfrm.xfrm_policy_lock);

	xfrm_hash_free(oidx, (hmask + 1) * sizeof(struct hlist_head));
}

static inline int xfrm_bydst_should_resize(struct net *net, int dir, int *total)
{
	unsigned int cnt = net->xfrm.policy_count[dir];
	unsigned int hmask = net->xfrm.policy_bydst[dir].hmask;

	if (total)
		*total += cnt;

	if ((hmask + 1) < xfrm_policy_hashmax &&
	    cnt > hmask)
		return 1;

	return 0;
}

static inline int xfrm_byidx_should_resize(struct net *net, int total)
{
	unsigned int hmask = net->xfrm.policy_idx_hmask;

	if ((hmask + 1) < xfrm_policy_hashmax &&
	    total > hmask)
		return 1;

	return 0;
}

void xfrm_spd_getinfo(struct net *net, struct xfrmk_spdinfo *si)
{
	si->incnt = net->xfrm.policy_count[XFRM_POLICY_IN];
	si->outcnt = net->xfrm.policy_count[XFRM_POLICY_OUT];
	si->fwdcnt = net->xfrm.policy_count[XFRM_POLICY_FWD];
	si->inscnt = net->xfrm.policy_count[XFRM_POLICY_IN+XFRM_POLICY_MAX];
	si->outscnt = net->xfrm.policy_count[XFRM_POLICY_OUT+XFRM_POLICY_MAX];
	si->fwdscnt = net->xfrm.policy_count[XFRM_POLICY_FWD+XFRM_POLICY_MAX];
	si->spdhcnt = net->xfrm.policy_idx_hmask;
	si->spdhmcnt = xfrm_policy_hashmax;
}
EXPORT_SYMBOL(xfrm_spd_getinfo);

static DEFINE_MUTEX(hash_resize_mutex);
static void xfrm_hash_resize(struct work_struct *work)
{
	struct net *net = container_of(work, struct net, xfrm.policy_hash_work);
	int dir, total;

	mutex_lock(&hash_resize_mutex);

	total = 0;
	for (dir = 0; dir < XFRM_POLICY_MAX; dir++) {
		if (xfrm_bydst_should_resize(net, dir, &total))
			xfrm_bydst_resize(net, dir);
	}
	if (xfrm_byidx_should_resize(net, total))
		xfrm_byidx_resize(net, total);

	mutex_unlock(&hash_resize_mutex);
}

/* Make sure *pol can be inserted into fastbin.
 * Useful to check that later insert requests will be successful
 * (provided xfrm_policy_lock is held throughout).
 */
static struct xfrm_pol_inexact_bin *
xfrm_policy_inexact_alloc_bin(const struct xfrm_policy *pol, u8 dir)
{
	struct xfrm_pol_inexact_bin *bin, *prev;
	struct xfrm_pol_inexact_key k = {
		.family = pol->family,
		.type = pol->type,
		.dir = dir,
		.if_id = pol->if_id,
	};
	struct net *net = xp_net(pol);

	lockdep_assert_held(&net->xfrm.xfrm_policy_lock);

	write_pnet(&k.net, net);
	bin = rhashtable_lookup_fast(&xfrm_policy_inexact_table, &k,
				     xfrm_pol_inexact_params);
	if (bin)
		return bin;

	bin = kzalloc(sizeof(*bin), GFP_ATOMIC);
	if (!bin)
		return NULL;

	bin->k = k;
	INIT_HLIST_HEAD(&bin->hhead);
	bin->root_d = RB_ROOT;
	bin->root_s = RB_ROOT;
	seqcount_spinlock_init(&bin->count, &net->xfrm.xfrm_policy_lock);

	prev = rhashtable_lookup_get_insert_key(&xfrm_policy_inexact_table,
						&bin->k, &bin->head,
						xfrm_pol_inexact_params);
	if (!prev) {
		list_add(&bin->inexact_bins, &net->xfrm.inexact_bins);
		return bin;
	}

	kfree(bin);

	return IS_ERR(prev) ? NULL : prev;
}

static bool xfrm_pol_inexact_addr_use_any_list(const xfrm_address_t *addr,
					       int family, u8 prefixlen)
{
	if (xfrm_addr_any(addr, family))
		return true;

	if (family == AF_INET6 && prefixlen < INEXACT_PREFIXLEN_IPV6)
		return true;

	if (family == AF_INET && prefixlen < INEXACT_PREFIXLEN_IPV4)
		return true;

	return false;
}
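
/* Example (editorial, illustrative addresses): for AF_INET, 10.0.0.0/8
 * is below the INEXACT_PREFIXLEN_IPV4 threshold of 16 and is treated
 * like a wildcard (list storage), while 10.1.0.0/24 is specific enough
 * to be stored in the rbtree.
 */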

static bool
xfrm_policy_inexact_insert_use_any_list(const struct xfrm_policy *policy)
{
	const xfrm_address_t *addr;
	bool saddr_any, daddr_any;
	u8 prefixlen;

	addr = &policy->selector.saddr;
	prefixlen = policy->selector.prefixlen_s;

	saddr_any = xfrm_pol_inexact_addr_use_any_list(addr,
						       policy->family,
						       prefixlen);
	addr = &policy->selector.daddr;
	prefixlen = policy->selector.prefixlen_d;
	daddr_any = xfrm_pol_inexact_addr_use_any_list(addr,
						       policy->family,
						       prefixlen);
	return saddr_any && daddr_any;
}

static void xfrm_pol_inexact_node_init(struct xfrm_pol_inexact_node *node,
				       const xfrm_address_t *addr, u8 prefixlen)
{
	node->addr = *addr;
	node->prefixlen = prefixlen;
}

static struct xfrm_pol_inexact_node *
xfrm_pol_inexact_node_alloc(const xfrm_address_t *addr, u8 prefixlen)
{
	struct xfrm_pol_inexact_node *node;

	node = kzalloc(sizeof(*node), GFP_ATOMIC);
	if (node)
		xfrm_pol_inexact_node_init(node, addr, prefixlen);

	return node;
}

static int xfrm_policy_addr_delta(const xfrm_address_t *a,
				  const xfrm_address_t *b,
				  u8 prefixlen, u16 family)
{
	u32 ma, mb, mask;
	unsigned int pdw, pbi;
	int delta = 0;

	switch (family) {
	case AF_INET:
		if (prefixlen == 0)
			return 0;
		mask = ~0U << (32 - prefixlen);
		ma = ntohl(a->a4) & mask;
		mb = ntohl(b->a4) & mask;
		if (ma < mb)
			delta = -1;
		else if (ma > mb)
			delta = 1;
		break;
	case AF_INET6:
		pdw = prefixlen >> 5;
		pbi = prefixlen & 0x1f;

		if (pdw) {
			delta = memcmp(a->a6, b->a6, pdw << 2);
			if (delta)
				return delta;
		}
		if (pbi) {
			mask = ~0U << (32 - pbi);
			ma = ntohl(a->a6[pdw]) & mask;
			mb = ntohl(b->a6[pdw]) & mask;
			if (ma < mb)
				delta = -1;
			else if (ma > mb)
				delta = 1;
		}
		break;
	default:
		break;
	}

	return delta;
}
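
/* Worked example (editorial, illustrative addresses): for AF_INET and
 * prefixlen 16 the mask is 0xffff0000, so 10.1.99.1 vs 10.2.0.5 compares
 * 0x0a010000 with 0x0a020000 and returns -1; two addresses inside the
 * same /16 compare equal (0), which is how tree nodes match covering
 * prefixes.
 */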

static void xfrm_policy_inexact_list_reinsert(struct net *net,
					      struct xfrm_pol_inexact_node *n,
					      u16 family)
{
	unsigned int matched_s, matched_d;
	struct xfrm_policy *policy, *p;

	matched_s = 0;
	matched_d = 0;

	list_for_each_entry_reverse(policy, &net->xfrm.policy_all, walk.all) {
		struct hlist_node *newpos = NULL;
		bool matches_s, matches_d;

		if (!policy->bydst_reinsert)
			continue;

		WARN_ON_ONCE(policy->family != family);

		policy->bydst_reinsert = false;
		hlist_for_each_entry(p, &n->hhead, bydst) {
			if (policy->priority > p->priority)
				newpos = &p->bydst;
			else if (policy->priority == p->priority &&
				 policy->pos > p->pos)
				newpos = &p->bydst;
			else
				break;
		}

		if (newpos)
			hlist_add_behind_rcu(&policy->bydst, newpos);
		else
			hlist_add_head_rcu(&policy->bydst, &n->hhead);

		/* paranoia checks follow.
		 * Check that the reinserted policy matches at least
		 * saddr or daddr for current node prefix.
		 *
		 * Matching both is fine, matching saddr in one policy
		 * (but not daddr) and then matching only daddr in another
		 * is a bug.
		 */
		matches_s = xfrm_policy_addr_delta(&policy->selector.saddr,
						   &n->addr,
						   n->prefixlen,
						   family) == 0;
		matches_d = xfrm_policy_addr_delta(&policy->selector.daddr,
						   &n->addr,
						   n->prefixlen,
						   family) == 0;
		if (matches_s && matches_d)
			continue;

		WARN_ON_ONCE(!matches_s && !matches_d);
		if (matches_s)
			matched_s++;
		if (matches_d)
			matched_d++;
		WARN_ON_ONCE(matched_s && matched_d);
	}
}

static void xfrm_policy_inexact_node_reinsert(struct net *net,
					      struct xfrm_pol_inexact_node *n,
					      struct rb_root *new,
					      u16 family)
{
	struct xfrm_pol_inexact_node *node;
	struct rb_node **p, *parent;

	/* we should not have another subtree here */
	WARN_ON_ONCE(!RB_EMPTY_ROOT(&n->root));
restart:
	parent = NULL;
	p = &new->rb_node;
	while (*p) {
		u8 prefixlen;
		int delta;

		parent = *p;
		node = rb_entry(*p, struct xfrm_pol_inexact_node, node);

		prefixlen = min(node->prefixlen, n->prefixlen);

		delta = xfrm_policy_addr_delta(&n->addr, &node->addr,
					       prefixlen, family);
		if (delta < 0) {
			p = &parent->rb_left;
		} else if (delta > 0) {
			p = &parent->rb_right;
		} else {
			bool same_prefixlen = node->prefixlen == n->prefixlen;
			struct xfrm_policy *tmp;

			hlist_for_each_entry(tmp, &n->hhead, bydst) {
				tmp->bydst_reinsert = true;
				hlist_del_rcu(&tmp->bydst);
			}

			node->prefixlen = prefixlen;

			xfrm_policy_inexact_list_reinsert(net, node, family);

			if (same_prefixlen) {
				kfree_rcu(n, rcu);
				return;
			}

			rb_erase(*p, new);
			kfree_rcu(n, rcu);
			n = node;
			goto restart;
		}
	}

	rb_link_node_rcu(&n->node, parent, p);
	rb_insert_color(&n->node, new);
}

/* merge nodes v and n */
static void xfrm_policy_inexact_node_merge(struct net *net,
					   struct xfrm_pol_inexact_node *v,
					   struct xfrm_pol_inexact_node *n,
					   u16 family)
{
	struct xfrm_pol_inexact_node *node;
	struct xfrm_policy *tmp;
	struct rb_node *rnode;

	/* To-be-merged node v has a subtree.
	 *
	 * Dismantle it and insert its nodes to n->root.
	 */
	while ((rnode = rb_first(&v->root)) != NULL) {
		node = rb_entry(rnode, struct xfrm_pol_inexact_node, node);
		rb_erase(&node->node, &v->root);
		xfrm_policy_inexact_node_reinsert(net, node, &n->root,
						  family);
	}

	hlist_for_each_entry(tmp, &v->hhead, bydst) {
		tmp->bydst_reinsert = true;
		hlist_del_rcu(&tmp->bydst);
	}

	xfrm_policy_inexact_list_reinsert(net, n, family);
}

static struct xfrm_pol_inexact_node *
xfrm_policy_inexact_insert_node(struct net *net,
				struct rb_root *root,
				xfrm_address_t *addr,
				u16 family, u8 prefixlen, u8 dir)
{
	struct xfrm_pol_inexact_node *cached = NULL;
	struct rb_node **p, *parent = NULL;
	struct xfrm_pol_inexact_node *node;

	p = &root->rb_node;
	while (*p) {
		int delta;

		parent = *p;
		node = rb_entry(*p, struct xfrm_pol_inexact_node, node);

		delta = xfrm_policy_addr_delta(addr, &node->addr,
					       node->prefixlen,
					       family);
		if (delta == 0 && prefixlen >= node->prefixlen) {
			WARN_ON_ONCE(cached); /* ipsec policies got lost */
			return node;
		}

		if (delta < 0)
			p = &parent->rb_left;
		else
			p = &parent->rb_right;

		if (prefixlen < node->prefixlen) {
			delta = xfrm_policy_addr_delta(addr, &node->addr,
						       prefixlen,
						       family);
			if (delta)
				continue;

			/* This node is a subnet of the new prefix. It needs
			 * to be removed and re-inserted with the smaller
			 * prefix and all nodes that are now also covered
			 * by the reduced prefixlen.
			 */
			rb_erase(&node->node, root);

			if (!cached) {
				xfrm_pol_inexact_node_init(node, addr,
							   prefixlen);
				cached = node;
			} else {
				/* This node also falls within the new
				 * prefixlen. Merge the to-be-reinserted
				 * node and this one.
				 */
				xfrm_policy_inexact_node_merge(net, node,
							       cached, family);
				kfree_rcu(node, rcu);
			}

			/* restart */
			p = &root->rb_node;
			parent = NULL;
		}
	}

	node = cached;
	if (!node) {
		node = xfrm_pol_inexact_node_alloc(addr, prefixlen);
		if (!node)
			return NULL;
	}

	rb_link_node_rcu(&node->node, parent, p);
	rb_insert_color(&node->node, root);

	return node;
}
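
/* Example of the split handling above (editorial, assumed addresses):
 * if the tree holds 10.1.0.0/24 and 10.1.128.0/24 and 10.1.0.0/16 is
 * inserted, both /24 nodes fall inside the new prefix: the first is
 * re-initialized as the /16 node, the second is merged into it, and all
 * affected policies are flagged for reinsertion into the merged node's
 * list.
 */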

static void xfrm_policy_inexact_gc_tree(struct rb_root *r, bool rm)
{
	struct xfrm_pol_inexact_node *node;
	struct rb_node *rn = rb_first(r);

	while (rn) {
		node = rb_entry(rn, struct xfrm_pol_inexact_node, node);

		xfrm_policy_inexact_gc_tree(&node->root, rm);
		rn = rb_next(rn);

		if (!hlist_empty(&node->hhead) || !RB_EMPTY_ROOT(&node->root)) {
			WARN_ON_ONCE(rm);
			continue;
		}

		rb_erase(&node->node, r);
		kfree_rcu(node, rcu);
	}
}

static void __xfrm_policy_inexact_prune_bin(struct xfrm_pol_inexact_bin *b, bool net_exit)
{
	write_seqcount_begin(&b->count);
	xfrm_policy_inexact_gc_tree(&b->root_d, net_exit);
	xfrm_policy_inexact_gc_tree(&b->root_s, net_exit);
	write_seqcount_end(&b->count);

	if (!RB_EMPTY_ROOT(&b->root_d) || !RB_EMPTY_ROOT(&b->root_s) ||
	    !hlist_empty(&b->hhead)) {
		WARN_ON_ONCE(net_exit);
		return;
	}

	if (rhashtable_remove_fast(&xfrm_policy_inexact_table, &b->head,
				   xfrm_pol_inexact_params) == 0) {
		list_del(&b->inexact_bins);
		kfree_rcu(b, rcu);
	}
}

static void xfrm_policy_inexact_prune_bin(struct xfrm_pol_inexact_bin *b)
{
	struct net *net = read_pnet(&b->k.net);

	spin_lock_bh(&net->xfrm.xfrm_policy_lock);
	__xfrm_policy_inexact_prune_bin(b, false);
	spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
}

static void __xfrm_policy_inexact_flush(struct net *net)
{
	struct xfrm_pol_inexact_bin *bin, *t;

	lockdep_assert_held(&net->xfrm.xfrm_policy_lock);

	list_for_each_entry_safe(bin, t, &net->xfrm.inexact_bins, inexact_bins)
		__xfrm_policy_inexact_prune_bin(bin, false);
}

static struct hlist_head *
xfrm_policy_inexact_alloc_chain(struct xfrm_pol_inexact_bin *bin,
				struct xfrm_policy *policy, u8 dir)
{
	struct xfrm_pol_inexact_node *n;
	struct net *net;

	net = xp_net(policy);
	lockdep_assert_held(&net->xfrm.xfrm_policy_lock);

	if (xfrm_policy_inexact_insert_use_any_list(policy))
		return &bin->hhead;

	if (xfrm_pol_inexact_addr_use_any_list(&policy->selector.daddr,
					       policy->family,
					       policy->selector.prefixlen_d)) {
		write_seqcount_begin(&bin->count);
		n = xfrm_policy_inexact_insert_node(net,
						    &bin->root_s,
						    &policy->selector.saddr,
						    policy->family,
						    policy->selector.prefixlen_s,
						    dir);
		write_seqcount_end(&bin->count);
		if (!n)
			return NULL;

		return &n->hhead;
	}

	/* daddr is fixed */
	write_seqcount_begin(&bin->count);
	n = xfrm_policy_inexact_insert_node(net,
					    &bin->root_d,
					    &policy->selector.daddr,
					    policy->family,
					    policy->selector.prefixlen_d, dir);
	write_seqcount_end(&bin->count);
	if (!n)
		return NULL;

	/* saddr is wildcard */
	if (xfrm_pol_inexact_addr_use_any_list(&policy->selector.saddr,
					       policy->family,
					       policy->selector.prefixlen_s))
		return &n->hhead;

	write_seqcount_begin(&bin->count);
	n = xfrm_policy_inexact_insert_node(net,
					    &n->root,
					    &policy->selector.saddr,
					    policy->family,
					    policy->selector.prefixlen_s, dir);
	write_seqcount_end(&bin->count);
	if (!n)
		return NULL;

	return &n->hhead;
}

static struct xfrm_policy *
xfrm_policy_inexact_insert(struct xfrm_policy *policy, u8 dir, int excl)
{
	struct xfrm_pol_inexact_bin *bin;
	struct xfrm_policy *delpol;
	struct hlist_head *chain;
	struct net *net;

	bin = xfrm_policy_inexact_alloc_bin(policy, dir);
	if (!bin)
		return ERR_PTR(-ENOMEM);

	net = xp_net(policy);
	lockdep_assert_held(&net->xfrm.xfrm_policy_lock);

	chain = xfrm_policy_inexact_alloc_chain(bin, policy, dir);
	if (!chain) {
		__xfrm_policy_inexact_prune_bin(bin, false);
		return ERR_PTR(-ENOMEM);
	}

	delpol = xfrm_policy_insert_list(chain, policy, excl);
	if (delpol && excl) {
		__xfrm_policy_inexact_prune_bin(bin, false);
		return ERR_PTR(-EEXIST);
	}

	chain = &net->xfrm.policy_inexact[dir];
	xfrm_policy_insert_inexact_list(chain, policy);

	if (delpol)
		__xfrm_policy_inexact_prune_bin(bin, false);

	return delpol;
}

static void xfrm_hash_rebuild(struct work_struct *work)
{
	struct net *net = container_of(work, struct net,
				       xfrm.policy_hthresh.work);
	unsigned int hmask;
	struct xfrm_policy *pol;
	struct xfrm_policy *policy;
	struct hlist_head *chain;
	struct hlist_head *odst;
	struct hlist_node *newpos;
	int i;
	int dir;
	unsigned seq;
	u8 lbits4, rbits4, lbits6, rbits6;

	mutex_lock(&hash_resize_mutex);

	/* read selector prefixlen thresholds */
	do {
		seq = read_seqbegin(&net->xfrm.policy_hthresh.lock);

		lbits4 = net->xfrm.policy_hthresh.lbits4;
		rbits4 = net->xfrm.policy_hthresh.rbits4;
		lbits6 = net->xfrm.policy_hthresh.lbits6;
		rbits6 = net->xfrm.policy_hthresh.rbits6;
	} while (read_seqretry(&net->xfrm.policy_hthresh.lock, seq));

	spin_lock_bh(&net->xfrm.xfrm_policy_lock);
	write_seqcount_begin(&net->xfrm.xfrm_policy_hash_generation);

	/* make sure that we can insert the indirect policies again before
	 * we start with destructive action.
	 */
	list_for_each_entry(policy, &net->xfrm.policy_all, walk.all) {
		struct xfrm_pol_inexact_bin *bin;
		u8 dbits, sbits;

		dir = xfrm_policy_id2dir(policy->index);
		if (policy->walk.dead || dir >= XFRM_POLICY_MAX)
			continue;

		if ((dir & XFRM_POLICY_MASK) == XFRM_POLICY_OUT) {
			if (policy->family == AF_INET) {
				dbits = rbits4;
				sbits = lbits4;
			} else {
				dbits = rbits6;
				sbits = lbits6;
			}
		} else {
			if (policy->family == AF_INET) {
				dbits = lbits4;
				sbits = rbits4;
			} else {
				dbits = lbits6;
				sbits = rbits6;
			}
		}

		if (policy->selector.prefixlen_d < dbits ||
		    policy->selector.prefixlen_s < sbits)
			continue;

		bin = xfrm_policy_inexact_alloc_bin(policy, dir);
		if (!bin)
			goto out_unlock;

		if (!xfrm_policy_inexact_alloc_chain(bin, policy, dir))
			goto out_unlock;
	}

	/* reset the bydst and inexact table in all directions */
	for (dir = 0; dir < XFRM_POLICY_MAX; dir++) {
		struct hlist_node *n;

		hlist_for_each_entry_safe(policy, n,
					  &net->xfrm.policy_inexact[dir],
					  bydst_inexact_list) {
			hlist_del_rcu(&policy->bydst);
			hlist_del_init(&policy->bydst_inexact_list);
		}

		hmask = net->xfrm.policy_bydst[dir].hmask;
		odst = net->xfrm.policy_bydst[dir].table;
		for (i = hmask; i >= 0; i--) {
			hlist_for_each_entry_safe(policy, n, odst + i, bydst)
				hlist_del_rcu(&policy->bydst);
		}
		if ((dir & XFRM_POLICY_MASK) == XFRM_POLICY_OUT) {
			/* dir out => dst = remote, src = local */
			net->xfrm.policy_bydst[dir].dbits4 = rbits4;
			net->xfrm.policy_bydst[dir].sbits4 = lbits4;
			net->xfrm.policy_bydst[dir].dbits6 = rbits6;
			net->xfrm.policy_bydst[dir].sbits6 = lbits6;
		} else {
			/* dir in/fwd => dst = local, src = remote */
			net->xfrm.policy_bydst[dir].dbits4 = lbits4;
			net->xfrm.policy_bydst[dir].sbits4 = rbits4;
			net->xfrm.policy_bydst[dir].dbits6 = lbits6;
			net->xfrm.policy_bydst[dir].sbits6 = rbits6;
		}
	}

	/* re-insert all policies by order of creation */
	list_for_each_entry_reverse(policy, &net->xfrm.policy_all, walk.all) {
		if (policy->walk.dead)
			continue;
		dir = xfrm_policy_id2dir(policy->index);
		if (dir >= XFRM_POLICY_MAX) {
			/* skip socket policies */
			continue;
		}
		newpos = NULL;
		chain = policy_hash_bysel(net, &policy->selector,
					  policy->family, dir);

		if (!chain) {
			void *p = xfrm_policy_inexact_insert(policy, dir, 0);

			WARN_ONCE(IS_ERR(p), "reinsert: %ld\n", PTR_ERR(p));
			continue;
		}

		hlist_for_each_entry(pol, chain, bydst) {
			if (policy->priority >= pol->priority)
				newpos = &pol->bydst;
			else
				break;
		}
		if (newpos)
			hlist_add_behind_rcu(&policy->bydst, newpos);
		else
			hlist_add_head_rcu(&policy->bydst, chain);
	}

out_unlock:
	__xfrm_policy_inexact_flush(net);
	write_seqcount_end(&net->xfrm.xfrm_policy_hash_generation);
	spin_unlock_bh(&net->xfrm.xfrm_policy_lock);

	mutex_unlock(&hash_resize_mutex);
}

void xfrm_policy_hash_rebuild(struct net *net)
{
	schedule_work(&net->xfrm.policy_hthresh.work);
}
EXPORT_SYMBOL(xfrm_policy_hash_rebuild);

/* Generate a new index... KAME seems to generate them ordered by cost
 * at the price of completely unpredictable rule ordering. That will not
 * do here. */
static u32 xfrm_gen_index(struct net *net, int dir, u32 index)
{
	static u32 idx_generator;

	for (;;) {
		struct hlist_head *list;
		struct xfrm_policy *p;
		u32 idx;
		int found;

		if (!index) {
			idx = (idx_generator | dir);
			idx_generator += 8;
		} else {
			idx = index;
			index = 0;
		}

		if (idx == 0)
			idx = 8;
		list = net->xfrm.policy_byidx + idx_hash(net, idx);
		found = 0;
		hlist_for_each_entry(p, list, byidx) {
			if (p->index == idx) {
				found = 1;
				break;
			}
		}
		if (!found)
			return idx;
	}
}
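
/* Editorial note with an example: indices advance in steps of 8 with the
 * direction encoded in the low bits, so for dir == XFRM_POLICY_OUT (1)
 * the generator produces 1, 9, 17, ... and xfrm_policy_id2dir() can
 * recover the direction as index & 7.
 */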

static inline int selector_cmp(struct xfrm_selector *s1, struct xfrm_selector *s2)
{
	u32 *p1 = (u32 *) s1;
	u32 *p2 = (u32 *) s2;
	int len = sizeof(struct xfrm_selector) / sizeof(u32);
	int i;

	for (i = 0; i < len; i++) {
		if (p1[i] != p2[i])
			return 1;
	}

	return 0;
}

static void xfrm_policy_requeue(struct xfrm_policy *old,
				struct xfrm_policy *new)
{
	struct xfrm_policy_queue *pq = &old->polq;
	struct sk_buff_head list;

	if (skb_queue_empty(&pq->hold_queue))
		return;

	__skb_queue_head_init(&list);

	spin_lock_bh(&pq->hold_queue.lock);
	skb_queue_splice_init(&pq->hold_queue, &list);
	if (del_timer(&pq->hold_timer))
		xfrm_pol_put(old);
	spin_unlock_bh(&pq->hold_queue.lock);

	pq = &new->polq;

	spin_lock_bh(&pq->hold_queue.lock);
	skb_queue_splice(&list, &pq->hold_queue);
	pq->timeout = XFRM_QUEUE_TMO_MIN;
	if (!mod_timer(&pq->hold_timer, jiffies))
		xfrm_pol_hold(new);
	spin_unlock_bh(&pq->hold_queue.lock);
}

static inline bool xfrm_policy_mark_match(const struct xfrm_mark *mark,
					  struct xfrm_policy *pol)
{
	return mark->v == pol->mark.v && mark->m == pol->mark.m;
}

static u32 xfrm_pol_bin_key(const void *data, u32 len, u32 seed)
{
	const struct xfrm_pol_inexact_key *k = data;
	u32 a = k->type << 24 | k->dir << 16 | k->family;

	return jhash_3words(a, k->if_id, net_hash_mix(read_pnet(&k->net)),
			    seed);
}
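
/* Example key packing (editorial, illustrative): type
 * XFRM_POLICY_TYPE_MAIN (0), dir XFRM_POLICY_OUT (1), family AF_INET (2)
 * packs into a = 0x00010002 before being mixed with if_id and the netns
 * hash.
 */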

static u32 xfrm_pol_bin_obj(const void *data, u32 len, u32 seed)
{
	const struct xfrm_pol_inexact_bin *b = data;

	return xfrm_pol_bin_key(&b->k, 0, seed);
}

static int xfrm_pol_bin_cmp(struct rhashtable_compare_arg *arg,
			    const void *ptr)
{
	const struct xfrm_pol_inexact_key *key = arg->key;
	const struct xfrm_pol_inexact_bin *b = ptr;
	int ret;

	if (!net_eq(read_pnet(&b->k.net), read_pnet(&key->net)))
		return -1;

	ret = b->k.dir ^ key->dir;
	if (ret)
		return ret;

	ret = b->k.type ^ key->type;
	if (ret)
		return ret;

	ret = b->k.family ^ key->family;
	if (ret)
		return ret;

	return b->k.if_id ^ key->if_id;
}

static const struct rhashtable_params xfrm_pol_inexact_params = {
	.head_offset		= offsetof(struct xfrm_pol_inexact_bin, head),
	.hashfn			= xfrm_pol_bin_key,
	.obj_hashfn		= xfrm_pol_bin_obj,
	.obj_cmpfn		= xfrm_pol_bin_cmp,
	.automatic_shrinking	= true,
};

static void xfrm_policy_insert_inexact_list(struct hlist_head *chain,
					    struct xfrm_policy *policy)
{
	struct xfrm_policy *pol, *delpol = NULL;
	struct hlist_node *newpos = NULL;
	int i = 0;

	hlist_for_each_entry(pol, chain, bydst_inexact_list) {
		if (pol->type == policy->type &&
		    pol->if_id == policy->if_id &&
		    !selector_cmp(&pol->selector, &policy->selector) &&
		    xfrm_policy_mark_match(&policy->mark, pol) &&
		    xfrm_sec_ctx_match(pol->security, policy->security) &&
		    !WARN_ON(delpol)) {
			delpol = pol;
			if (policy->priority > pol->priority)
				continue;
		} else if (policy->priority >= pol->priority) {
			newpos = &pol->bydst_inexact_list;
			continue;
		}
		if (delpol)
			break;
	}

	if (newpos)
		hlist_add_behind_rcu(&policy->bydst_inexact_list, newpos);
	else
		hlist_add_head_rcu(&policy->bydst_inexact_list, chain);

	hlist_for_each_entry(pol, chain, bydst_inexact_list) {
		pol->pos = i;
		i++;
	}
}

static struct xfrm_policy *xfrm_policy_insert_list(struct hlist_head *chain,
						   struct xfrm_policy *policy,
						   bool excl)
{
	struct xfrm_policy *pol, *newpos = NULL, *delpol = NULL;

	hlist_for_each_entry(pol, chain, bydst) {
		if (pol->type == policy->type &&
		    pol->if_id == policy->if_id &&
		    !selector_cmp(&pol->selector, &policy->selector) &&
		    xfrm_policy_mark_match(&policy->mark, pol) &&
		    xfrm_sec_ctx_match(pol->security, policy->security) &&
		    !WARN_ON(delpol)) {
			if (excl)
				return ERR_PTR(-EEXIST);
			delpol = pol;
			if (policy->priority > pol->priority)
				continue;
		} else if (policy->priority >= pol->priority) {
			newpos = pol;
			continue;
		}
		if (delpol)
			break;
	}

	if (newpos)
		hlist_add_behind_rcu(&policy->bydst, &newpos->bydst);
	else
		hlist_add_head_rcu(&policy->bydst, chain);

	return delpol;
}

int xfrm_policy_insert(int dir, struct xfrm_policy *policy, int excl)
{
	struct net *net = xp_net(policy);
	struct xfrm_policy *delpol;
	struct hlist_head *chain;

	spin_lock_bh(&net->xfrm.xfrm_policy_lock);
	chain = policy_hash_bysel(net, &policy->selector, policy->family, dir);
	if (chain)
		delpol = xfrm_policy_insert_list(chain, policy, excl);
	else
		delpol = xfrm_policy_inexact_insert(policy, dir, excl);

	if (IS_ERR(delpol)) {
		spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
		return PTR_ERR(delpol);
	}

	__xfrm_policy_link(policy, dir);

	/* After the previous checks, the family can only be AF_INET or AF_INET6 */
	if (policy->family == AF_INET)
		rt_genid_bump_ipv4(net);
	else
		rt_genid_bump_ipv6(net);

	if (delpol) {
		xfrm_policy_requeue(delpol, policy);
		__xfrm_policy_unlink(delpol, dir);
	}
	policy->index = delpol ? delpol->index : xfrm_gen_index(net, dir, policy->index);
	hlist_add_head(&policy->byidx, net->xfrm.policy_byidx+idx_hash(net, policy->index));
	policy->curlft.add_time = ktime_get_real_seconds();
	policy->curlft.use_time = 0;
	if (!mod_timer(&policy->timer, jiffies + HZ))
		xfrm_pol_hold(policy);
	spin_unlock_bh(&net->xfrm.xfrm_policy_lock);

	if (delpol)
		xfrm_policy_kill(delpol);
	else if (xfrm_bydst_should_resize(net, dir, NULL))
		schedule_work(&net->xfrm.policy_hash_work);

	return 0;
}
EXPORT_SYMBOL(xfrm_policy_insert);

static struct xfrm_policy *
__xfrm_policy_bysel_ctx(struct hlist_head *chain, const struct xfrm_mark *mark,
			u32 if_id, u8 type, int dir, struct xfrm_selector *sel,
			struct xfrm_sec_ctx *ctx)
{
	struct xfrm_policy *pol;

	if (!chain)
		return NULL;

	hlist_for_each_entry(pol, chain, bydst) {
		if (pol->type == type &&
		    pol->if_id == if_id &&
		    xfrm_policy_mark_match(mark, pol) &&
		    !selector_cmp(sel, &pol->selector) &&
		    xfrm_sec_ctx_match(ctx, pol->security))
			return pol;
	}

	return NULL;
}

struct xfrm_policy *
xfrm_policy_bysel_ctx(struct net *net, const struct xfrm_mark *mark, u32 if_id,
		      u8 type, int dir, struct xfrm_selector *sel,
		      struct xfrm_sec_ctx *ctx, int delete, int *err)
{
	struct xfrm_pol_inexact_bin *bin = NULL;
	struct xfrm_policy *pol, *ret = NULL;
	struct hlist_head *chain;

	*err = 0;
	spin_lock_bh(&net->xfrm.xfrm_policy_lock);
	chain = policy_hash_bysel(net, sel, sel->family, dir);
	if (!chain) {
		struct xfrm_pol_inexact_candidates cand;
		int i;

		bin = xfrm_policy_inexact_lookup(net, type,
						 sel->family, dir, if_id);
		if (!bin) {
			spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
			return NULL;
		}

		if (!xfrm_policy_find_inexact_candidates(&cand, bin,
							 &sel->saddr,
							 &sel->daddr)) {
			spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
			return NULL;
		}

		pol = NULL;
		for (i = 0; i < ARRAY_SIZE(cand.res); i++) {
			struct xfrm_policy *tmp;

			tmp = __xfrm_policy_bysel_ctx(cand.res[i], mark,
						      if_id, type, dir,
						      sel, ctx);
			if (!tmp)
				continue;

			if (!pol || tmp->pos < pol->pos)
				pol = tmp;
		}
	} else {
		pol = __xfrm_policy_bysel_ctx(chain, mark, if_id, type, dir,
					      sel, ctx);
	}

	if (pol) {
		xfrm_pol_hold(pol);
		if (delete) {
			*err = security_xfrm_policy_delete(pol->security);
			if (*err) {
				spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
				return pol;
			}
			__xfrm_policy_unlink(pol, dir);
		}
		ret = pol;
	}
	spin_unlock_bh(&net->xfrm.xfrm_policy_lock);

	if (ret && delete)
		xfrm_policy_kill(ret);
	if (bin && delete)
		xfrm_policy_inexact_prune_bin(bin);
	return ret;
}
EXPORT_SYMBOL(xfrm_policy_bysel_ctx);

struct xfrm_policy *
xfrm_policy_byid(struct net *net, const struct xfrm_mark *mark, u32 if_id,
		 u8 type, int dir, u32 id, int delete, int *err)
{
	struct xfrm_policy *pol, *ret;
	struct hlist_head *chain;

	*err = -ENOENT;
	if (xfrm_policy_id2dir(id) != dir)
		return NULL;

	*err = 0;
	spin_lock_bh(&net->xfrm.xfrm_policy_lock);
	chain = net->xfrm.policy_byidx + idx_hash(net, id);
	ret = NULL;
	hlist_for_each_entry(pol, chain, byidx) {
		if (pol->type == type && pol->index == id &&
		    pol->if_id == if_id && xfrm_policy_mark_match(mark, pol)) {
			xfrm_pol_hold(pol);
			if (delete) {
				*err = security_xfrm_policy_delete(
								pol->security);
				if (*err) {
					spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
					return pol;
				}
				__xfrm_policy_unlink(pol, dir);
			}
			ret = pol;
			break;
		}
	}
	spin_unlock_bh(&net->xfrm.xfrm_policy_lock);

	if (ret && delete)
		xfrm_policy_kill(ret);
	return ret;
}
EXPORT_SYMBOL(xfrm_policy_byid);

#ifdef CONFIG_SECURITY_NETWORK_XFRM
static inline int
xfrm_policy_flush_secctx_check(struct net *net, u8 type, bool task_valid)
{
	struct xfrm_policy *pol;
	int err = 0;

	list_for_each_entry(pol, &net->xfrm.policy_all, walk.all) {
		if (pol->walk.dead ||
		    xfrm_policy_id2dir(pol->index) >= XFRM_POLICY_MAX ||
		    pol->type != type)
			continue;

		err = security_xfrm_policy_delete(pol->security);
		if (err) {
			xfrm_audit_policy_delete(pol, 0, task_valid);
			return err;
		}
	}
	return err;
}
#else
static inline int
xfrm_policy_flush_secctx_check(struct net *net, u8 type, bool task_valid)
{
	return 0;
}
#endif

int xfrm_policy_flush(struct net *net, u8 type, bool task_valid)
{
	int dir, err = 0, cnt = 0;
	struct xfrm_policy *pol;

	spin_lock_bh(&net->xfrm.xfrm_policy_lock);

	err = xfrm_policy_flush_secctx_check(net, type, task_valid);
	if (err)
		goto out;

again:
	list_for_each_entry(pol, &net->xfrm.policy_all, walk.all) {
		dir = xfrm_policy_id2dir(pol->index);
		if (pol->walk.dead ||
		    dir >= XFRM_POLICY_MAX ||
		    pol->type != type)
			continue;

		__xfrm_policy_unlink(pol, dir);
		spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
		cnt++;
		xfrm_audit_policy_delete(pol, 1, task_valid);
		xfrm_policy_kill(pol);
		spin_lock_bh(&net->xfrm.xfrm_policy_lock);
		goto again;
	}
	if (cnt)
		__xfrm_policy_inexact_flush(net);
	else
		err = -ESRCH;
out:
	spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
	return err;
}
EXPORT_SYMBOL(xfrm_policy_flush);

int xfrm_policy_walk(struct net *net, struct xfrm_policy_walk *walk,
		     int (*func)(struct xfrm_policy *, int, int, void*),
		     void *data)
{
	struct xfrm_policy *pol;
	struct xfrm_policy_walk_entry *x;
	int error = 0;

	if (walk->type >= XFRM_POLICY_TYPE_MAX &&
	    walk->type != XFRM_POLICY_TYPE_ANY)
		return -EINVAL;

	if (list_empty(&walk->walk.all) && walk->seq != 0)
		return 0;

	spin_lock_bh(&net->xfrm.xfrm_policy_lock);
	if (list_empty(&walk->walk.all))
		x = list_first_entry(&net->xfrm.policy_all, struct xfrm_policy_walk_entry, all);
	else
		x = list_first_entry(&walk->walk.all,
				     struct xfrm_policy_walk_entry, all);

	list_for_each_entry_from(x, &net->xfrm.policy_all, all) {
		if (x->dead)
			continue;
		pol = container_of(x, struct xfrm_policy, walk);
		if (walk->type != XFRM_POLICY_TYPE_ANY &&
		    walk->type != pol->type)
			continue;
		error = func(pol, xfrm_policy_id2dir(pol->index),
			     walk->seq, data);
		if (error) {
			list_move_tail(&walk->walk.all, &x->all);
			goto out;
		}
		walk->seq++;
	}
	if (walk->seq == 0) {
		error = -ENOENT;
		goto out;
	}
	list_del_init(&walk->walk.all);
out:
	spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
	return error;
}
EXPORT_SYMBOL(xfrm_policy_walk);

void xfrm_policy_walk_init(struct xfrm_policy_walk *walk, u8 type)
{
	INIT_LIST_HEAD(&walk->walk.all);
	walk->walk.dead = 1;
	walk->type = type;
	walk->seq = 0;
}
EXPORT_SYMBOL(xfrm_policy_walk_init);

void xfrm_policy_walk_done(struct xfrm_policy_walk *walk, struct net *net)
{
	if (list_empty(&walk->walk.all))
		return;

	spin_lock_bh(&net->xfrm.xfrm_policy_lock); /* FIXME: where is net? */
	list_del(&walk->walk.all);
	spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
}
EXPORT_SYMBOL(xfrm_policy_walk_done);

/*
 * Find the policy to apply to this flow.
 *
 * Returns 0 if a policy is found, else a negative errno.
 */
1890 static int xfrm_policy_match(const struct xfrm_policy *pol,
1891 			     const struct flowi *fl,
1892 			     u8 type, u16 family, u32 if_id)
1893 {
1894 	const struct xfrm_selector *sel = &pol->selector;
1895 	int ret = -ESRCH;
1896 	bool match;
1897 
1898 	if (pol->family != family ||
1899 	    pol->if_id != if_id ||
1900 	    (fl->flowi_mark & pol->mark.m) != pol->mark.v ||
1901 	    pol->type != type)
1902 		return ret;
1903 
1904 	match = xfrm_selector_match(sel, fl, family);
1905 	if (match)
1906 		ret = security_xfrm_policy_lookup(pol->security, fl->flowi_secid);
1907 	return ret;
1908 }
1909 
1910 static struct xfrm_pol_inexact_node *
1911 xfrm_policy_lookup_inexact_addr(const struct rb_root *r,
1912 				seqcount_spinlock_t *count,
1913 				const xfrm_address_t *addr, u16 family)
1914 {
1915 	const struct rb_node *parent;
1916 	int seq;
1917 
1918 again:
1919 	seq = read_seqcount_begin(count);
1920 
1921 	parent = rcu_dereference_raw(r->rb_node);
1922 	while (parent) {
1923 		struct xfrm_pol_inexact_node *node;
1924 		int delta;
1925 
1926 		node = rb_entry(parent, struct xfrm_pol_inexact_node, node);
1927 
1928 		delta = xfrm_policy_addr_delta(addr, &node->addr,
1929 					       node->prefixlen, family);
1930 		if (delta < 0) {
1931 			parent = rcu_dereference_raw(parent->rb_left);
1932 			continue;
1933 		} else if (delta > 0) {
1934 			parent = rcu_dereference_raw(parent->rb_right);
1935 			continue;
1936 		}
1937 
1938 		return node;
1939 	}
1940 
1941 	if (read_seqcount_retry(count, seq))
1942 		goto again;
1943 
1944 	return NULL;
1945 }
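
/* Editor's illustration (not part of this file): the function above uses
 * the standard seqcount reader idiom; in generic form:
 *
 *	do {
 *		seq = read_seqcount_begin(&sc);
 *		node = lookup(root, key);	// tree may be rebalanced meanwhile
 *	} while (!node && read_seqcount_retry(&sc, seq));
 *
 * A writer bumps the seqcount around rb-tree updates, so a reader that
 * found nothing while racing a rebalance retries; a hit is returned at
 * once, exactly as done above.
 */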
1946 
1947 static bool
1948 xfrm_policy_find_inexact_candidates(struct xfrm_pol_inexact_candidates *cand,
1949 				    struct xfrm_pol_inexact_bin *b,
1950 				    const xfrm_address_t *saddr,
1951 				    const xfrm_address_t *daddr)
1952 {
1953 	struct xfrm_pol_inexact_node *n;
1954 	u16 family;
1955 
1956 	if (!b)
1957 		return false;
1958 
1959 	family = b->k.family;
1960 	memset(cand, 0, sizeof(*cand));
1961 	cand->res[XFRM_POL_CAND_ANY] = &b->hhead;
1962 
1963 	n = xfrm_policy_lookup_inexact_addr(&b->root_d, &b->count, daddr,
1964 					    family);
1965 	if (n) {
1966 		cand->res[XFRM_POL_CAND_DADDR] = &n->hhead;
1967 		n = xfrm_policy_lookup_inexact_addr(&n->root, &b->count, saddr,
1968 						    family);
1969 		if (n)
1970 			cand->res[XFRM_POL_CAND_BOTH] = &n->hhead;
1971 	}
1972 
1973 	n = xfrm_policy_lookup_inexact_addr(&b->root_s, &b->count, saddr,
1974 					    family);
1975 	if (n)
1976 		cand->res[XFRM_POL_CAND_SADDR] = &n->hhead;
1977 
1978 	return true;
1979 }
1980 
1981 static struct xfrm_pol_inexact_bin *
1982 xfrm_policy_inexact_lookup_rcu(struct net *net, u8 type, u16 family,
1983 			       u8 dir, u32 if_id)
1984 {
1985 	struct xfrm_pol_inexact_key k = {
1986 		.family = family,
1987 		.type = type,
1988 		.dir = dir,
1989 		.if_id = if_id,
1990 	};
1991 
1992 	write_pnet(&k.net, net);
1993 
1994 	return rhashtable_lookup(&xfrm_policy_inexact_table, &k,
1995 				 xfrm_pol_inexact_params);
1996 }
1997 
1998 static struct xfrm_pol_inexact_bin *
1999 xfrm_policy_inexact_lookup(struct net *net, u8 type, u16 family,
2000 			   u8 dir, u32 if_id)
2001 {
2002 	struct xfrm_pol_inexact_bin *bin;
2003 
2004 	lockdep_assert_held(&net->xfrm.xfrm_policy_lock);
2005 
2006 	rcu_read_lock();
2007 	bin = xfrm_policy_inexact_lookup_rcu(net, type, family, dir, if_id);
2008 	rcu_read_unlock();
2009 
2010 	return bin;
2011 }
2012 
2013 static struct xfrm_policy *
2014 __xfrm_policy_eval_candidates(struct hlist_head *chain,
2015 			      struct xfrm_policy *prefer,
2016 			      const struct flowi *fl,
2017 			      u8 type, u16 family, u32 if_id)
2018 {
2019 	u32 priority = prefer ? prefer->priority : ~0u;
2020 	struct xfrm_policy *pol;
2021 
2022 	if (!chain)
2023 		return NULL;
2024 
2025 	hlist_for_each_entry_rcu(pol, chain, bydst) {
2026 		int err;
2027 
2028 		if (pol->priority > priority)
2029 			break;
2030 
2031 		err = xfrm_policy_match(pol, fl, type, family, if_id);
2032 		if (err) {
2033 			if (err != -ESRCH)
2034 				return ERR_PTR(err);
2035 
2036 			continue;
2037 		}
2038 
2039 		if (prefer) {
2040 			/* Matches. On a priority tie the older entry (smaller pos) wins. */
2041 			if (pol->priority == priority &&
2042 			    prefer->pos < pol->pos)
2043 				return prefer;
2044 		}
2045 
2046 		return pol;
2047 	}
2048 
2049 	return NULL;
2050 }
2051 
2052 static struct xfrm_policy *
2053 xfrm_policy_eval_candidates(struct xfrm_pol_inexact_candidates *cand,
2054 			    struct xfrm_policy *prefer,
2055 			    const struct flowi *fl,
2056 			    u8 type, u16 family, u32 if_id)
2057 {
2058 	struct xfrm_policy *tmp;
2059 	int i;
2060 
2061 	for (i = 0; i < ARRAY_SIZE(cand->res); i++) {
2062 		tmp = __xfrm_policy_eval_candidates(cand->res[i],
2063 						    prefer,
2064 						    fl, type, family, if_id);
2065 		if (!tmp)
2066 			continue;
2067 
2068 		if (IS_ERR(tmp))
2069 			return tmp;
2070 		prefer = tmp;
2071 	}
2072 
2073 	return prefer;
2074 }
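
/* Editor's illustration (made-up numbers): given matching candidates
 * A (priority 10, pos 3), B (priority 10, pos 7) and C (priority 20,
 * pos 1), the loops above pick A: at equal priority the smaller pos
 * (older insertion) wins, and a numerically smaller priority always
 * beats a larger one, so C loses to both despite its small pos.
 */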
2075 
2076 static struct xfrm_policy *xfrm_policy_lookup_bytype(struct net *net, u8 type,
2077 						     const struct flowi *fl,
2078 						     u16 family, u8 dir,
2079 						     u32 if_id)
2080 {
2081 	struct xfrm_pol_inexact_candidates cand;
2082 	const xfrm_address_t *daddr, *saddr;
2083 	struct xfrm_pol_inexact_bin *bin;
2084 	struct xfrm_policy *pol, *ret;
2085 	struct hlist_head *chain;
2086 	unsigned int sequence;
2087 	int err;
2088 
2089 	daddr = xfrm_flowi_daddr(fl, family);
2090 	saddr = xfrm_flowi_saddr(fl, family);
2091 	if (unlikely(!daddr || !saddr))
2092 		return NULL;
2093 
2094 	rcu_read_lock();
2095  retry:
2096 	do {
2097 		sequence = read_seqcount_begin(&net->xfrm.xfrm_policy_hash_generation);
2098 		chain = policy_hash_direct(net, daddr, saddr, family, dir);
2099 	} while (read_seqcount_retry(&net->xfrm.xfrm_policy_hash_generation, sequence));
2100 
2101 	ret = NULL;
2102 	hlist_for_each_entry_rcu(pol, chain, bydst) {
2103 		err = xfrm_policy_match(pol, fl, type, family, if_id);
2104 		if (err) {
2105 			if (err == -ESRCH)
2106 				continue;
2107 			else {
2108 				ret = ERR_PTR(err);
2109 				goto fail;
2110 			}
2111 		} else {
2112 			ret = pol;
2113 			break;
2114 		}
2115 	}
2116 	bin = xfrm_policy_inexact_lookup_rcu(net, type, family, dir, if_id);
2117 	if (!bin || !xfrm_policy_find_inexact_candidates(&cand, bin, saddr,
2118 							 daddr))
2119 		goto skip_inexact;
2120 
2121 	pol = xfrm_policy_eval_candidates(&cand, ret, fl, type,
2122 					  family, if_id);
2123 	if (pol) {
2124 		ret = pol;
2125 		if (IS_ERR(pol))
2126 			goto fail;
2127 	}
2128 
2129 skip_inexact:
2130 	if (read_seqcount_retry(&net->xfrm.xfrm_policy_hash_generation, sequence))
2131 		goto retry;
2132 
2133 	if (ret && !xfrm_pol_hold_rcu(ret))
2134 		goto retry;
2135 fail:
2136 	rcu_read_unlock();
2137 
2138 	return ret;
2139 }
2140 
2141 static struct xfrm_policy *xfrm_policy_lookup(struct net *net,
2142 					      const struct flowi *fl,
2143 					      u16 family, u8 dir, u32 if_id)
2144 {
2145 #ifdef CONFIG_XFRM_SUB_POLICY
2146 	struct xfrm_policy *pol;
2147 
2148 	pol = xfrm_policy_lookup_bytype(net, XFRM_POLICY_TYPE_SUB, fl, family,
2149 					dir, if_id);
2150 	if (pol != NULL)
2151 		return pol;
2152 #endif
2153 	return xfrm_policy_lookup_bytype(net, XFRM_POLICY_TYPE_MAIN, fl, family,
2154 					 dir, if_id);
2155 }
2156 
2157 static struct xfrm_policy *xfrm_sk_policy_lookup(const struct sock *sk, int dir,
2158 						 const struct flowi *fl,
2159 						 u16 family, u32 if_id)
2160 {
2161 	struct xfrm_policy *pol;
2162 
2163 	rcu_read_lock();
2164  again:
2165 	pol = rcu_dereference(sk->sk_policy[dir]);
2166 	if (pol != NULL) {
2167 		bool match;
2168 		int err = 0;
2169 
2170 		if (pol->family != family) {
2171 			pol = NULL;
2172 			goto out;
2173 		}
2174 
2175 		match = xfrm_selector_match(&pol->selector, fl, family);
2176 		if (match) {
2177 			if ((sk->sk_mark & pol->mark.m) != pol->mark.v ||
2178 			    pol->if_id != if_id) {
2179 				pol = NULL;
2180 				goto out;
2181 			}
2182 			err = security_xfrm_policy_lookup(pol->security,
2183 						      fl->flowi_secid);
2184 			if (!err) {
2185 				if (!xfrm_pol_hold_rcu(pol))
2186 					goto again;
2187 			} else if (err == -ESRCH) {
2188 				pol = NULL;
2189 			} else {
2190 				pol = ERR_PTR(err);
2191 			}
2192 		} else
2193 			pol = NULL;
2194 	}
2195 out:
2196 	rcu_read_unlock();
2197 	return pol;
2198 }
2199 
2200 static void __xfrm_policy_link(struct xfrm_policy *pol, int dir)
2201 {
2202 	struct net *net = xp_net(pol);
2203 
2204 	list_add(&pol->walk.all, &net->xfrm.policy_all);
2205 	net->xfrm.policy_count[dir]++;
2206 	xfrm_pol_hold(pol);
2207 }
2208 
2209 static struct xfrm_policy *__xfrm_policy_unlink(struct xfrm_policy *pol,
2210 						int dir)
2211 {
2212 	struct net *net = xp_net(pol);
2213 
2214 	if (list_empty(&pol->walk.all))
2215 		return NULL;
2216 
2217 	/* Socket policies are not hashed. */
2218 	if (!hlist_unhashed(&pol->bydst)) {
2219 		hlist_del_rcu(&pol->bydst);
2220 		hlist_del_init(&pol->bydst_inexact_list);
2221 		hlist_del(&pol->byidx);
2222 	}
2223 
2224 	list_del_init(&pol->walk.all);
2225 	net->xfrm.policy_count[dir]--;
2226 
2227 	return pol;
2228 }
2229 
2230 static void xfrm_sk_policy_link(struct xfrm_policy *pol, int dir)
2231 {
2232 	__xfrm_policy_link(pol, XFRM_POLICY_MAX + dir);
2233 }
2234 
2235 static void xfrm_sk_policy_unlink(struct xfrm_policy *pol, int dir)
2236 {
2237 	__xfrm_policy_unlink(pol, XFRM_POLICY_MAX + dir);
2238 }
2239 
2240 int xfrm_policy_delete(struct xfrm_policy *pol, int dir)
2241 {
2242 	struct net *net = xp_net(pol);
2243 
2244 	spin_lock_bh(&net->xfrm.xfrm_policy_lock);
2245 	pol = __xfrm_policy_unlink(pol, dir);
2246 	spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
2247 	if (pol) {
2248 		xfrm_policy_kill(pol);
2249 		return 0;
2250 	}
2251 	return -ENOENT;
2252 }
2253 EXPORT_SYMBOL(xfrm_policy_delete);
2254 
2255 int xfrm_sk_policy_insert(struct sock *sk, int dir, struct xfrm_policy *pol)
2256 {
2257 	struct net *net = sock_net(sk);
2258 	struct xfrm_policy *old_pol;
2259 
2260 #ifdef CONFIG_XFRM_SUB_POLICY
2261 	if (pol && pol->type != XFRM_POLICY_TYPE_MAIN)
2262 		return -EINVAL;
2263 #endif
2264 
2265 	spin_lock_bh(&net->xfrm.xfrm_policy_lock);
2266 	old_pol = rcu_dereference_protected(sk->sk_policy[dir],
2267 				lockdep_is_held(&net->xfrm.xfrm_policy_lock));
2268 	if (pol) {
2269 		pol->curlft.add_time = ktime_get_real_seconds();
2270 		pol->index = xfrm_gen_index(net, XFRM_POLICY_MAX+dir, 0);
2271 		xfrm_sk_policy_link(pol, dir);
2272 	}
2273 	rcu_assign_pointer(sk->sk_policy[dir], pol);
2274 	if (old_pol) {
2275 		if (pol)
2276 			xfrm_policy_requeue(old_pol, pol);
2277 
2278 		/* Unlinking always succeeds. This is the only function
2279 		 * allowed to delete or replace a socket policy.
2280 		 */
2281 		xfrm_sk_policy_unlink(old_pol, dir);
2282 	}
2283 	spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
2284 
2285 	if (old_pol) {
2286 		xfrm_policy_kill(old_pol);
2287 	}
2288 	return 0;
2289 }
2290 
2291 static struct xfrm_policy *clone_policy(const struct xfrm_policy *old, int dir)
2292 {
2293 	struct xfrm_policy *newp = xfrm_policy_alloc(xp_net(old), GFP_ATOMIC);
2294 	struct net *net = xp_net(old);
2295 
2296 	if (newp) {
2297 		newp->selector = old->selector;
2298 		if (security_xfrm_policy_clone(old->security,
2299 					       &newp->security)) {
2300 			kfree(newp);
2301 			return NULL;  /* ENOMEM */
2302 		}
2303 		newp->lft = old->lft;
2304 		newp->curlft = old->curlft;
2305 		newp->mark = old->mark;
2306 		newp->if_id = old->if_id;
2307 		newp->action = old->action;
2308 		newp->flags = old->flags;
2309 		newp->xfrm_nr = old->xfrm_nr;
2310 		newp->index = old->index;
2311 		newp->type = old->type;
2312 		newp->family = old->family;
2313 		memcpy(newp->xfrm_vec, old->xfrm_vec,
2314 		       newp->xfrm_nr*sizeof(struct xfrm_tmpl));
2315 		spin_lock_bh(&net->xfrm.xfrm_policy_lock);
2316 		xfrm_sk_policy_link(newp, dir);
2317 		spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
2318 		xfrm_pol_put(newp);
2319 	}
2320 	return newp;
2321 }
2322 
2323 int __xfrm_sk_clone_policy(struct sock *sk, const struct sock *osk)
2324 {
2325 	const struct xfrm_policy *p;
2326 	struct xfrm_policy *np;
2327 	int i, ret = 0;
2328 
2329 	rcu_read_lock();
2330 	for (i = 0; i < 2; i++) {
2331 		p = rcu_dereference(osk->sk_policy[i]);
2332 		if (p) {
2333 			np = clone_policy(p, i);
2334 			if (unlikely(!np)) {
2335 				ret = -ENOMEM;
2336 				break;
2337 			}
2338 			rcu_assign_pointer(sk->sk_policy[i], np);
2339 		}
2340 	}
2341 	rcu_read_unlock();
2342 	return ret;
2343 }
2344 
2345 static int
2346 xfrm_get_saddr(struct net *net, int oif, xfrm_address_t *local,
2347 	       xfrm_address_t *remote, unsigned short family, u32 mark)
2348 {
2349 	int err;
2350 	const struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
2351 
2352 	if (unlikely(afinfo == NULL))
2353 		return -EINVAL;
2354 	err = afinfo->get_saddr(net, oif, local, remote, mark);
2355 	rcu_read_unlock();
2356 	return err;
2357 }
2358 
2359 /* Resolve the list of templates for the flow, given the policy. */
2360 
2361 static int
2362 xfrm_tmpl_resolve_one(struct xfrm_policy *policy, const struct flowi *fl,
2363 		      struct xfrm_state **xfrm, unsigned short family)
2364 {
2365 	struct net *net = xp_net(policy);
2366 	int nx;
2367 	int i, error;
2368 	xfrm_address_t *daddr = xfrm_flowi_daddr(fl, family);
2369 	xfrm_address_t *saddr = xfrm_flowi_saddr(fl, family);
2370 	xfrm_address_t tmp;
2371 
2372 	for (nx = 0, i = 0; i < policy->xfrm_nr; i++) {
2373 		struct xfrm_state *x;
2374 		xfrm_address_t *remote = daddr;
2375 		xfrm_address_t *local  = saddr;
2376 		struct xfrm_tmpl *tmpl = &policy->xfrm_vec[i];
2377 
2378 		if (tmpl->mode == XFRM_MODE_TUNNEL ||
2379 		    tmpl->mode == XFRM_MODE_BEET) {
2380 			remote = &tmpl->id.daddr;
2381 			local = &tmpl->saddr;
2382 			if (xfrm_addr_any(local, tmpl->encap_family)) {
2383 				error = xfrm_get_saddr(net, fl->flowi_oif,
2384 						       &tmp, remote,
2385 						       tmpl->encap_family, 0);
2386 				if (error)
2387 					goto fail;
2388 				local = &tmp;
2389 			}
2390 		}
2391 
2392 		x = xfrm_state_find(remote, local, fl, tmpl, policy, &error,
2393 				    family, policy->if_id);
2394 
2395 		if (x && x->km.state == XFRM_STATE_VALID) {
2396 			xfrm[nx++] = x;
2397 			daddr = remote;
2398 			saddr = local;
2399 			continue;
2400 		}
2401 		if (x) {
2402 			error = (x->km.state == XFRM_STATE_ERROR ?
2403 				 -EINVAL : -EAGAIN);
2404 			xfrm_state_put(x);
2405 		} else if (error == -ESRCH) {
2406 			error = -EAGAIN;
2407 		}
2408 
2409 		if (!tmpl->optional)
2410 			goto fail;
2411 	}
2412 	return nx;
2413 
2414 fail:
2415 	for (nx--; nx >= 0; nx--)
2416 		xfrm_state_put(xfrm[nx]);
2417 	return error;
2418 }
2419 
2420 static int
2421 xfrm_tmpl_resolve(struct xfrm_policy **pols, int npols, const struct flowi *fl,
2422 		  struct xfrm_state **xfrm, unsigned short family)
2423 {
2424 	struct xfrm_state *tp[XFRM_MAX_DEPTH];
2425 	struct xfrm_state **tpp = (npols > 1) ? tp : xfrm;
2426 	int cnx = 0;
2427 	int error;
2428 	int ret;
2429 	int i;
2430 
2431 	for (i = 0; i < npols; i++) {
2432 		if (cnx + pols[i]->xfrm_nr >= XFRM_MAX_DEPTH) {
2433 			error = -ENOBUFS;
2434 			goto fail;
2435 		}
2436 
2437 		ret = xfrm_tmpl_resolve_one(pols[i], fl, &tpp[cnx], family);
2438 		if (ret < 0) {
2439 			error = ret;
2440 			goto fail;
2441 		} else
2442 			cnx += ret;
2443 	}
2444 
2445 	/* Found states are sorted for outbound processing. */
2446 	if (npols > 1)
2447 		xfrm_state_sort(xfrm, tpp, cnx, family);
2448 
2449 	return cnx;
2450 
2451  fail:
2452 	for (cnx--; cnx >= 0; cnx--)
2453 		xfrm_state_put(tpp[cnx]);
2454 	return error;
2455 
2456 }
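
/* Editor's illustration (not part of this file): with pols[0] carrying
 * two templates and pols[1] one, a fully successful resolve returns
 * cnx == 3 with the acquired states re-ordered by xfrm_state_sort()
 * into outbound processing order; a combined depth of XFRM_MAX_DEPTH
 * or more fails early with -ENOBUFS before any state is looked up.
 */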
2457 
2458 static int xfrm_get_tos(const struct flowi *fl, int family)
2459 {
2460 	if (family == AF_INET)
2461 		return IPTOS_RT_MASK & fl->u.ip4.flowi4_tos;
2462 
2463 	return 0;
2464 }
2465 
2466 static inline struct xfrm_dst *xfrm_alloc_dst(struct net *net, int family)
2467 {
2468 	const struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
2469 	struct dst_ops *dst_ops;
2470 	struct xfrm_dst *xdst;
2471 
2472 	if (!afinfo)
2473 		return ERR_PTR(-EINVAL);
2474 
2475 	switch (family) {
2476 	case AF_INET:
2477 		dst_ops = &net->xfrm.xfrm4_dst_ops;
2478 		break;
2479 #if IS_ENABLED(CONFIG_IPV6)
2480 	case AF_INET6:
2481 		dst_ops = &net->xfrm.xfrm6_dst_ops;
2482 		break;
2483 #endif
2484 	default:
2485 		BUG();
2486 	}
2487 	xdst = dst_alloc(dst_ops, NULL, 1, DST_OBSOLETE_NONE, 0);
2488 
2489 	if (likely(xdst)) {
2490 		memset_after(xdst, 0, u.dst);
2491 	} else
2492 		xdst = ERR_PTR(-ENOBUFS);
2493 
2494 	rcu_read_unlock();
2495 
2496 	return xdst;
2497 }
2498 
2499 static void xfrm_init_path(struct xfrm_dst *path, struct dst_entry *dst,
2500 			   int nfheader_len)
2501 {
2502 	if (dst->ops->family == AF_INET6) {
2503 		struct rt6_info *rt = (struct rt6_info *)dst;
2504 		path->path_cookie = rt6_get_cookie(rt);
2505 		path->u.rt6.rt6i_nfheader_len = nfheader_len;
2506 	}
2507 }
2508 
2509 static inline int xfrm_fill_dst(struct xfrm_dst *xdst, struct net_device *dev,
2510 				const struct flowi *fl)
2511 {
2512 	const struct xfrm_policy_afinfo *afinfo =
2513 		xfrm_policy_get_afinfo(xdst->u.dst.ops->family);
2514 	int err;
2515 
2516 	if (!afinfo)
2517 		return -EINVAL;
2518 
2519 	err = afinfo->fill_dst(xdst, dev, fl);
2520 
2521 	rcu_read_unlock();
2522 
2523 	return err;
2524 }
2525 
2526 
2527 /* Allocate chain of dst_entry's, attach known xfrm's, calculate
2528  * all the metrics... In short, bundle a bundle.
2529  */
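
/* Editor's sketch of the resulting shape for nx == 2 (illustrative):
 *
 *	xdst0 --child--> xdst1 --child--> dst (underlying IP route)
 *	  |                |
 *	  +-> xfrm[0]      +-> xfrm[1]
 *
 * Each xfrm_dst pairs one state with the route it was resolved over
 * (xdst->route); xdst0->path points at the same underlying route that
 * ends the child chain.
 */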
2530 
2531 static struct dst_entry *xfrm_bundle_create(struct xfrm_policy *policy,
2532 					    struct xfrm_state **xfrm,
2533 					    struct xfrm_dst **bundle,
2534 					    int nx,
2535 					    const struct flowi *fl,
2536 					    struct dst_entry *dst)
2537 {
2538 	const struct xfrm_state_afinfo *afinfo;
2539 	const struct xfrm_mode *inner_mode;
2540 	struct net *net = xp_net(policy);
2541 	unsigned long now = jiffies;
2542 	struct net_device *dev;
2543 	struct xfrm_dst *xdst_prev = NULL;
2544 	struct xfrm_dst *xdst0 = NULL;
2545 	int i = 0;
2546 	int err;
2547 	int header_len = 0;
2548 	int nfheader_len = 0;
2549 	int trailer_len = 0;
2550 	int tos;
2551 	int family = policy->selector.family;
2552 	xfrm_address_t saddr, daddr;
2553 
2554 	xfrm_flowi_addr_get(fl, &saddr, &daddr, family);
2555 
2556 	tos = xfrm_get_tos(fl, family);
2557 
2558 	dst_hold(dst);
2559 
2560 	for (; i < nx; i++) {
2561 		struct xfrm_dst *xdst = xfrm_alloc_dst(net, family);
2562 		struct dst_entry *dst1 = &xdst->u.dst;
2563 
2564 		err = PTR_ERR(xdst);
2565 		if (IS_ERR(xdst)) {
2566 			dst_release(dst);
2567 			goto put_states;
2568 		}
2569 
2570 		bundle[i] = xdst;
2571 		if (!xdst_prev)
2572 			xdst0 = xdst;
2573 		else
2574 			/* A reference was taken in xfrm_alloc_dst(),
2575 			 * so there is no need to dst_clone() dst1.
2576 			 */
2577 			xfrm_dst_set_child(xdst_prev, &xdst->u.dst);
2578 
2579 		if (xfrm[i]->sel.family == AF_UNSPEC) {
2580 			inner_mode = xfrm_ip2inner_mode(xfrm[i],
2581 							xfrm_af2proto(family));
2582 			if (!inner_mode) {
2583 				err = -EAFNOSUPPORT;
2584 				dst_release(dst);
2585 				goto put_states;
2586 			}
2587 		} else
2588 			inner_mode = &xfrm[i]->inner_mode;
2589 
2590 		xdst->route = dst;
2591 		dst_copy_metrics(dst1, dst);
2592 
2593 		if (xfrm[i]->props.mode != XFRM_MODE_TRANSPORT) {
2594 			__u32 mark = 0;
2595 			int oif;
2596 
2597 			if (xfrm[i]->props.smark.v || xfrm[i]->props.smark.m)
2598 				mark = xfrm_smark_get(fl->flowi_mark, xfrm[i]);
2599 
2600 			family = xfrm[i]->props.family;
2601 			oif = fl->flowi_oif ? : fl->flowi_l3mdev;
2602 			dst = xfrm_dst_lookup(xfrm[i], tos, oif,
2603 					      &saddr, &daddr, family, mark);
2604 			err = PTR_ERR(dst);
2605 			if (IS_ERR(dst))
2606 				goto put_states;
2607 		} else
2608 			dst_hold(dst);
2609 
2610 		dst1->xfrm = xfrm[i];
2611 		xdst->xfrm_genid = xfrm[i]->genid;
2612 
2613 		dst1->obsolete = DST_OBSOLETE_FORCE_CHK;
2614 		dst1->lastuse = now;
2615 
2616 		dst1->input = dst_discard;
2617 
2618 		rcu_read_lock();
2619 		afinfo = xfrm_state_afinfo_get_rcu(inner_mode->family);
2620 		if (likely(afinfo))
2621 			dst1->output = afinfo->output;
2622 		else
2623 			dst1->output = dst_discard_out;
2624 		rcu_read_unlock();
2625 
2626 		xdst_prev = xdst;
2627 
2628 		header_len += xfrm[i]->props.header_len;
2629 		if (xfrm[i]->type->flags & XFRM_TYPE_NON_FRAGMENT)
2630 			nfheader_len += xfrm[i]->props.header_len;
2631 		trailer_len += xfrm[i]->props.trailer_len;
2632 	}
2633 
2634 	xfrm_dst_set_child(xdst_prev, dst);
2635 	xdst0->path = dst;
2636 
2637 	err = -ENODEV;
2638 	dev = dst->dev;
2639 	if (!dev)
2640 		goto free_dst;
2641 
2642 	xfrm_init_path(xdst0, dst, nfheader_len);
2643 	xfrm_init_pmtu(bundle, nx);
2644 
2645 	for (xdst_prev = xdst0; xdst_prev != (struct xfrm_dst *)dst;
2646 	     xdst_prev = (struct xfrm_dst *) xfrm_dst_child(&xdst_prev->u.dst)) {
2647 		err = xfrm_fill_dst(xdst_prev, dev, fl);
2648 		if (err)
2649 			goto free_dst;
2650 
2651 		xdst_prev->u.dst.header_len = header_len;
2652 		xdst_prev->u.dst.trailer_len = trailer_len;
2653 		header_len -= xdst_prev->u.dst.xfrm->props.header_len;
2654 		trailer_len -= xdst_prev->u.dst.xfrm->props.trailer_len;
2655 	}
2656 
2657 	return &xdst0->u.dst;
2658 
2659 put_states:
2660 	for (; i < nx; i++)
2661 		xfrm_state_put(xfrm[i]);
2662 free_dst:
2663 	if (xdst0)
2664 		dst_release_immediate(&xdst0->u.dst);
2665 
2666 	return ERR_PTR(err);
2667 }
2668 
2669 static int xfrm_expand_policies(const struct flowi *fl, u16 family,
2670 				struct xfrm_policy **pols,
2671 				int *num_pols, int *num_xfrms)
2672 {
2673 	int i;
2674 
2675 	if (*num_pols == 0 || !pols[0]) {
2676 		*num_pols = 0;
2677 		*num_xfrms = 0;
2678 		return 0;
2679 	}
2680 	if (IS_ERR(pols[0])) {
2681 		*num_pols = 0;
2682 		return PTR_ERR(pols[0]);
2683 	}
2684 
2685 	*num_xfrms = pols[0]->xfrm_nr;
2686 
2687 #ifdef CONFIG_XFRM_SUB_POLICY
2688 	if (pols[0]->action == XFRM_POLICY_ALLOW &&
2689 	    pols[0]->type != XFRM_POLICY_TYPE_MAIN) {
2690 		pols[1] = xfrm_policy_lookup_bytype(xp_net(pols[0]),
2691 						    XFRM_POLICY_TYPE_MAIN,
2692 						    fl, family,
2693 						    XFRM_POLICY_OUT,
2694 						    pols[0]->if_id);
2695 		if (pols[1]) {
2696 			if (IS_ERR(pols[1])) {
2697 				xfrm_pols_put(pols, *num_pols);
2698 				*num_pols = 0;
2699 				return PTR_ERR(pols[1]);
2700 			}
2701 			(*num_pols)++;
2702 			(*num_xfrms) += pols[1]->xfrm_nr;
2703 		}
2704 	}
2705 #endif
2706 	for (i = 0; i < *num_pols; i++) {
2707 		if (pols[i]->action != XFRM_POLICY_ALLOW) {
2708 			*num_xfrms = -1;
2709 			break;
2710 		}
2711 	}
2712 
2713 	return 0;
2714 
2715 }
2716 
2717 static struct xfrm_dst *
2718 xfrm_resolve_and_create_bundle(struct xfrm_policy **pols, int num_pols,
2719 			       const struct flowi *fl, u16 family,
2720 			       struct dst_entry *dst_orig)
2721 {
2722 	struct net *net = xp_net(pols[0]);
2723 	struct xfrm_state *xfrm[XFRM_MAX_DEPTH];
2724 	struct xfrm_dst *bundle[XFRM_MAX_DEPTH];
2725 	struct xfrm_dst *xdst;
2726 	struct dst_entry *dst;
2727 	int err;
2728 
2729 	/* Try to instantiate a bundle */
2730 	err = xfrm_tmpl_resolve(pols, num_pols, fl, xfrm, family);
2731 	if (err <= 0) {
2732 		if (err == 0)
2733 			return NULL;
2734 
2735 		if (err != -EAGAIN)
2736 			XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTPOLERROR);
2737 		return ERR_PTR(err);
2738 	}
2739 
2740 	dst = xfrm_bundle_create(pols[0], xfrm, bundle, err, fl, dst_orig);
2741 	if (IS_ERR(dst)) {
2742 		XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTBUNDLEGENERROR);
2743 		return ERR_CAST(dst);
2744 	}
2745 
2746 	xdst = (struct xfrm_dst *)dst;
2747 	xdst->num_xfrms = err;
2748 	xdst->num_pols = num_pols;
2749 	memcpy(xdst->pols, pols, sizeof(struct xfrm_policy *) * num_pols);
2750 	xdst->policy_genid = atomic_read(&pols[0]->genid);
2751 
2752 	return xdst;
2753 }
2754 
2755 static void xfrm_policy_queue_process(struct timer_list *t)
2756 {
2757 	struct sk_buff *skb;
2758 	struct sock *sk;
2759 	struct dst_entry *dst;
2760 	struct xfrm_policy *pol = from_timer(pol, t, polq.hold_timer);
2761 	struct net *net = xp_net(pol);
2762 	struct xfrm_policy_queue *pq = &pol->polq;
2763 	struct flowi fl;
2764 	struct sk_buff_head list;
2765 	__u32 skb_mark;
2766 
2767 	spin_lock(&pq->hold_queue.lock);
2768 	skb = skb_peek(&pq->hold_queue);
2769 	if (!skb) {
2770 		spin_unlock(&pq->hold_queue.lock);
2771 		goto out;
2772 	}
2773 	dst = skb_dst(skb);
2774 	sk = skb->sk;
2775 
2776 	/* Fixup the mark to support VTI. */
2777 	skb_mark = skb->mark;
2778 	skb->mark = pol->mark.v;
2779 	xfrm_decode_session(skb, &fl, dst->ops->family);
2780 	skb->mark = skb_mark;
2781 	spin_unlock(&pq->hold_queue.lock);
2782 
2783 	dst_hold(xfrm_dst_path(dst));
2784 	dst = xfrm_lookup(net, xfrm_dst_path(dst), &fl, sk, XFRM_LOOKUP_QUEUE);
2785 	if (IS_ERR(dst))
2786 		goto purge_queue;
2787 
2788 	if (dst->flags & DST_XFRM_QUEUE) {
2789 		dst_release(dst);
2790 
2791 		if (pq->timeout >= XFRM_QUEUE_TMO_MAX)
2792 			goto purge_queue;
2793 
2794 		pq->timeout = pq->timeout << 1;
2795 		if (!mod_timer(&pq->hold_timer, jiffies + pq->timeout))
2796 			xfrm_pol_hold(pol);
2797 		goto out;
2798 	}
2799 
2800 	dst_release(dst);
2801 
2802 	__skb_queue_head_init(&list);
2803 
2804 	spin_lock(&pq->hold_queue.lock);
2805 	pq->timeout = 0;
2806 	skb_queue_splice_init(&pq->hold_queue, &list);
2807 	spin_unlock(&pq->hold_queue.lock);
2808 
2809 	while (!skb_queue_empty(&list)) {
2810 		skb = __skb_dequeue(&list);
2811 
2812 		/* Fixup the mark to support VTI. */
2813 		skb_mark = skb->mark;
2814 		skb->mark = pol->mark.v;
2815 		xfrm_decode_session(skb, &fl, skb_dst(skb)->ops->family);
2816 		skb->mark = skb_mark;
2817 
2818 		dst_hold(xfrm_dst_path(skb_dst(skb)));
2819 		dst = xfrm_lookup(net, xfrm_dst_path(skb_dst(skb)), &fl, skb->sk, 0);
2820 		if (IS_ERR(dst)) {
2821 			kfree_skb(skb);
2822 			continue;
2823 		}
2824 
2825 		nf_reset_ct(skb);
2826 		skb_dst_drop(skb);
2827 		skb_dst_set(skb, dst);
2828 
2829 		dst_output(net, skb->sk, skb);
2830 	}
2831 
2832 out:
2833 	xfrm_pol_put(pol);
2834 	return;
2835 
2836 purge_queue:
2837 	pq->timeout = 0;
2838 	skb_queue_purge(&pq->hold_queue);
2839 	xfrm_pol_put(pol);
2840 }
2841 
2842 static int xdst_queue_output(struct net *net, struct sock *sk, struct sk_buff *skb)
2843 {
2844 	unsigned long sched_next;
2845 	struct dst_entry *dst = skb_dst(skb);
2846 	struct xfrm_dst *xdst = (struct xfrm_dst *) dst;
2847 	struct xfrm_policy *pol = xdst->pols[0];
2848 	struct xfrm_policy_queue *pq = &pol->polq;
2849 
2850 	if (unlikely(skb_fclone_busy(sk, skb))) {
2851 		kfree_skb(skb);
2852 		return 0;
2853 	}
2854 
2855 	if (pq->hold_queue.qlen > XFRM_MAX_QUEUE_LEN) {
2856 		kfree_skb(skb);
2857 		return -EAGAIN;
2858 	}
2859 
2860 	skb_dst_force(skb);
2861 
2862 	spin_lock_bh(&pq->hold_queue.lock);
2863 
2864 	if (!pq->timeout)
2865 		pq->timeout = XFRM_QUEUE_TMO_MIN;
2866 
2867 	sched_next = jiffies + pq->timeout;
2868 
2869 	if (del_timer(&pq->hold_timer)) {
2870 		if (time_before(pq->hold_timer.expires, sched_next))
2871 			sched_next = pq->hold_timer.expires;
2872 		xfrm_pol_put(pol);
2873 	}
2874 
2875 	__skb_queue_tail(&pq->hold_queue, skb);
2876 	if (!mod_timer(&pq->hold_timer, sched_next))
2877 		xfrm_pol_hold(pol);
2878 
2879 	spin_unlock_bh(&pq->hold_queue.lock);
2880 
2881 	return 0;
2882 }
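
/* Editor's illustration (not part of this file): the hold timer starts
 * at XFRM_QUEUE_TMO_MIN (HZ/10) and xfrm_policy_queue_process() doubles
 * it on every pass that still has no usable bundle:
 *
 *	HZ/10 -> HZ/5 -> 2*HZ/5 -> 4*HZ/5 -> ...
 *
 * Once the accumulated timeout reaches XFRM_QUEUE_TMO_MAX (60*HZ), the
 * queue is purged instead, so packets wait for key negotiation with
 * exponentially rarer retries rather than forever.
 */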
2883 
2884 static struct xfrm_dst *xfrm_create_dummy_bundle(struct net *net,
2885 						 struct xfrm_flo *xflo,
2886 						 const struct flowi *fl,
2887 						 int num_xfrms,
2888 						 u16 family)
2889 {
2890 	int err;
2891 	struct net_device *dev;
2892 	struct dst_entry *dst;
2893 	struct dst_entry *dst1;
2894 	struct xfrm_dst *xdst;
2895 
2896 	xdst = xfrm_alloc_dst(net, family);
2897 	if (IS_ERR(xdst))
2898 		return xdst;
2899 
2900 	if (!(xflo->flags & XFRM_LOOKUP_QUEUE) ||
2901 	    net->xfrm.sysctl_larval_drop ||
2902 	    num_xfrms <= 0)
2903 		return xdst;
2904 
2905 	dst = xflo->dst_orig;
2906 	dst1 = &xdst->u.dst;
2907 	dst_hold(dst);
2908 	xdst->route = dst;
2909 
2910 	dst_copy_metrics(dst1, dst);
2911 
2912 	dst1->obsolete = DST_OBSOLETE_FORCE_CHK;
2913 	dst1->flags |= DST_XFRM_QUEUE;
2914 	dst1->lastuse = jiffies;
2915 
2916 	dst1->input = dst_discard;
2917 	dst1->output = xdst_queue_output;
2918 
2919 	dst_hold(dst);
2920 	xfrm_dst_set_child(xdst, dst);
2921 	xdst->path = dst;
2922 
2923 	xfrm_init_path((struct xfrm_dst *)dst1, dst, 0);
2924 
2925 	err = -ENODEV;
2926 	dev = dst->dev;
2927 	if (!dev)
2928 		goto free_dst;
2929 
2930 	err = xfrm_fill_dst(xdst, dev, fl);
2931 	if (err)
2932 		goto free_dst;
2933 
2934 out:
2935 	return xdst;
2936 
2937 free_dst:
2938 	dst_release(dst1);
2939 	xdst = ERR_PTR(err);
2940 	goto out;
2941 }
2942 
2943 static struct xfrm_dst *xfrm_bundle_lookup(struct net *net,
2944 					   const struct flowi *fl,
2945 					   u16 family, u8 dir,
2946 					   struct xfrm_flo *xflo, u32 if_id)
2947 {
2948 	struct xfrm_policy *pols[XFRM_POLICY_TYPE_MAX];
2949 	int num_pols = 0, num_xfrms = 0, err;
2950 	struct xfrm_dst *xdst;
2951 
2952 	/* Resolve policies to use if we couldn't get them from
2953 	 * the previous cache entry. */
2954 	num_pols = 1;
2955 	pols[0] = xfrm_policy_lookup(net, fl, family, dir, if_id);
2956 	err = xfrm_expand_policies(fl, family, pols,
2957 					   &num_pols, &num_xfrms);
2958 	if (err < 0)
2959 		goto inc_error;
2960 	if (num_pols == 0)
2961 		return NULL;
2962 	if (num_xfrms <= 0)
2963 		goto make_dummy_bundle;
2964 
2965 	xdst = xfrm_resolve_and_create_bundle(pols, num_pols, fl, family,
2966 					      xflo->dst_orig);
2967 	if (IS_ERR(xdst)) {
2968 		err = PTR_ERR(xdst);
2969 		if (err == -EREMOTE) {
2970 			xfrm_pols_put(pols, num_pols);
2971 			return NULL;
2972 		}
2973 
2974 		if (err != -EAGAIN)
2975 			goto error;
2976 		goto make_dummy_bundle;
2977 	} else if (xdst == NULL) {
2978 		num_xfrms = 0;
2979 		goto make_dummy_bundle;
2980 	}
2981 
2982 	return xdst;
2983 
2984 make_dummy_bundle:
2985 	/* We found policies, but there are no bundles to instantiate:
2986 	 * either the policy blocks, it has no transformations, or
2987 	 * we could not build a template (no xfrm_states). */
2988 	xdst = xfrm_create_dummy_bundle(net, xflo, fl, num_xfrms, family);
2989 	if (IS_ERR(xdst)) {
2990 		xfrm_pols_put(pols, num_pols);
2991 		return ERR_CAST(xdst);
2992 	}
2993 	xdst->num_pols = num_pols;
2994 	xdst->num_xfrms = num_xfrms;
2995 	memcpy(xdst->pols, pols, sizeof(struct xfrm_policy *) * num_pols);
2996 
2997 	return xdst;
2998 
2999 inc_error:
3000 	XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTPOLERROR);
3001 error:
3002 	xfrm_pols_put(pols, num_pols);
3003 	return ERR_PTR(err);
3004 }
3005 
3006 static struct dst_entry *make_blackhole(struct net *net, u16 family,
3007 					struct dst_entry *dst_orig)
3008 {
3009 	const struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
3010 	struct dst_entry *ret;
3011 
3012 	if (!afinfo) {
3013 		dst_release(dst_orig);
3014 		return ERR_PTR(-EINVAL);
3015 	} else {
3016 		ret = afinfo->blackhole_route(net, dst_orig);
3017 	}
3018 	rcu_read_unlock();
3019 
3020 	return ret;
3021 }
3022 
3023 /* Finds/creates a bundle for a given flow and if_id.
3024  *
3025  * At the moment we eat a raw IP route. Mostly to speed up lookups
3026  * on interfaces with disabled IPsec.
3027  *
3028  * xfrm_lookup() uses an if_id of 0 by default, and is provided for
3029  * compatibility.
3030  */
3031 struct dst_entry *xfrm_lookup_with_ifid(struct net *net,
3032 					struct dst_entry *dst_orig,
3033 					const struct flowi *fl,
3034 					const struct sock *sk,
3035 					int flags, u32 if_id)
3036 {
3037 	struct xfrm_policy *pols[XFRM_POLICY_TYPE_MAX];
3038 	struct xfrm_dst *xdst;
3039 	struct dst_entry *dst, *route;
3040 	u16 family = dst_orig->ops->family;
3041 	u8 dir = XFRM_POLICY_OUT;
3042 	int i, err, num_pols, num_xfrms = 0, drop_pols = 0;
3043 
3044 	dst = NULL;
3045 	xdst = NULL;
3046 	route = NULL;
3047 
3048 	sk = sk_const_to_full_sk(sk);
3049 	if (sk && sk->sk_policy[XFRM_POLICY_OUT]) {
3050 		num_pols = 1;
3051 		pols[0] = xfrm_sk_policy_lookup(sk, XFRM_POLICY_OUT, fl, family,
3052 						if_id);
3053 		err = xfrm_expand_policies(fl, family, pols,
3054 					   &num_pols, &num_xfrms);
3055 		if (err < 0)
3056 			goto dropdst;
3057 
3058 		if (num_pols) {
3059 			if (num_xfrms <= 0) {
3060 				drop_pols = num_pols;
3061 				goto no_transform;
3062 			}
3063 
3064 			xdst = xfrm_resolve_and_create_bundle(
3065 					pols, num_pols, fl,
3066 					family, dst_orig);
3067 
3068 			if (IS_ERR(xdst)) {
3069 				xfrm_pols_put(pols, num_pols);
3070 				err = PTR_ERR(xdst);
3071 				if (err == -EREMOTE)
3072 					goto nopol;
3073 
3074 				goto dropdst;
3075 			} else if (xdst == NULL) {
3076 				num_xfrms = 0;
3077 				drop_pols = num_pols;
3078 				goto no_transform;
3079 			}
3080 
3081 			route = xdst->route;
3082 		}
3083 	}
3084 
3085 	if (xdst == NULL) {
3086 		struct xfrm_flo xflo;
3087 
3088 		xflo.dst_orig = dst_orig;
3089 		xflo.flags = flags;
3090 
3091 		/* To accelerate a bit...  */
3092 		if (!if_id && ((dst_orig->flags & DST_NOXFRM) ||
3093 			       !net->xfrm.policy_count[XFRM_POLICY_OUT]))
3094 			goto nopol;
3095 
3096 		xdst = xfrm_bundle_lookup(net, fl, family, dir, &xflo, if_id);
3097 		if (xdst == NULL)
3098 			goto nopol;
3099 		if (IS_ERR(xdst)) {
3100 			err = PTR_ERR(xdst);
3101 			goto dropdst;
3102 		}
3103 
3104 		num_pols = xdst->num_pols;
3105 		num_xfrms = xdst->num_xfrms;
3106 		memcpy(pols, xdst->pols, sizeof(struct xfrm_policy *) * num_pols);
3107 		route = xdst->route;
3108 	}
3109 
3110 	dst = &xdst->u.dst;
3111 	if (route == NULL && num_xfrms > 0) {
3112 		/* The only case in which xfrm_bundle_lookup() returns a
3113 		 * bundle with a null route is when the template could
3114 		 * not be resolved. It means policies are there, but the
3115 		 * bundle could not be created, since we don't yet
3116 		 * have the xfrm_states. We need to wait for the KM to
3117 		 * negotiate new SAs or bail out with an error. */
3118 		if (net->xfrm.sysctl_larval_drop) {
3119 			XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTNOSTATES);
3120 			err = -EREMOTE;
3121 			goto error;
3122 		}
3123 
3124 		err = -EAGAIN;
3125 
3126 		XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTNOSTATES);
3127 		goto error;
3128 	}
3129 
3130 no_transform:
3131 	if (num_pols == 0)
3132 		goto nopol;
3133 
3134 	if ((flags & XFRM_LOOKUP_ICMP) &&
3135 	    !(pols[0]->flags & XFRM_POLICY_ICMP)) {
3136 		err = -ENOENT;
3137 		goto error;
3138 	}
3139 
3140 	for (i = 0; i < num_pols; i++)
3141 		pols[i]->curlft.use_time = ktime_get_real_seconds();
3142 
3143 	if (num_xfrms < 0) {
3144 		/* Prohibit the flow */
3145 		XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTPOLBLOCK);
3146 		err = -EPERM;
3147 		goto error;
3148 	} else if (num_xfrms > 0) {
3149 		/* Flow transformed */
3150 		dst_release(dst_orig);
3151 	} else {
3152 		/* Flow passes untransformed */
3153 		dst_release(dst);
3154 		dst = dst_orig;
3155 	}
3156 ok:
3157 	xfrm_pols_put(pols, drop_pols);
3158 	if (dst && dst->xfrm &&
3159 	    dst->xfrm->props.mode == XFRM_MODE_TUNNEL)
3160 		dst->flags |= DST_XFRM_TUNNEL;
3161 	return dst;
3162 
3163 nopol:
3164 	if ((!dst_orig->dev || !(dst_orig->dev->flags & IFF_LOOPBACK)) &&
3165 	    net->xfrm.policy_default[dir] == XFRM_USERPOLICY_BLOCK) {
3166 		err = -EPERM;
3167 		goto error;
3168 	}
3169 	if (!(flags & XFRM_LOOKUP_ICMP)) {
3170 		dst = dst_orig;
3171 		goto ok;
3172 	}
3173 	err = -ENOENT;
3174 error:
3175 	dst_release(dst);
3176 dropdst:
3177 	if (!(flags & XFRM_LOOKUP_KEEP_DST_REF))
3178 		dst_release(dst_orig);
3179 	xfrm_pols_put(pols, drop_pols);
3180 	return ERR_PTR(err);
3181 }
3182 EXPORT_SYMBOL(xfrm_lookup_with_ifid);
3183 
3184 /* Main function: finds/creates a bundle for a given flow.
3185  *
3186  * At the moment we eat a raw IP route. Mostly to speed up lookups
3187  * on interfaces with disabled IPsec.
3188  */
3189 struct dst_entry *xfrm_lookup(struct net *net, struct dst_entry *dst_orig,
3190 			      const struct flowi *fl, const struct sock *sk,
3191 			      int flags)
3192 {
3193 	return xfrm_lookup_with_ifid(net, dst_orig, fl, sk, flags, 0);
3194 }
3195 EXPORT_SYMBOL(xfrm_lookup);
3196 
3197 /* Callers of xfrm_lookup_route() must ensure a call to dst_output().
3198  * Otherwise we may send out blackholed packets.
3199  */
3200 struct dst_entry *xfrm_lookup_route(struct net *net, struct dst_entry *dst_orig,
3201 				    const struct flowi *fl,
3202 				    const struct sock *sk, int flags)
3203 {
3204 	struct dst_entry *dst = xfrm_lookup(net, dst_orig, fl, sk,
3205 					    flags | XFRM_LOOKUP_QUEUE |
3206 					    XFRM_LOOKUP_KEEP_DST_REF);
3207 
3208 	if (PTR_ERR(dst) == -EREMOTE)
3209 		return make_blackhole(net, dst_orig->ops->family, dst_orig);
3210 
3211 	if (IS_ERR(dst))
3212 		dst_release(dst_orig);
3213 
3214 	return dst;
3215 }
3216 EXPORT_SYMBOL(xfrm_lookup_route);
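
/* Editor's illustration (a sketch mirroring how ip_route_output_flow()
 * chains into xfrm_lookup_route(); error handling abbreviated):
 *
 *	rt = __ip_route_output_key(net, fl4);		// plain routing
 *	if (IS_ERR(rt))
 *		return rt;
 *	dst = xfrm_lookup_route(net, &rt->dst, flowi4_to_flowi(fl4), sk, 0);
 *
 * On IS_ERR(dst) the original route has already been released; on
 * -EREMOTE the caller instead receives a blackhole route, so packets
 * are held or dropped until the KM installs states, never sent in clear.
 */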
3217 
3218 static inline int
3219 xfrm_secpath_reject(int idx, struct sk_buff *skb, const struct flowi *fl)
3220 {
3221 	struct sec_path *sp = skb_sec_path(skb);
3222 	struct xfrm_state *x;
3223 
3224 	if (!sp || idx < 0 || idx >= sp->len)
3225 		return 0;
3226 	x = sp->xvec[idx];
3227 	if (!x->type->reject)
3228 		return 0;
3229 	return x->type->reject(x, skb, fl);
3230 }
3231 
3232 /* When skb is transformed back to its "native" form, we have to
3233  * check policy restrictions. At the moment we do this in a maximally
3234  * stupid way. Shame on me. :-) Of course, connected sockets must
3235  * have the policy cached on them.
3236  */
3237 
3238 static inline int
3239 xfrm_state_ok(const struct xfrm_tmpl *tmpl, const struct xfrm_state *x,
3240 	      unsigned short family)
3241 {
3242 	if (xfrm_state_kern(x))
3243 		return tmpl->optional && !xfrm_state_addr_cmp(tmpl, x, tmpl->encap_family);
3244 	return	x->id.proto == tmpl->id.proto &&
3245 		(x->id.spi == tmpl->id.spi || !tmpl->id.spi) &&
3246 		(x->props.reqid == tmpl->reqid || !tmpl->reqid) &&
3247 		x->props.mode == tmpl->mode &&
3248 		(tmpl->allalgs || (tmpl->aalgos & (1<<x->props.aalgo)) ||
3249 		 !(xfrm_id_proto_match(tmpl->id.proto, IPSEC_PROTO_ANY))) &&
3250 		!(x->props.mode != XFRM_MODE_TRANSPORT &&
3251 		  xfrm_state_addr_cmp(tmpl, x, family));
3252 }
3253 
3254 /*
3255  * Zero or a positive value is returned when validation succeeds (either a
3256  * bypass because of an optional transport-mode template, or the next index
3257  * after the secpath state that matched the template).
3258  * -1 is returned when no matching template is found.
3259  * Otherwise "-2 - errored_index" is returned.
3260  */
3261 static inline int
3262 xfrm_policy_ok(const struct xfrm_tmpl *tmpl, const struct sec_path *sp, int start,
3263 	       unsigned short family)
3264 {
3265 	int idx = start;
3266 
3267 	if (tmpl->optional) {
3268 		if (tmpl->mode == XFRM_MODE_TRANSPORT)
3269 			return start;
3270 	} else
3271 		start = -1;
3272 	for (; idx < sp->len; idx++) {
3273 		if (xfrm_state_ok(tmpl, sp->xvec[idx], family))
3274 			return ++idx;
3275 		if (sp->xvec[idx]->props.mode != XFRM_MODE_TRANSPORT) {
3276 			if (start == -1)
3277 				start = -2-idx;
3278 			break;
3279 		}
3280 	}
3281 	return start;
3282 }
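
/* Editor's worked example for xfrm_policy_ok() (illustrative): with a
 * secpath of { transport ESP, tunnel ESP } and a required tunnel-mode
 * template, scanning from start == 0 skips the transport state, matches
 * the tunnel state at index 1 and returns 2 (the next index to check).
 * An optional transport-mode template returns start untouched (bypass);
 * a required template stopped by an unmatched tunnel state at index i
 * returns -2 - i, flagging which state to reject.
 */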
3283 
3284 static void
3285 decode_session4(struct sk_buff *skb, struct flowi *fl, bool reverse)
3286 {
3287 	const struct iphdr *iph = ip_hdr(skb);
3288 	int ihl = iph->ihl;
3289 	u8 *xprth = skb_network_header(skb) + ihl * 4;
3290 	struct flowi4 *fl4 = &fl->u.ip4;
3291 	int oif = 0;
3292 
3293 	if (skb_dst(skb) && skb_dst(skb)->dev)
3294 		oif = skb_dst(skb)->dev->ifindex;
3295 
3296 	memset(fl4, 0, sizeof(struct flowi4));
3297 	fl4->flowi4_mark = skb->mark;
3298 	fl4->flowi4_oif = reverse ? skb->skb_iif : oif;
3299 
3300 	fl4->flowi4_proto = iph->protocol;
3301 	fl4->daddr = reverse ? iph->saddr : iph->daddr;
3302 	fl4->saddr = reverse ? iph->daddr : iph->saddr;
3303 	fl4->flowi4_tos = iph->tos & ~INET_ECN_MASK;
3304 
3305 	if (!ip_is_fragment(iph)) {
3306 		switch (iph->protocol) {
3307 		case IPPROTO_UDP:
3308 		case IPPROTO_UDPLITE:
3309 		case IPPROTO_TCP:
3310 		case IPPROTO_SCTP:
3311 		case IPPROTO_DCCP:
3312 			if (xprth + 4 < skb->data ||
3313 			    pskb_may_pull(skb, xprth + 4 - skb->data)) {
3314 				__be16 *ports;
3315 
3316 				xprth = skb_network_header(skb) + ihl * 4;
3317 				ports = (__be16 *)xprth;
3318 
3319 				fl4->fl4_sport = ports[!!reverse];
3320 				fl4->fl4_dport = ports[!reverse];
3321 			}
3322 			break;
3323 		case IPPROTO_ICMP:
3324 			if (xprth + 2 < skb->data ||
3325 			    pskb_may_pull(skb, xprth + 2 - skb->data)) {
3326 				u8 *icmp;
3327 
3328 				xprth = skb_network_header(skb) + ihl * 4;
3329 				icmp = xprth;
3330 
3331 				fl4->fl4_icmp_type = icmp[0];
3332 				fl4->fl4_icmp_code = icmp[1];
3333 			}
3334 			break;
3335 		case IPPROTO_GRE:
3336 			if (xprth + 12 < skb->data ||
3337 			    pskb_may_pull(skb, xprth + 12 - skb->data)) {
3338 				__be16 *greflags;
3339 				__be32 *gre_hdr;
3340 
3341 				xprth = skb_network_header(skb) + ihl * 4;
3342 				greflags = (__be16 *)xprth;
3343 				gre_hdr = (__be32 *)xprth;
3344 
3345 				if (greflags[0] & GRE_KEY) {
3346 					if (greflags[0] & GRE_CSUM)
3347 						gre_hdr++;
3348 					fl4->fl4_gre_key = gre_hdr[1];
3349 				}
3350 			}
3351 			break;
3352 		default:
3353 			break;
3354 		}
3355 	}
3356 }
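
/* Editor's illustration (made-up addresses): for a forward TCP packet
 * 10.0.0.1:4433 -> 192.0.2.7:443 the function above fills in
 * fl4->saddr = 10.0.0.1, fl4->daddr = 192.0.2.7, fl4->fl4_sport = 4433,
 * fl4->fl4_dport = 443 and fl4->flowi4_proto = IPPROTO_TCP; with
 * reverse == true (the input path), the address and port pairs are
 * swapped so the flow describes the return direction.
 */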
3357 
3358 #if IS_ENABLED(CONFIG_IPV6)
3359 static void
3360 decode_session6(struct sk_buff *skb, struct flowi *fl, bool reverse)
3361 {
3362 	struct flowi6 *fl6 = &fl->u.ip6;
3363 	int onlyproto = 0;
3364 	const struct ipv6hdr *hdr = ipv6_hdr(skb);
3365 	u32 offset = sizeof(*hdr);
3366 	struct ipv6_opt_hdr *exthdr;
3367 	const unsigned char *nh = skb_network_header(skb);
3368 	u16 nhoff = IP6CB(skb)->nhoff;
3369 	int oif = 0;
3370 	u8 nexthdr;
3371 
3372 	if (!nhoff)
3373 		nhoff = offsetof(struct ipv6hdr, nexthdr);
3374 
3375 	nexthdr = nh[nhoff];
3376 
3377 	if (skb_dst(skb) && skb_dst(skb)->dev)
3378 		oif = skb_dst(skb)->dev->ifindex;
3379 
3380 	memset(fl6, 0, sizeof(struct flowi6));
3381 	fl6->flowi6_mark = skb->mark;
3382 	fl6->flowi6_oif = reverse ? skb->skb_iif : oif;
3383 
3384 	fl6->daddr = reverse ? hdr->saddr : hdr->daddr;
3385 	fl6->saddr = reverse ? hdr->daddr : hdr->saddr;
3386 
3387 	while (nh + offset + sizeof(*exthdr) < skb->data ||
3388 	       pskb_may_pull(skb, nh + offset + sizeof(*exthdr) - skb->data)) {
3389 		nh = skb_network_header(skb);
3390 		exthdr = (struct ipv6_opt_hdr *)(nh + offset);
3391 
3392 		switch (nexthdr) {
3393 		case NEXTHDR_FRAGMENT:
3394 			onlyproto = 1;
3395 			fallthrough;
3396 		case NEXTHDR_ROUTING:
3397 		case NEXTHDR_HOP:
3398 		case NEXTHDR_DEST:
3399 			offset += ipv6_optlen(exthdr);
3400 			nexthdr = exthdr->nexthdr;
3401 			break;
3402 		case IPPROTO_UDP:
3403 		case IPPROTO_UDPLITE:
3404 		case IPPROTO_TCP:
3405 		case IPPROTO_SCTP:
3406 		case IPPROTO_DCCP:
3407 			if (!onlyproto && (nh + offset + 4 < skb->data ||
3408 			     pskb_may_pull(skb, nh + offset + 4 - skb->data))) {
3409 				__be16 *ports;
3410 
3411 				nh = skb_network_header(skb);
3412 				ports = (__be16 *)(nh + offset);
3413 				fl6->fl6_sport = ports[!!reverse];
3414 				fl6->fl6_dport = ports[!reverse];
3415 			}
3416 			fl6->flowi6_proto = nexthdr;
3417 			return;
3418 		case IPPROTO_ICMPV6:
3419 			if (!onlyproto && (nh + offset + 2 < skb->data ||
3420 			    pskb_may_pull(skb, nh + offset + 2 - skb->data))) {
3421 				u8 *icmp;
3422 
3423 				nh = skb_network_header(skb);
3424 				icmp = (u8 *)(nh + offset);
3425 				fl6->fl6_icmp_type = icmp[0];
3426 				fl6->fl6_icmp_code = icmp[1];
3427 			}
3428 			fl6->flowi6_proto = nexthdr;
3429 			return;
3430 		case IPPROTO_GRE:
3431 			if (!onlyproto &&
3432 			    (nh + offset + 12 < skb->data ||
3433 			     pskb_may_pull(skb, nh + offset + 12 - skb->data))) {
3434 				struct gre_base_hdr *gre_hdr;
3435 				__be32 *gre_key;
3436 
3437 				nh = skb_network_header(skb);
3438 				gre_hdr = (struct gre_base_hdr *)(nh + offset);
3439 				gre_key = (__be32 *)(gre_hdr + 1);
3440 
3441 				if (gre_hdr->flags & GRE_KEY) {
3442 					if (gre_hdr->flags & GRE_CSUM)
3443 						gre_key++;
3444 					fl6->fl6_gre_key = *gre_key;
3445 				}
3446 			}
3447 			fl6->flowi6_proto = nexthdr;
3448 			return;
3449 
3450 #if IS_ENABLED(CONFIG_IPV6_MIP6)
3451 		case IPPROTO_MH:
3452 			offset += ipv6_optlen(exthdr);
3453 			if (!onlyproto && (nh + offset + 3 < skb->data ||
3454 			    pskb_may_pull(skb, nh + offset + 3 - skb->data))) {
3455 				struct ip6_mh *mh;
3456 
3457 				nh = skb_network_header(skb);
3458 				mh = (struct ip6_mh *)(nh + offset);
3459 				fl6->fl6_mh_type = mh->ip6mh_type;
3460 			}
3461 			fl6->flowi6_proto = nexthdr;
3462 			return;
3463 #endif
3464 		default:
3465 			fl6->flowi6_proto = nexthdr;
3466 			return;
3467 		}
3468 	}
3469 }
3470 #endif
3471 
3472 int __xfrm_decode_session(struct sk_buff *skb, struct flowi *fl,
3473 			  unsigned int family, int reverse)
3474 {
3475 	switch (family) {
3476 	case AF_INET:
3477 		decode_session4(skb, fl, reverse);
3478 		break;
3479 #if IS_ENABLED(CONFIG_IPV6)
3480 	case AF_INET6:
3481 		decode_session6(skb, fl, reverse);
3482 		break;
3483 #endif
3484 	default:
3485 		return -EAFNOSUPPORT;
3486 	}
3487 
3488 	return security_xfrm_decode_session(skb, &fl->flowi_secid);
3489 }
3490 EXPORT_SYMBOL(__xfrm_decode_session);
3491 
3492 static inline int secpath_has_nontransport(const struct sec_path *sp, int k, int *idxp)
3493 {
3494 	for (; k < sp->len; k++) {
3495 		if (sp->xvec[k]->props.mode != XFRM_MODE_TRANSPORT) {
3496 			*idxp = k;
3497 			return 1;
3498 		}
3499 	}
3500 
3501 	return 0;
3502 }
3503 
3504 int __xfrm_policy_check(struct sock *sk, int dir, struct sk_buff *skb,
3505 			unsigned short family)
3506 {
3507 	struct net *net = dev_net(skb->dev);
3508 	struct xfrm_policy *pol;
3509 	struct xfrm_policy *pols[XFRM_POLICY_TYPE_MAX];
3510 	int npols = 0;
3511 	int xfrm_nr;
3512 	int pi;
3513 	int reverse;
3514 	struct flowi fl;
3515 	int xerr_idx = -1;
3516 	const struct xfrm_if_cb *ifcb;
3517 	struct sec_path *sp;
3518 	u32 if_id = 0;
3519 
3520 	rcu_read_lock();
3521 	ifcb = xfrm_if_get_cb();
3522 
3523 	if (ifcb) {
3524 		struct xfrm_if_decode_session_result r;
3525 
3526 		if (ifcb->decode_session(skb, family, &r)) {
3527 			if_id = r.if_id;
3528 			net = r.net;
3529 		}
3530 	}
3531 	rcu_read_unlock();
3532 
3533 	reverse = dir & ~XFRM_POLICY_MASK;
3534 	dir &= XFRM_POLICY_MASK;
3535 
3536 	if (__xfrm_decode_session(skb, &fl, family, reverse) < 0) {
3537 		XFRM_INC_STATS(net, LINUX_MIB_XFRMINHDRERROR);
3538 		return 0;
3539 	}
3540 
3541 	nf_nat_decode_session(skb, &fl, family);
3542 
3543 	/* First, check the used SAs against their selectors. */
3544 	sp = skb_sec_path(skb);
3545 	if (sp) {
3546 		int i;
3547 
3548 		for (i = sp->len - 1; i >= 0; i--) {
3549 			struct xfrm_state *x = sp->xvec[i];
3550 			if (!xfrm_selector_match(&x->sel, &fl, family)) {
3551 				XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATEMISMATCH);
3552 				return 0;
3553 			}
3554 		}
3555 	}
3556 
3557 	pol = NULL;
3558 	sk = sk_to_full_sk(sk);
3559 	if (sk && sk->sk_policy[dir]) {
3560 		pol = xfrm_sk_policy_lookup(sk, dir, &fl, family, if_id);
3561 		if (IS_ERR(pol)) {
3562 			XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLERROR);
3563 			return 0;
3564 		}
3565 	}
3566 
3567 	if (!pol)
3568 		pol = xfrm_policy_lookup(net, &fl, family, dir, if_id);
3569 
3570 	if (IS_ERR(pol)) {
3571 		XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLERROR);
3572 		return 0;
3573 	}
3574 
3575 	if (!pol) {
3576 		if (net->xfrm.policy_default[dir] == XFRM_USERPOLICY_BLOCK) {
3577 			XFRM_INC_STATS(net, LINUX_MIB_XFRMINNOPOLS);
3578 			return 0;
3579 		}
3580 
3581 		if (sp && secpath_has_nontransport(sp, 0, &xerr_idx)) {
3582 			xfrm_secpath_reject(xerr_idx, skb, &fl);
3583 			XFRM_INC_STATS(net, LINUX_MIB_XFRMINNOPOLS);
3584 			return 0;
3585 		}
3586 		return 1;
3587 	}
3588 
3589 	pol->curlft.use_time = ktime_get_real_seconds();
3590 
3591 	pols[0] = pol;
3592 	npols++;
3593 #ifdef CONFIG_XFRM_SUB_POLICY
3594 	if (pols[0]->type != XFRM_POLICY_TYPE_MAIN) {
3595 		pols[1] = xfrm_policy_lookup_bytype(net, XFRM_POLICY_TYPE_MAIN,
3596 						    &fl, family,
3597 						    XFRM_POLICY_IN, if_id);
3598 		if (pols[1]) {
3599 			if (IS_ERR(pols[1])) {
3600 				XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLERROR);
3601 				xfrm_pol_put(pols[0]);
3602 				return 0;
3603 			}
3604 			pols[1]->curlft.use_time = ktime_get_real_seconds();
3605 			npols++;
3606 		}
3607 	}
3608 #endif
3609 
3610 	if (pol->action == XFRM_POLICY_ALLOW) {
3611 		static struct sec_path dummy;
3612 		struct xfrm_tmpl *tp[XFRM_MAX_DEPTH];
3613 		struct xfrm_tmpl *stp[XFRM_MAX_DEPTH];
3614 		struct xfrm_tmpl **tpp = tp;
3615 		int ti = 0;
3616 		int i, k;
3617 
3618 		sp = skb_sec_path(skb);
3619 		if (!sp)
3620 			sp = &dummy;
3621 
3622 		for (pi = 0; pi < npols; pi++) {
3623 			if (pols[pi] != pol &&
3624 			    pols[pi]->action != XFRM_POLICY_ALLOW) {
3625 				XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLBLOCK);
3626 				goto reject;
3627 			}
3628 			if (ti + pols[pi]->xfrm_nr >= XFRM_MAX_DEPTH) {
3629 				XFRM_INC_STATS(net, LINUX_MIB_XFRMINBUFFERERROR);
3630 				goto reject_error;
3631 			}
3632 			for (i = 0; i < pols[pi]->xfrm_nr; i++)
3633 				tpp[ti++] = &pols[pi]->xfrm_vec[i];
3634 		}
3635 		xfrm_nr = ti;
3636 
3637 		if (net->xfrm.policy_default[dir] == XFRM_USERPOLICY_BLOCK &&
3638 		    !xfrm_nr) {
3639 			XFRM_INC_STATS(net, LINUX_MIB_XFRMINNOSTATES);
3640 			goto reject;
3641 		}
3642 
3643 		if (npols > 1) {
3644 			xfrm_tmpl_sort(stp, tpp, xfrm_nr, family);
3645 			tpp = stp;
3646 		}
3647 
3648 		/* For each tunnel xfrm, find the first matching tmpl.
3649 		 * For each tmpl before that, find the corresponding xfrm.
3650 		 * Order is _important_. Later we will implement
3651 		 * some barriers, but at the moment barriers
3652 		 * are implied between every two transformations.
3653 		 */
3654 		for (i = xfrm_nr-1, k = 0; i >= 0; i--) {
3655 			k = xfrm_policy_ok(tpp[i], sp, k, family);
3656 			if (k < 0) {
3657 				if (k < -1)
3658 					/* "-2 - errored_index" returned */
3659 					xerr_idx = -(2+k);
3660 				XFRM_INC_STATS(net, LINUX_MIB_XFRMINTMPLMISMATCH);
3661 				goto reject;
3662 			}
3663 		}
3664 
3665 		if (secpath_has_nontransport(sp, k, &xerr_idx)) {
3666 			XFRM_INC_STATS(net, LINUX_MIB_XFRMINTMPLMISMATCH);
3667 			goto reject;
3668 		}
3669 
3670 		xfrm_pols_put(pols, npols);
3671 		return 1;
3672 	}
3673 	XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLBLOCK);
3674 
3675 reject:
3676 	xfrm_secpath_reject(xerr_idx, skb, &fl);
3677 reject_error:
3678 	xfrm_pols_put(pols, npols);
3679 	return 0;
3680 }
3681 EXPORT_SYMBOL(__xfrm_policy_check);
3682 
3683 int __xfrm_route_forward(struct sk_buff *skb, unsigned short family)
3684 {
3685 	struct net *net = dev_net(skb->dev);
3686 	struct flowi fl;
3687 	struct dst_entry *dst;
3688 	int res = 1;
3689 
3690 	if (xfrm_decode_session(skb, &fl, family) < 0) {
3691 		XFRM_INC_STATS(net, LINUX_MIB_XFRMFWDHDRERROR);
3692 		return 0;
3693 	}
3694 
3695 	skb_dst_force(skb);
3696 	if (!skb_dst(skb)) {
3697 		XFRM_INC_STATS(net, LINUX_MIB_XFRMFWDHDRERROR);
3698 		return 0;
3699 	}
3700 
3701 	dst = xfrm_lookup(net, skb_dst(skb), &fl, NULL, XFRM_LOOKUP_QUEUE);
3702 	if (IS_ERR(dst)) {
3703 		res = 0;
3704 		dst = NULL;
3705 	}
3706 	skb_dst_set(skb, dst);
3707 	return res;
3708 }
3709 EXPORT_SYMBOL(__xfrm_route_forward);
3710 
3711 /* Optimize later using cookies and generation ids. */
3712 
3713 static struct dst_entry *xfrm_dst_check(struct dst_entry *dst, u32 cookie)
3714 {
3715 	/* Code (such as __xfrm4_bundle_create()) sets dst->obsolete
3716 	 * to DST_OBSOLETE_FORCE_CHK to force all XFRM destinations to
3717 	 * get validated by dst_ops->check on every use.  We do this
3718 	 * because when a normal route referenced by an XFRM dst is
3719 	 * obsoleted we do not go looking around for all parent
3720 	 * referencing XFRM dsts so that we can invalidate them.  It
3721 	 * is just too much work.  Instead we make the checks here on
3722 	 * every use.  For example:
3723 	 *
3724 	 *	XFRM dst A --> IPv4 dst X
3725 	 *
3726 	 * X is the "xdst->route" of A (X is also the "dst->path" of A
3727 	 * in this example).  If X is marked obsolete, "A" will not
3728 	 * notice.  That's what we are validating here via the
3729 	 * stale_bundle() check.
3730 	 *
3731 	 * When a dst is removed from the fib tree, DST_OBSOLETE_DEAD will
3732 	 * be marked on it.
3733 	 * This will force stale_bundle() to fail on any xdst bundle with
3734 	 * this dst linked in it.
3735 	 */
3736 	if (dst->obsolete < 0 && !stale_bundle(dst))
3737 		return dst;
3738 
3739 	return NULL;
3740 }
3741 
3742 static int stale_bundle(struct dst_entry *dst)
3743 {
3744 	return !xfrm_bundle_ok((struct xfrm_dst *)dst);
3745 }
3746 
3747 void xfrm_dst_ifdown(struct dst_entry *dst, struct net_device *dev)
3748 {
3749 	while ((dst = xfrm_dst_child(dst)) && dst->xfrm && dst->dev == dev) {
3750 		dst->dev = blackhole_netdev;
3751 		dev_hold(dst->dev);
3752 		dev_put(dev);
3753 	}
3754 }
3755 EXPORT_SYMBOL(xfrm_dst_ifdown);
3756 
3757 static void xfrm_link_failure(struct sk_buff *skb)
3758 {
3759 	/* Impossible. Such a dst must be popped before it reaches the point of failure. */
3760 }
3761 
3762 static struct dst_entry *xfrm_negative_advice(struct dst_entry *dst)
3763 {
3764 	if (dst) {
3765 		if (dst->obsolete) {
3766 			dst_release(dst);
3767 			dst = NULL;
3768 		}
3769 	}
3770 	return dst;
3771 }
3772 
3773 static void xfrm_init_pmtu(struct xfrm_dst **bundle, int nr)
3774 {
3775 	while (nr--) {
3776 		struct xfrm_dst *xdst = bundle[nr];
3777 		u32 pmtu, route_mtu_cached;
3778 		struct dst_entry *dst;
3779 
3780 		dst = &xdst->u.dst;
3781 		pmtu = dst_mtu(xfrm_dst_child(dst));
3782 		xdst->child_mtu_cached = pmtu;
3783 
3784 		pmtu = xfrm_state_mtu(dst->xfrm, pmtu);
3785 
3786 		route_mtu_cached = dst_mtu(xdst->route);
3787 		xdst->route_mtu_cached = route_mtu_cached;
3788 
3789 		if (pmtu > route_mtu_cached)
3790 			pmtu = route_mtu_cached;
3791 
3792 		dst_metric_set(dst, RTAX_MTU, pmtu);
3793 	}
3794 }
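
/* Editor's illustration (made-up numbers): if the child reports an MTU
 * of 1500 and xfrm_state_mtu() deducts ~60 bytes of ESP/tunnel overhead,
 * the candidate becomes 1440; with a route MTU of 1400 the entry is
 * clamped to min(1440, 1400) == 1400 by the comparison above.
 */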
3795 
3796 /* Check that the bundle accepts the flow and its components are
3797  * still valid.
3798  */
3799 
3800 static int xfrm_bundle_ok(struct xfrm_dst *first)
3801 {
3802 	struct xfrm_dst *bundle[XFRM_MAX_DEPTH];
3803 	struct dst_entry *dst = &first->u.dst;
3804 	struct xfrm_dst *xdst;
3805 	int start_from, nr;
3806 	u32 mtu;
3807 
3808 	if (!dst_check(xfrm_dst_path(dst), ((struct xfrm_dst *)dst)->path_cookie) ||
3809 	    (dst->dev && !netif_running(dst->dev)))
3810 		return 0;
3811 
3812 	if (dst->flags & DST_XFRM_QUEUE)
3813 		return 1;
3814 
3815 	start_from = nr = 0;
3816 	do {
3817 		struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
3818 
3819 		if (dst->xfrm->km.state != XFRM_STATE_VALID)
3820 			return 0;
3821 		if (xdst->xfrm_genid != dst->xfrm->genid)
3822 			return 0;
3823 		if (xdst->num_pols > 0 &&
3824 		    xdst->policy_genid != atomic_read(&xdst->pols[0]->genid))
3825 			return 0;
3826 
3827 		bundle[nr++] = xdst;
3828 
3829 		mtu = dst_mtu(xfrm_dst_child(dst));
3830 		if (xdst->child_mtu_cached != mtu) {
3831 			start_from = nr;
3832 			xdst->child_mtu_cached = mtu;
3833 		}
3834 
3835 		if (!dst_check(xdst->route, xdst->route_cookie))
3836 			return 0;
3837 		mtu = dst_mtu(xdst->route);
3838 		if (xdst->route_mtu_cached != mtu) {
3839 			start_from = nr;
3840 			xdst->route_mtu_cached = mtu;
3841 		}
3842 
3843 		dst = xfrm_dst_child(dst);
3844 	} while (dst->xfrm);
3845 
3846 	if (likely(!start_from))
3847 		return 1;
3848 
3849 	xdst = bundle[start_from - 1];
3850 	mtu = xdst->child_mtu_cached;
3851 	while (start_from--) {
3852 		dst = &xdst->u.dst;
3853 
3854 		mtu = xfrm_state_mtu(dst->xfrm, mtu);
3855 		if (mtu > xdst->route_mtu_cached)
3856 			mtu = xdst->route_mtu_cached;
3857 		dst_metric_set(dst, RTAX_MTU, mtu);
3858 		if (!start_from)
3859 			break;
3860 
3861 		xdst = bundle[start_from - 1];
3862 		xdst->child_mtu_cached = mtu;
3863 	}
3864 
3865 	return 1;
3866 }
3867 
3868 static unsigned int xfrm_default_advmss(const struct dst_entry *dst)
3869 {
3870 	return dst_metric_advmss(xfrm_dst_path(dst));
3871 }
3872 
3873 static unsigned int xfrm_mtu(const struct dst_entry *dst)
3874 {
3875 	unsigned int mtu = dst_metric_raw(dst, RTAX_MTU);
3876 
3877 	return mtu ? : dst_mtu(xfrm_dst_path(dst));
3878 }
3879 
3880 static const void *xfrm_get_dst_nexthop(const struct dst_entry *dst,
3881 					const void *daddr)
3882 {
3883 	while (dst->xfrm) {
3884 		const struct xfrm_state *xfrm = dst->xfrm;
3885 
3886 		dst = xfrm_dst_child(dst);
3887 
3888 		if (xfrm->props.mode == XFRM_MODE_TRANSPORT)
3889 			continue;
3890 		if (xfrm->type->flags & XFRM_TYPE_REMOTE_COADDR)
3891 			daddr = xfrm->coaddr;
3892 		else if (!(xfrm->type->flags & XFRM_TYPE_LOCAL_COADDR))
3893 			daddr = &xfrm->id.daddr;
3894 	}
3895 	return daddr;
3896 }
3897 
3898 static struct neighbour *xfrm_neigh_lookup(const struct dst_entry *dst,
3899 					   struct sk_buff *skb,
3900 					   const void *daddr)
3901 {
3902 	const struct dst_entry *path = xfrm_dst_path(dst);
3903 
3904 	if (!skb)
3905 		daddr = xfrm_get_dst_nexthop(dst, daddr);
3906 	return path->ops->neigh_lookup(path, skb, daddr);
3907 }
3908 
3909 static void xfrm_confirm_neigh(const struct dst_entry *dst, const void *daddr)
3910 {
3911 	const struct dst_entry *path = xfrm_dst_path(dst);
3912 
3913 	daddr = xfrm_get_dst_nexthop(dst, daddr);
3914 	path->ops->confirm_neigh(path, daddr);
3915 }
3916 
3917 int xfrm_policy_register_afinfo(const struct xfrm_policy_afinfo *afinfo, int family)
3918 {
3919 	int err = 0;
3920 
3921 	if (WARN_ON(family >= ARRAY_SIZE(xfrm_policy_afinfo)))
3922 		return -EAFNOSUPPORT;
3923 
3924 	spin_lock(&xfrm_policy_afinfo_lock);
3925 	if (unlikely(xfrm_policy_afinfo[family] != NULL))
3926 		err = -EEXIST;
3927 	else {
3928 		struct dst_ops *dst_ops = afinfo->dst_ops;
3929 		if (likely(dst_ops->kmem_cachep == NULL))
3930 			dst_ops->kmem_cachep = xfrm_dst_cache;
3931 		if (likely(dst_ops->check == NULL))
3932 			dst_ops->check = xfrm_dst_check;
3933 		if (likely(dst_ops->default_advmss == NULL))
3934 			dst_ops->default_advmss = xfrm_default_advmss;
3935 		if (likely(dst_ops->mtu == NULL))
3936 			dst_ops->mtu = xfrm_mtu;
3937 		if (likely(dst_ops->negative_advice == NULL))
3938 			dst_ops->negative_advice = xfrm_negative_advice;
3939 		if (likely(dst_ops->link_failure == NULL))
3940 			dst_ops->link_failure = xfrm_link_failure;
3941 		if (likely(dst_ops->neigh_lookup == NULL))
3942 			dst_ops->neigh_lookup = xfrm_neigh_lookup;
3943 		if (likely(!dst_ops->confirm_neigh))
3944 			dst_ops->confirm_neigh = xfrm_confirm_neigh;
3945 		rcu_assign_pointer(xfrm_policy_afinfo[family], afinfo);
3946 	}
3947 	spin_unlock(&xfrm_policy_afinfo_lock);
3948 
3949 	return err;
3950 }
3951 EXPORT_SYMBOL(xfrm_policy_register_afinfo);
3952 
3953 void xfrm_policy_unregister_afinfo(const struct xfrm_policy_afinfo *afinfo)
3954 {
3955 	struct dst_ops *dst_ops = afinfo->dst_ops;
3956 	int i;
3957 
3958 	for (i = 0; i < ARRAY_SIZE(xfrm_policy_afinfo); i++) {
3959 		if (xfrm_policy_afinfo[i] != afinfo)
3960 			continue;
3961 		RCU_INIT_POINTER(xfrm_policy_afinfo[i], NULL);
3962 		break;
3963 	}
3964 
3965 	synchronize_rcu();
3966 
3967 	dst_ops->kmem_cachep = NULL;
3968 	dst_ops->check = NULL;
3969 	dst_ops->negative_advice = NULL;
3970 	dst_ops->link_failure = NULL;
3971 }
3972 EXPORT_SYMBOL(xfrm_policy_unregister_afinfo);
3973 
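/* The xfrm_if_cb pointer is published under xfrm_if_cb_lock and read
 * under RCU; unregistering waits for readers with synchronize_rcu()
 * before the callbacks may go away.
 */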
3974 void xfrm_if_register_cb(const struct xfrm_if_cb *ifcb)
3975 {
3976 	spin_lock(&xfrm_if_cb_lock);
3977 	rcu_assign_pointer(xfrm_if_cb, ifcb);
3978 	spin_unlock(&xfrm_if_cb_lock);
3979 }
3980 EXPORT_SYMBOL(xfrm_if_register_cb);
3981 
3982 void xfrm_if_unregister_cb(void)
3983 {
3984 	RCU_INIT_POINTER(xfrm_if_cb, NULL);
3985 	synchronize_rcu();
3986 }
3987 EXPORT_SYMBOL(xfrm_if_unregister_cb);
3988 
3989 #ifdef CONFIG_XFRM_STATISTICS
3990 static int __net_init xfrm_statistics_init(struct net *net)
3991 {
3992 	int rv;
3993 	net->mib.xfrm_statistics = alloc_percpu(struct linux_xfrm_mib);
3994 	if (!net->mib.xfrm_statistics)
3995 		return -ENOMEM;
3996 	rv = xfrm_proc_init(net);
3997 	if (rv < 0)
3998 		free_percpu(net->mib.xfrm_statistics);
3999 	return rv;
4000 }
4001 
4002 static void xfrm_statistics_fini(struct net *net)
4003 {
4004 	xfrm_proc_fini(net);
4005 	free_percpu(net->mib.xfrm_statistics);
4006 }
4007 #else
4008 static int __net_init xfrm_statistics_init(struct net *net)
4009 {
4010 	return 0;
4011 }
4012 
4013 static void xfrm_statistics_fini(struct net *net)
4014 {
4015 }
4016 #endif
4017 
4018 static int __net_init xfrm_policy_init(struct net *net)
4019 {
4020 	unsigned int hmask, sz;
4021 	int dir, err;
4022 
4023 	if (net_eq(net, &init_net)) {
4024 		xfrm_dst_cache = kmem_cache_create("xfrm_dst_cache",
4025 					   sizeof(struct xfrm_dst),
4026 					   0, SLAB_HWCACHE_ALIGN|SLAB_PANIC,
4027 					   NULL);
4028 		err = rhashtable_init(&xfrm_policy_inexact_table,
4029 				      &xfrm_pol_inexact_params);
4030 		BUG_ON(err);
4031 	}
4032 
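	/* Start with a small 8-bucket hash per direction. dbits/sbits
	 * default to the full address width, so initially only fully
	 * specified selectors hash by address; xfrm_hash_rebuild() can
	 * lower the thresholds later via policy_hthresh.
	 */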
4033 	hmask = 8 - 1;
4034 	sz = (hmask+1) * sizeof(struct hlist_head);
4035 
4036 	net->xfrm.policy_byidx = xfrm_hash_alloc(sz);
4037 	if (!net->xfrm.policy_byidx)
4038 		goto out_byidx;
4039 	net->xfrm.policy_idx_hmask = hmask;
4040 
4041 	for (dir = 0; dir < XFRM_POLICY_MAX; dir++) {
4042 		struct xfrm_policy_hash *htab;
4043 
4044 		net->xfrm.policy_count[dir] = 0;
4045 		net->xfrm.policy_count[XFRM_POLICY_MAX + dir] = 0;
4046 		INIT_HLIST_HEAD(&net->xfrm.policy_inexact[dir]);
4047 
4048 		htab = &net->xfrm.policy_bydst[dir];
4049 		htab->table = xfrm_hash_alloc(sz);
4050 		if (!htab->table)
4051 			goto out_bydst;
4052 		htab->hmask = hmask;
4053 		htab->dbits4 = 32;
4054 		htab->sbits4 = 32;
4055 		htab->dbits6 = 128;
4056 		htab->sbits6 = 128;
4057 	}
4058 	net->xfrm.policy_hthresh.lbits4 = 32;
4059 	net->xfrm.policy_hthresh.rbits4 = 32;
4060 	net->xfrm.policy_hthresh.lbits6 = 128;
4061 	net->xfrm.policy_hthresh.rbits6 = 128;
4062 
4063 	seqlock_init(&net->xfrm.policy_hthresh.lock);
4064 
4065 	INIT_LIST_HEAD(&net->xfrm.policy_all);
4066 	INIT_LIST_HEAD(&net->xfrm.inexact_bins);
4067 	INIT_WORK(&net->xfrm.policy_hash_work, xfrm_hash_resize);
4068 	INIT_WORK(&net->xfrm.policy_hthresh.work, xfrm_hash_rebuild);
4069 	return 0;
4070 
4071 out_bydst:
4072 	for (dir--; dir >= 0; dir--) {
4073 		struct xfrm_policy_hash *htab;
4074 
4075 		htab = &net->xfrm.policy_bydst[dir];
4076 		xfrm_hash_free(htab->table, sz);
4077 	}
4078 	xfrm_hash_free(net->xfrm.policy_byidx, sz);
4079 out_byidx:
4080 	return -ENOMEM;
4081 }
4082 
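/* Per-net teardown: flush any remaining policies, verify that the hash
 * tables and inexact lists are empty, then free the tables and prune
 * leftover inexact bins.
 */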
4083 static void xfrm_policy_fini(struct net *net)
4084 {
4085 	struct xfrm_pol_inexact_bin *b, *t;
4086 	unsigned int sz;
4087 	int dir;
4088 
4089 	flush_work(&net->xfrm.policy_hash_work);
4090 #ifdef CONFIG_XFRM_SUB_POLICY
4091 	xfrm_policy_flush(net, XFRM_POLICY_TYPE_SUB, false);
4092 #endif
4093 	xfrm_policy_flush(net, XFRM_POLICY_TYPE_MAIN, false);
4094 
4095 	WARN_ON(!list_empty(&net->xfrm.policy_all));
4096 
4097 	for (dir = 0; dir < XFRM_POLICY_MAX; dir++) {
4098 		struct xfrm_policy_hash *htab;
4099 
4100 		WARN_ON(!hlist_empty(&net->xfrm.policy_inexact[dir]));
4101 
4102 		htab = &net->xfrm.policy_bydst[dir];
4103 		sz = (htab->hmask + 1) * sizeof(struct hlist_head);
4104 		WARN_ON(!hlist_empty(htab->table));
4105 		xfrm_hash_free(htab->table, sz);
4106 	}
4107 
4108 	sz = (net->xfrm.policy_idx_hmask + 1) * sizeof(struct hlist_head);
4109 	WARN_ON(!hlist_empty(net->xfrm.policy_byidx));
4110 	xfrm_hash_free(net->xfrm.policy_byidx, sz);
4111 
4112 	spin_lock_bh(&net->xfrm.xfrm_policy_lock);
4113 	list_for_each_entry_safe(b, t, &net->xfrm.inexact_bins, inexact_bins)
4114 		__xfrm_policy_inexact_prune_bin(b, true);
4115 	spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
4116 }
4117 
4118 static int __net_init xfrm_net_init(struct net *net)
4119 {
4120 	int rv;
4121 
4122 	/* Initialize the per-net locks here */
4123 	spin_lock_init(&net->xfrm.xfrm_state_lock);
4124 	spin_lock_init(&net->xfrm.xfrm_policy_lock);
4125 	seqcount_spinlock_init(&net->xfrm.xfrm_policy_hash_generation, &net->xfrm.xfrm_policy_lock);
4126 	mutex_init(&net->xfrm.xfrm_cfg_mutex);
4127 	net->xfrm.policy_default[XFRM_POLICY_IN] = XFRM_USERPOLICY_ACCEPT;
4128 	net->xfrm.policy_default[XFRM_POLICY_FWD] = XFRM_USERPOLICY_ACCEPT;
4129 	net->xfrm.policy_default[XFRM_POLICY_OUT] = XFRM_USERPOLICY_ACCEPT;
4130 
4131 	rv = xfrm_statistics_init(net);
4132 	if (rv < 0)
4133 		goto out_statistics;
4134 	rv = xfrm_state_init(net);
4135 	if (rv < 0)
4136 		goto out_state;
4137 	rv = xfrm_policy_init(net);
4138 	if (rv < 0)
4139 		goto out_policy;
4140 	rv = xfrm_sysctl_init(net);
4141 	if (rv < 0)
4142 		goto out_sysctl;
4143 
4144 	return 0;
4145 
4146 out_sysctl:
4147 	xfrm_policy_fini(net);
4148 out_policy:
4149 	xfrm_state_fini(net);
4150 out_state:
4151 	xfrm_statistics_fini(net);
4152 out_statistics:
4153 	return rv;
4154 }
4155 
4156 static void __net_exit xfrm_net_exit(struct net *net)
4157 {
4158 	xfrm_sysctl_fini(net);
4159 	xfrm_policy_fini(net);
4160 	xfrm_state_fini(net);
4161 	xfrm_statistics_fini(net);
4162 }
4163 
4164 static struct pernet_operations __net_initdata xfrm_net_ops = {
4165 	.init = xfrm_net_init,
4166 	.exit = xfrm_net_exit,
4167 };
4168 
4169 void __init xfrm_init(void)
4170 {
4171 	register_pernet_subsys(&xfrm_net_ops);
4172 	xfrm_dev_init();
4173 	xfrm_input_init();
4174 
4175 #ifdef CONFIG_XFRM_ESPINTCP
4176 	espintcp_init();
4177 #endif
4178 }
4179 
4180 #ifdef CONFIG_AUDITSYSCALL
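/* Append the policy's security context (if any) and its selector to an
 * audit record; prefix lengths are logged only when the selector does
 * not match a single host.
 */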
4181 static void xfrm_audit_common_policyinfo(struct xfrm_policy *xp,
4182 					 struct audit_buffer *audit_buf)
4183 {
4184 	struct xfrm_sec_ctx *ctx = xp->security;
4185 	struct xfrm_selector *sel = &xp->selector;
4186 
4187 	if (ctx)
4188 		audit_log_format(audit_buf, " sec_alg=%u sec_doi=%u sec_obj=%s",
4189 				 ctx->ctx_alg, ctx->ctx_doi, ctx->ctx_str);
4190 
4191 	switch (sel->family) {
4192 	case AF_INET:
4193 		audit_log_format(audit_buf, " src=%pI4", &sel->saddr.a4);
4194 		if (sel->prefixlen_s != 32)
4195 			audit_log_format(audit_buf, " src_prefixlen=%d",
4196 					 sel->prefixlen_s);
4197 		audit_log_format(audit_buf, " dst=%pI4", &sel->daddr.a4);
4198 		if (sel->prefixlen_d != 32)
4199 			audit_log_format(audit_buf, " dst_prefixlen=%d",
4200 					 sel->prefixlen_d);
4201 		break;
4202 	case AF_INET6:
4203 		audit_log_format(audit_buf, " src=%pI6", sel->saddr.a6);
4204 		if (sel->prefixlen_s != 128)
4205 			audit_log_format(audit_buf, " src_prefixlen=%d",
4206 					 sel->prefixlen_s);
4207 		audit_log_format(audit_buf, " dst=%pI6", sel->daddr.a6);
4208 		if (sel->prefixlen_d != 128)
4209 			audit_log_format(audit_buf, " dst_prefixlen=%d",
4210 					 sel->prefixlen_d);
4211 		break;
4212 	}
4213 }
4214 
4215 void xfrm_audit_policy_add(struct xfrm_policy *xp, int result, bool task_valid)
4216 {
4217 	struct audit_buffer *audit_buf;
4218 
4219 	audit_buf = xfrm_audit_start("SPD-add");
4220 	if (audit_buf == NULL)
4221 		return;
4222 	xfrm_audit_helper_usrinfo(task_valid, audit_buf);
4223 	audit_log_format(audit_buf, " res=%u", result);
4224 	xfrm_audit_common_policyinfo(xp, audit_buf);
4225 	audit_log_end(audit_buf);
4226 }
4227 EXPORT_SYMBOL_GPL(xfrm_audit_policy_add);
4228 
4229 void xfrm_audit_policy_delete(struct xfrm_policy *xp, int result,
4230 			      bool task_valid)
4231 {
4232 	struct audit_buffer *audit_buf;
4233 
4234 	audit_buf = xfrm_audit_start("SPD-delete");
4235 	if (audit_buf == NULL)
4236 		return;
4237 	xfrm_audit_helper_usrinfo(task_valid, audit_buf);
4238 	audit_log_format(audit_buf, " res=%u", result);
4239 	xfrm_audit_common_policyinfo(xp, audit_buf);
4240 	audit_log_end(audit_buf);
4241 }
4242 EXPORT_SYMBOL_GPL(xfrm_audit_policy_delete);
4243 #endif
4244 
4245 #ifdef CONFIG_XFRM_MIGRATE
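/* Compare a MIGRATE selector against a policy selector: with
 * IPSEC_ULPROTO_ANY only the family, addresses and prefix lengths must
 * match; otherwise the two selectors must be identical.
 */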
4246 static bool xfrm_migrate_selector_match(const struct xfrm_selector *sel_cmp,
4247 					const struct xfrm_selector *sel_tgt)
4248 {
4249 	if (sel_cmp->proto == IPSEC_ULPROTO_ANY) {
4250 		if (sel_tgt->family == sel_cmp->family &&
4251 		    xfrm_addr_equal(&sel_tgt->daddr, &sel_cmp->daddr,
4252 				    sel_cmp->family) &&
4253 		    xfrm_addr_equal(&sel_tgt->saddr, &sel_cmp->saddr,
4254 				    sel_cmp->family) &&
4255 		    sel_tgt->prefixlen_d == sel_cmp->prefixlen_d &&
4256 		    sel_tgt->prefixlen_s == sel_cmp->prefixlen_s) {
4257 			return true;
4258 		}
4259 	} else {
4260 		if (memcmp(sel_tgt, sel_cmp, sizeof(*sel_tgt)) == 0) {
4261 			return true;
4262 		}
4263 	}
4264 	return false;
4265 }
4266 
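/* Find the policy a MIGRATE request refers to: try the exact bydst hash
 * first, then scan the inexact list, preferring the match with the
 * lowest priority value. Returns the policy with a reference held, or
 * NULL if nothing matched.
 */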
4267 static struct xfrm_policy *xfrm_migrate_policy_find(const struct xfrm_selector *sel,
4268 						    u8 dir, u8 type, struct net *net, u32 if_id)
4269 {
4270 	struct xfrm_policy *pol, *ret = NULL;
4271 	struct hlist_head *chain;
4272 	u32 priority = ~0U;
4273 
4274 	spin_lock_bh(&net->xfrm.xfrm_policy_lock);
4275 	chain = policy_hash_direct(net, &sel->daddr, &sel->saddr, sel->family, dir);
4276 	hlist_for_each_entry(pol, chain, bydst) {
4277 		if ((if_id == 0 || pol->if_id == if_id) &&
4278 		    xfrm_migrate_selector_match(sel, &pol->selector) &&
4279 		    pol->type == type) {
4280 			ret = pol;
4281 			priority = ret->priority;
4282 			break;
4283 		}
4284 	}
4285 	chain = &net->xfrm.policy_inexact[dir];
4286 	hlist_for_each_entry(pol, chain, bydst_inexact_list) {
4287 		if ((pol->priority >= priority) && ret)
4288 			break;
4289 
4290 		if ((if_id == 0 || pol->if_id == if_id) &&
4291 		    xfrm_migrate_selector_match(sel, &pol->selector) &&
4292 		    pol->type == type) {
4293 			ret = pol;
4294 			break;
4295 		}
4296 	}
4297 
4298 	xfrm_pol_hold(ret);
4299 
4300 	spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
4301 
4302 	return ret;
4303 }
4304 
4305 static int migrate_tmpl_match(const struct xfrm_migrate *m, const struct xfrm_tmpl *t)
4306 {
4307 	int match = 0;
4308 
4309 	if (t->mode == m->mode && t->id.proto == m->proto &&
4310 	    (m->reqid == 0 || t->reqid == m->reqid)) {
4311 		switch (t->mode) {
4312 		case XFRM_MODE_TUNNEL:
4313 		case XFRM_MODE_BEET:
4314 			if (xfrm_addr_equal(&t->id.daddr, &m->old_daddr,
4315 					    m->old_family) &&
4316 			    xfrm_addr_equal(&t->saddr, &m->old_saddr,
4317 					    m->old_family)) {
4318 				match = 1;
4319 			}
4320 			break;
4321 		case XFRM_MODE_TRANSPORT:
4322 			/* In transport mode the template stores no IP
4323 			 * addresses, so comparing the mode and protocol
4324 			 * is sufficient. */
4325 			match = 1;
4326 			break;
4327 		default:
4328 			break;
4329 		}
4330 	}
4331 	return match;
4332 }
4333 
4334 /* update endpoint address(es) of template(s) */
4335 static int xfrm_policy_migrate(struct xfrm_policy *pol,
4336 			       struct xfrm_migrate *m, int num_migrate)
4337 {
4338 	struct xfrm_migrate *mp;
4339 	int i, j, n = 0;
4340 
4341 	write_lock_bh(&pol->lock);
4342 	if (unlikely(pol->walk.dead)) {
4343 		/* target policy has been deleted */
4344 		write_unlock_bh(&pol->lock);
4345 		return -ENOENT;
4346 	}
4347 
4348 	for (i = 0; i < pol->xfrm_nr; i++) {
4349 		for (j = 0, mp = m; j < num_migrate; j++, mp++) {
4350 			if (!migrate_tmpl_match(mp, &pol->xfrm_vec[i]))
4351 				continue;
4352 			n++;
4353 			if (pol->xfrm_vec[i].mode != XFRM_MODE_TUNNEL &&
4354 			    pol->xfrm_vec[i].mode != XFRM_MODE_BEET)
4355 				continue;
4356 			/* update endpoints */
4357 			memcpy(&pol->xfrm_vec[i].id.daddr, &mp->new_daddr,
4358 			       sizeof(pol->xfrm_vec[i].id.daddr));
4359 			memcpy(&pol->xfrm_vec[i].saddr, &mp->new_saddr,
4360 			       sizeof(pol->xfrm_vec[i].saddr));
4361 			pol->xfrm_vec[i].encap_family = mp->new_family;
4362 			/* flush bundles */
4363 			atomic_inc(&pol->genid);
4364 		}
4365 	}
4366 
4367 	write_unlock_bh(&pol->lock);
4368 
4369 	if (!n)
4370 		return -ENODATA;
4371 
4372 	return 0;
4373 }
4374 
4375 static int xfrm_migrate_check(const struct xfrm_migrate *m, int num_migrate)
4376 {
4377 	int i, j;
4378 
4379 	if (num_migrate < 1 || num_migrate > XFRM_MAX_DEPTH)
4380 		return -EINVAL;
4381 
4382 	for (i = 0; i < num_migrate; i++) {
4383 		if (xfrm_addr_any(&m[i].new_daddr, m[i].new_family) ||
4384 		    xfrm_addr_any(&m[i].new_saddr, m[i].new_family))
4385 			return -EINVAL;
4386 
4387 		/* check for duplicated entries */
4388 		for (j = i + 1; j < num_migrate; j++) {
4389 			if (!memcmp(&m[i].old_daddr, &m[j].old_daddr,
4390 				    sizeof(m[i].old_daddr)) &&
4391 			    !memcmp(&m[i].old_saddr, &m[j].old_saddr,
4392 				    sizeof(m[i].old_saddr)) &&
4393 			    m[i].proto == m[j].proto &&
4394 			    m[i].mode == m[j].mode &&
4395 			    m[i].reqid == m[j].reqid &&
4396 			    m[i].old_family == m[j].old_family)
4397 				return -EINVAL;
4398 		}
4399 	}
4400 
4401 	return 0;
4402 }
4403 
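/* Entry point for key managers (PF_KEY or netlink MIGRATE messages,
 * e.g. for MIPv6 or MOBIKE-style address updates). Editorial sketch of
 * a minimal single-SA call; the struct fields shown exist in
 * struct xfrm_migrate, but the surrounding variables (peer, old_local,
 * new_local, sel, net) are hypothetical:
 *
 *	struct xfrm_migrate mig = {
 *		.old_daddr	= peer,
 *		.old_saddr	= old_local,
 *		.new_daddr	= peer,
 *		.new_saddr	= new_local,
 *		.proto		= IPPROTO_ESP,
 *		.mode		= XFRM_MODE_TUNNEL,
 *		.old_family	= AF_INET,
 *		.new_family	= AF_INET,
 *	};
 *
 *	err = xfrm_migrate(&sel, XFRM_POLICY_OUT, XFRM_POLICY_TYPE_MAIN,
 *			   &mig, 1, NULL, net, NULL, 0);
 */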
4404 int xfrm_migrate(const struct xfrm_selector *sel, u8 dir, u8 type,
4405 		 struct xfrm_migrate *m, int num_migrate,
4406 		 struct xfrm_kmaddress *k, struct net *net,
4407 		 struct xfrm_encap_tmpl *encap, u32 if_id)
4408 {
4409 	int i, err, nx_cur = 0, nx_new = 0;
4410 	struct xfrm_policy *pol = NULL;
4411 	struct xfrm_state *x, *xc;
4412 	struct xfrm_state *x_cur[XFRM_MAX_DEPTH];
4413 	struct xfrm_state *x_new[XFRM_MAX_DEPTH];
4414 	struct xfrm_migrate *mp;
4415 
4416 	/* Stage 0 - sanity checks */
4417 	if ((err = xfrm_migrate_check(m, num_migrate)) < 0)
4418 		goto out;
4419 
4420 	if (dir >= XFRM_POLICY_MAX) {
4421 		err = -EINVAL;
4422 		goto out;
4423 	}
4424 
4425 	/* Stage 1 - find policy */
4426 	if ((pol = xfrm_migrate_policy_find(sel, dir, type, net, if_id)) == NULL) {
4427 		err = -ENOENT;
4428 		goto out;
4429 	}
4430 
4431 	/* Stage 2 - find and update state(s) */
4432 	for (i = 0, mp = m; i < num_migrate; i++, mp++) {
4433 		if ((x = xfrm_migrate_state_find(mp, net, if_id))) {
4434 			x_cur[nx_cur] = x;
4435 			nx_cur++;
4436 			xc = xfrm_state_migrate(x, mp, encap);
4437 			if (xc) {
4438 				x_new[nx_new] = xc;
4439 				nx_new++;
4440 			} else {
4441 				err = -ENODATA;
4442 				goto restore_state;
4443 			}
4444 		}
4445 	}
4446 
4447 	/* Stage 3 - update policy */
4448 	if ((err = xfrm_policy_migrate(pol, m, num_migrate)) < 0)
4449 		goto restore_state;
4450 
4451 	/* Stage 4 - delete old state(s) */
4452 	if (nx_cur) {
4453 		xfrm_states_put(x_cur, nx_cur);
4454 		xfrm_states_delete(x_cur, nx_cur);
4455 	}
4456 
4457 	/* Stage 5 - announce */
4458 	km_migrate(sel, dir, type, m, num_migrate, k, encap);
4459 
4460 	xfrm_pol_put(pol);
4461 
4462 	return 0;
4463 out:
4464 	return err;
4465 
4466 restore_state:
4467 	if (pol)
4468 		xfrm_pol_put(pol);
4469 	if (nx_cur)
4470 		xfrm_states_put(x_cur, nx_cur);
4471 	if (nx_new)
4472 		xfrm_states_delete(x_new, nx_new);
4473 
4474 	return err;
4475 }
4476 EXPORT_SYMBOL(xfrm_migrate);
4477 #endif
4478