xref: /openbmc/linux/net/xfrm/xfrm_policy.c (revision 6724ed7f)
1 /*
2  * xfrm_policy.c
3  *
4  * Changes:
5  *	Mitsuru KANDA @USAGI
6  * 	Kazunori MIYAZAWA @USAGI
7  * 	Kunihiro Ishiguro <kunihiro@ipinfusion.com>
8  * 		IPv6 support
9  * 	Kazunori MIYAZAWA @USAGI
10  * 	YOSHIFUJI Hideaki
11  * 		Split up af-specific portion
12  *	Derek Atkins <derek@ihtfp.com>		Add the post_input processor
13  *
14  */
15 
16 #include <linux/err.h>
17 #include <linux/slab.h>
18 #include <linux/kmod.h>
19 #include <linux/list.h>
20 #include <linux/spinlock.h>
21 #include <linux/workqueue.h>
22 #include <linux/notifier.h>
23 #include <linux/netdevice.h>
24 #include <linux/netfilter.h>
25 #include <linux/module.h>
26 #include <linux/cache.h>
27 #include <linux/cpu.h>
28 #include <linux/audit.h>
29 #include <net/dst.h>
30 #include <net/flow.h>
31 #include <net/xfrm.h>
32 #include <net/ip.h>
33 #ifdef CONFIG_XFRM_STATISTICS
34 #include <net/snmp.h>
35 #endif
36 
37 #include "xfrm_hash.h"
38 
39 #define XFRM_QUEUE_TMO_MIN ((unsigned)(HZ/10))
40 #define XFRM_QUEUE_TMO_MAX ((unsigned)(60*HZ))
41 #define XFRM_MAX_QUEUE_LEN	100
42 
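/*
 * Note on the two timeout constants above: xdst_queue_output() arms the
 * policy hold-queue timer at XFRM_QUEUE_TMO_MIN and
 * xfrm_policy_queue_process() doubles it on every unsuccessful retry, so
 * the retry interval runs 100ms, 200ms, 400ms, ... until it reaches
 * XFRM_QUEUE_TMO_MAX (60s), at which point the queue is purged.
 */
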
43 struct xfrm_flo {
44 	struct dst_entry *dst_orig;
45 	u8 flags;
46 };
47 
48 static DEFINE_PER_CPU(struct xfrm_dst *, xfrm_last_dst);
49 static struct work_struct *xfrm_pcpu_work __read_mostly;
50 static DEFINE_SPINLOCK(xfrm_policy_afinfo_lock);
51 static struct xfrm_policy_afinfo const __rcu *xfrm_policy_afinfo[AF_INET6 + 1]
52 						__read_mostly;
53 
54 static struct kmem_cache *xfrm_dst_cache __read_mostly;
55 static __read_mostly seqcount_t xfrm_policy_hash_generation;
56 
57 static void xfrm_init_pmtu(struct dst_entry *dst);
58 static int stale_bundle(struct dst_entry *dst);
59 static int xfrm_bundle_ok(struct xfrm_dst *xdst);
60 static void xfrm_policy_queue_process(struct timer_list *t);
61 
62 static void __xfrm_policy_link(struct xfrm_policy *pol, int dir);
63 static struct xfrm_policy *__xfrm_policy_unlink(struct xfrm_policy *pol,
64 						int dir);
65 
66 static inline bool xfrm_pol_hold_rcu(struct xfrm_policy *policy)
67 {
68 	return refcount_inc_not_zero(&policy->refcnt);
69 }
70 
71 static inline bool
72 __xfrm4_selector_match(const struct xfrm_selector *sel, const struct flowi *fl)
73 {
74 	const struct flowi4 *fl4 = &fl->u.ip4;
75 
76 	return  addr4_match(fl4->daddr, sel->daddr.a4, sel->prefixlen_d) &&
77 		addr4_match(fl4->saddr, sel->saddr.a4, sel->prefixlen_s) &&
78 		!((xfrm_flowi_dport(fl, &fl4->uli) ^ sel->dport) & sel->dport_mask) &&
79 		!((xfrm_flowi_sport(fl, &fl4->uli) ^ sel->sport) & sel->sport_mask) &&
80 		(fl4->flowi4_proto == sel->proto || !sel->proto) &&
81 		(fl4->flowi4_oif == sel->ifindex || !sel->ifindex);
82 }
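
/*
 * Illustrative example (annotation, not part of the original file): a
 * selector with daddr.a4 == 10.0.0.0, prefixlen_d == 8, dport == htons(80)
 * and dport_mask == htons(0xffff) matches a flow to 10.1.2.3:80 but not
 * to 10.1.2.3:443; a zero port mask, proto or ifindex acts as a wildcard.
 */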
83 
84 static inline bool
85 __xfrm6_selector_match(const struct xfrm_selector *sel, const struct flowi *fl)
86 {
87 	const struct flowi6 *fl6 = &fl->u.ip6;
88 
89 	return  addr_match(&fl6->daddr, &sel->daddr, sel->prefixlen_d) &&
90 		addr_match(&fl6->saddr, &sel->saddr, sel->prefixlen_s) &&
91 		!((xfrm_flowi_dport(fl, &fl6->uli) ^ sel->dport) & sel->dport_mask) &&
92 		!((xfrm_flowi_sport(fl, &fl6->uli) ^ sel->sport) & sel->sport_mask) &&
93 		(fl6->flowi6_proto == sel->proto || !sel->proto) &&
94 		(fl6->flowi6_oif == sel->ifindex || !sel->ifindex);
95 }
96 
97 bool xfrm_selector_match(const struct xfrm_selector *sel, const struct flowi *fl,
98 			 unsigned short family)
99 {
100 	switch (family) {
101 	case AF_INET:
102 		return __xfrm4_selector_match(sel, fl);
103 	case AF_INET6:
104 		return __xfrm6_selector_match(sel, fl);
105 	}
106 	return false;
107 }
108 
109 static const struct xfrm_policy_afinfo *xfrm_policy_get_afinfo(unsigned short family)
110 {
111 	const struct xfrm_policy_afinfo *afinfo;
112 
113 	if (unlikely(family >= ARRAY_SIZE(xfrm_policy_afinfo)))
114 		return NULL;
115 	rcu_read_lock();
116 	afinfo = rcu_dereference(xfrm_policy_afinfo[family]);
117 	if (unlikely(!afinfo))
118 		rcu_read_unlock();
119 	return afinfo;
120 }
121 
122 struct dst_entry *__xfrm_dst_lookup(struct net *net, int tos, int oif,
123 				    const xfrm_address_t *saddr,
124 				    const xfrm_address_t *daddr,
125 				    int family, u32 mark)
126 {
127 	const struct xfrm_policy_afinfo *afinfo;
128 	struct dst_entry *dst;
129 
130 	afinfo = xfrm_policy_get_afinfo(family);
131 	if (unlikely(afinfo == NULL))
132 		return ERR_PTR(-EAFNOSUPPORT);
133 
134 	dst = afinfo->dst_lookup(net, tos, oif, saddr, daddr, mark);
135 
136 	rcu_read_unlock();
137 
138 	return dst;
139 }
140 EXPORT_SYMBOL(__xfrm_dst_lookup);
141 
142 static inline struct dst_entry *xfrm_dst_lookup(struct xfrm_state *x,
143 						int tos, int oif,
144 						xfrm_address_t *prev_saddr,
145 						xfrm_address_t *prev_daddr,
146 						int family, u32 mark)
147 {
148 	struct net *net = xs_net(x);
149 	xfrm_address_t *saddr = &x->props.saddr;
150 	xfrm_address_t *daddr = &x->id.daddr;
151 	struct dst_entry *dst;
152 
153 	if (x->type->flags & XFRM_TYPE_LOCAL_COADDR) {
154 		saddr = x->coaddr;
155 		daddr = prev_daddr;
156 	}
157 	if (x->type->flags & XFRM_TYPE_REMOTE_COADDR) {
158 		saddr = prev_saddr;
159 		daddr = x->coaddr;
160 	}
161 
162 	dst = __xfrm_dst_lookup(net, tos, oif, saddr, daddr, family, mark);
163 
164 	if (!IS_ERR(dst)) {
165 		if (prev_saddr != saddr)
166 			memcpy(prev_saddr, saddr,  sizeof(*prev_saddr));
167 		if (prev_daddr != daddr)
168 			memcpy(prev_daddr, daddr,  sizeof(*prev_daddr));
169 	}
170 
171 	return dst;
172 }
173 
174 static inline unsigned long make_jiffies(long secs)
175 {
176 	if (secs >= (MAX_SCHEDULE_TIMEOUT-1)/HZ)
177 		return MAX_SCHEDULE_TIMEOUT-1;
178 	else
179 		return secs*HZ;
180 }
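
/*
 * Illustrative example (annotation): with HZ == 250, make_jiffies(2)
 * returns 500 jiffies; anything at or above (MAX_SCHEDULE_TIMEOUT - 1) / HZ
 * seconds is clamped so the result stays within mod_timer()'s range.
 */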
181 
182 static void xfrm_policy_timer(struct timer_list *t)
183 {
184 	struct xfrm_policy *xp = from_timer(xp, t, timer);
185 	unsigned long now = get_seconds();
186 	long next = LONG_MAX;
187 	int warn = 0;
188 	int dir;
189 
190 	read_lock(&xp->lock);
191 
192 	if (unlikely(xp->walk.dead))
193 		goto out;
194 
195 	dir = xfrm_policy_id2dir(xp->index);
196 
197 	if (xp->lft.hard_add_expires_seconds) {
198 		long tmo = xp->lft.hard_add_expires_seconds +
199 			xp->curlft.add_time - now;
200 		if (tmo <= 0)
201 			goto expired;
202 		if (tmo < next)
203 			next = tmo;
204 	}
205 	if (xp->lft.hard_use_expires_seconds) {
206 		long tmo = xp->lft.hard_use_expires_seconds +
207 			(xp->curlft.use_time ? : xp->curlft.add_time) - now;
208 		if (tmo <= 0)
209 			goto expired;
210 		if (tmo < next)
211 			next = tmo;
212 	}
213 	if (xp->lft.soft_add_expires_seconds) {
214 		long tmo = xp->lft.soft_add_expires_seconds +
215 			xp->curlft.add_time - now;
216 		if (tmo <= 0) {
217 			warn = 1;
218 			tmo = XFRM_KM_TIMEOUT;
219 		}
220 		if (tmo < next)
221 			next = tmo;
222 	}
223 	if (xp->lft.soft_use_expires_seconds) {
224 		long tmo = xp->lft.soft_use_expires_seconds +
225 			(xp->curlft.use_time ? : xp->curlft.add_time) - now;
226 		if (tmo <= 0) {
227 			warn = 1;
228 			tmo = XFRM_KM_TIMEOUT;
229 		}
230 		if (tmo < next)
231 			next = tmo;
232 	}
233 
234 	if (warn)
235 		km_policy_expired(xp, dir, 0, 0);
236 	if (next != LONG_MAX &&
237 	    !mod_timer(&xp->timer, jiffies + make_jiffies(next)))
238 		xfrm_pol_hold(xp);
239 
240 out:
241 	read_unlock(&xp->lock);
242 	xfrm_pol_put(xp);
243 	return;
244 
245 expired:
246 	read_unlock(&xp->lock);
247 	if (!xfrm_policy_delete(xp, dir))
248 		km_policy_expired(xp, dir, 1, 0);
249 	xfrm_pol_put(xp);
250 }
251 
252 /* Allocate xfrm_policy. Not used here; it is intended to be used by pfkeyv2
253  * SPD calls.
254  */
255 
256 struct xfrm_policy *xfrm_policy_alloc(struct net *net, gfp_t gfp)
257 {
258 	struct xfrm_policy *policy;
259 
260 	policy = kzalloc(sizeof(struct xfrm_policy), gfp);
261 
262 	if (policy) {
263 		write_pnet(&policy->xp_net, net);
264 		INIT_LIST_HEAD(&policy->walk.all);
265 		INIT_HLIST_NODE(&policy->bydst);
266 		INIT_HLIST_NODE(&policy->byidx);
267 		rwlock_init(&policy->lock);
268 		refcount_set(&policy->refcnt, 1);
269 		skb_queue_head_init(&policy->polq.hold_queue);
270 		timer_setup(&policy->timer, xfrm_policy_timer, 0);
271 		timer_setup(&policy->polq.hold_timer,
272 			    xfrm_policy_queue_process, 0);
273 	}
274 	return policy;
275 }
276 EXPORT_SYMBOL(xfrm_policy_alloc);
277 
278 static void xfrm_policy_destroy_rcu(struct rcu_head *head)
279 {
280 	struct xfrm_policy *policy = container_of(head, struct xfrm_policy, rcu);
281 
282 	security_xfrm_policy_free(policy->security);
283 	kfree(policy);
284 }
285 
286 /* Destroy xfrm_policy: descendant resources must have been released by this point. */
287 
288 void xfrm_policy_destroy(struct xfrm_policy *policy)
289 {
290 	BUG_ON(!policy->walk.dead);
291 
292 	if (del_timer(&policy->timer) || del_timer(&policy->polq.hold_timer))
293 		BUG();
294 
295 	call_rcu(&policy->rcu, xfrm_policy_destroy_rcu);
296 }
297 EXPORT_SYMBOL(xfrm_policy_destroy);
298 
299 /* Rule must be locked. Release descendant resources, announce
300  * the entry dead. The rule must already be unlinked from all lists.
301  */
302 
303 static void xfrm_policy_kill(struct xfrm_policy *policy)
304 {
305 	policy->walk.dead = 1;
306 
307 	atomic_inc(&policy->genid);
308 
309 	if (del_timer(&policy->polq.hold_timer))
310 		xfrm_pol_put(policy);
311 	skb_queue_purge(&policy->polq.hold_queue);
312 
313 	if (del_timer(&policy->timer))
314 		xfrm_pol_put(policy);
315 
316 	xfrm_pol_put(policy);
317 }
318 
319 static unsigned int xfrm_policy_hashmax __read_mostly = 1 * 1024 * 1024;
320 
321 static inline unsigned int idx_hash(struct net *net, u32 index)
322 {
323 	return __idx_hash(index, net->xfrm.policy_idx_hmask);
324 }
325 
326 /* calculate policy hash thresholds */
327 static void __get_hash_thresh(struct net *net,
328 			      unsigned short family, int dir,
329 			      u8 *dbits, u8 *sbits)
330 {
331 	switch (family) {
332 	case AF_INET:
333 		*dbits = net->xfrm.policy_bydst[dir].dbits4;
334 		*sbits = net->xfrm.policy_bydst[dir].sbits4;
335 		break;
336 
337 	case AF_INET6:
338 		*dbits = net->xfrm.policy_bydst[dir].dbits6;
339 		*sbits = net->xfrm.policy_bydst[dir].sbits6;
340 		break;
341 
342 	default:
343 		*dbits = 0;
344 		*sbits = 0;
345 	}
346 }
347 
348 static struct hlist_head *policy_hash_bysel(struct net *net,
349 					    const struct xfrm_selector *sel,
350 					    unsigned short family, int dir)
351 {
352 	unsigned int hmask = net->xfrm.policy_bydst[dir].hmask;
353 	unsigned int hash;
354 	u8 dbits;
355 	u8 sbits;
356 
357 	__get_hash_thresh(net, family, dir, &dbits, &sbits);
358 	hash = __sel_hash(sel, family, hmask, dbits, sbits);
359 
360 	if (hash == hmask + 1)
361 		return &net->xfrm.policy_inexact[dir];
362 
363 	return rcu_dereference_check(net->xfrm.policy_bydst[dir].table,
364 		     lockdep_is_held(&net->xfrm.xfrm_policy_lock)) + hash;
365 }
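
/*
 * __sel_hash() (xfrm_hash.h) returns hmask + 1 for selectors whose prefix
 * lengths fall below the per-direction dbits/sbits thresholds; such
 * policies cannot be hashed by address and live on the per-direction
 * inexact list instead.
 */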
366 
367 static struct hlist_head *policy_hash_direct(struct net *net,
368 					     const xfrm_address_t *daddr,
369 					     const xfrm_address_t *saddr,
370 					     unsigned short family, int dir)
371 {
372 	unsigned int hmask = net->xfrm.policy_bydst[dir].hmask;
373 	unsigned int hash;
374 	u8 dbits;
375 	u8 sbits;
376 
377 	__get_hash_thresh(net, family, dir, &dbits, &sbits);
378 	hash = __addr_hash(daddr, saddr, family, hmask, dbits, sbits);
379 
380 	return rcu_dereference_check(net->xfrm.policy_bydst[dir].table,
381 		     lockdep_is_held(&net->xfrm.xfrm_policy_lock)) + hash;
382 }
383 
384 static void xfrm_dst_hash_transfer(struct net *net,
385 				   struct hlist_head *list,
386 				   struct hlist_head *ndsttable,
387 				   unsigned int nhashmask,
388 				   int dir)
389 {
390 	struct hlist_node *tmp, *entry0 = NULL;
391 	struct xfrm_policy *pol;
392 	unsigned int h0 = 0;
393 	u8 dbits;
394 	u8 sbits;
395 
396 redo:
397 	hlist_for_each_entry_safe(pol, tmp, list, bydst) {
398 		unsigned int h;
399 
400 		__get_hash_thresh(net, pol->family, dir, &dbits, &sbits);
401 		h = __addr_hash(&pol->selector.daddr, &pol->selector.saddr,
402 				pol->family, nhashmask, dbits, sbits);
403 		if (!entry0) {
404 			hlist_del_rcu(&pol->bydst);
405 			hlist_add_head_rcu(&pol->bydst, ndsttable + h);
406 			h0 = h;
407 		} else {
408 			if (h != h0)
409 				continue;
410 			hlist_del_rcu(&pol->bydst);
411 			hlist_add_behind_rcu(&pol->bydst, entry0);
412 		}
413 		entry0 = &pol->bydst;
414 	}
415 	if (!hlist_empty(list)) {
416 		entry0 = NULL;
417 		goto redo;
418 	}
419 }
420 
421 static void xfrm_idx_hash_transfer(struct hlist_head *list,
422 				   struct hlist_head *nidxtable,
423 				   unsigned int nhashmask)
424 {
425 	struct hlist_node *tmp;
426 	struct xfrm_policy *pol;
427 
428 	hlist_for_each_entry_safe(pol, tmp, list, byidx) {
429 		unsigned int h;
430 
431 		h = __idx_hash(pol->index, nhashmask);
432 		hlist_add_head(&pol->byidx, nidxtable+h);
433 	}
434 }
435 
436 static unsigned long xfrm_new_hash_mask(unsigned int old_hmask)
437 {
438 	return ((old_hmask + 1) << 1) - 1;
439 }
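
/*
 * Illustrative example (annotation): an old hmask of 15 (16 buckets)
 * yields ((15 + 1) << 1) - 1 == 31, i.e. the table always doubles.
 */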
440 
441 static void xfrm_bydst_resize(struct net *net, int dir)
442 {
443 	unsigned int hmask = net->xfrm.policy_bydst[dir].hmask;
444 	unsigned int nhashmask = xfrm_new_hash_mask(hmask);
445 	unsigned int nsize = (nhashmask + 1) * sizeof(struct hlist_head);
446 	struct hlist_head *ndst = xfrm_hash_alloc(nsize);
447 	struct hlist_head *odst;
448 	int i;
449 
450 	if (!ndst)
451 		return;
452 
453 	spin_lock_bh(&net->xfrm.xfrm_policy_lock);
454 	write_seqcount_begin(&xfrm_policy_hash_generation);
455 
456 	odst = rcu_dereference_protected(net->xfrm.policy_bydst[dir].table,
457 				lockdep_is_held(&net->xfrm.xfrm_policy_lock));
461 
462 	for (i = hmask; i >= 0; i--)
463 		xfrm_dst_hash_transfer(net, odst + i, ndst, nhashmask, dir);
464 
465 	rcu_assign_pointer(net->xfrm.policy_bydst[dir].table, ndst);
466 	net->xfrm.policy_bydst[dir].hmask = nhashmask;
467 
468 	write_seqcount_end(&xfrm_policy_hash_generation);
469 	spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
470 
471 	synchronize_rcu();
472 
473 	xfrm_hash_free(odst, (hmask + 1) * sizeof(struct hlist_head));
474 }
475 
476 static void xfrm_byidx_resize(struct net *net, int total)
477 {
478 	unsigned int hmask = net->xfrm.policy_idx_hmask;
479 	unsigned int nhashmask = xfrm_new_hash_mask(hmask);
480 	unsigned int nsize = (nhashmask + 1) * sizeof(struct hlist_head);
481 	struct hlist_head *oidx = net->xfrm.policy_byidx;
482 	struct hlist_head *nidx = xfrm_hash_alloc(nsize);
483 	int i;
484 
485 	if (!nidx)
486 		return;
487 
488 	spin_lock_bh(&net->xfrm.xfrm_policy_lock);
489 
490 	for (i = hmask; i >= 0; i--)
491 		xfrm_idx_hash_transfer(oidx + i, nidx, nhashmask);
492 
493 	net->xfrm.policy_byidx = nidx;
494 	net->xfrm.policy_idx_hmask = nhashmask;
495 
496 	spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
497 
498 	xfrm_hash_free(oidx, (hmask + 1) * sizeof(struct hlist_head));
499 }
500 
501 static inline int xfrm_bydst_should_resize(struct net *net, int dir, int *total)
502 {
503 	unsigned int cnt = net->xfrm.policy_count[dir];
504 	unsigned int hmask = net->xfrm.policy_bydst[dir].hmask;
505 
506 	if (total)
507 		*total += cnt;
508 
509 	if ((hmask + 1) < xfrm_policy_hashmax &&
510 	    cnt > hmask)
511 		return 1;
512 
513 	return 0;
514 }
515 
516 static inline int xfrm_byidx_should_resize(struct net *net, int total)
517 {
518 	unsigned int hmask = net->xfrm.policy_idx_hmask;
519 
520 	if ((hmask + 1) < xfrm_policy_hashmax &&
521 	    total > hmask)
522 		return 1;
523 
524 	return 0;
525 }
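
/*
 * Both resize predicates above grow a table once the entry count exceeds
 * the current mask (average chain length passing ~1), and stop growing
 * once the table reaches xfrm_policy_hashmax buckets.
 */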
526 
527 void xfrm_spd_getinfo(struct net *net, struct xfrmk_spdinfo *si)
528 {
529 	si->incnt = net->xfrm.policy_count[XFRM_POLICY_IN];
530 	si->outcnt = net->xfrm.policy_count[XFRM_POLICY_OUT];
531 	si->fwdcnt = net->xfrm.policy_count[XFRM_POLICY_FWD];
532 	si->inscnt = net->xfrm.policy_count[XFRM_POLICY_IN+XFRM_POLICY_MAX];
533 	si->outscnt = net->xfrm.policy_count[XFRM_POLICY_OUT+XFRM_POLICY_MAX];
534 	si->fwdscnt = net->xfrm.policy_count[XFRM_POLICY_FWD+XFRM_POLICY_MAX];
535 	si->spdhcnt = net->xfrm.policy_idx_hmask;
536 	si->spdhmcnt = xfrm_policy_hashmax;
537 }
538 EXPORT_SYMBOL(xfrm_spd_getinfo);
539 
540 static DEFINE_MUTEX(hash_resize_mutex);
541 static void xfrm_hash_resize(struct work_struct *work)
542 {
543 	struct net *net = container_of(work, struct net, xfrm.policy_hash_work);
544 	int dir, total;
545 
546 	mutex_lock(&hash_resize_mutex);
547 
548 	total = 0;
549 	for (dir = 0; dir < XFRM_POLICY_MAX; dir++) {
550 		if (xfrm_bydst_should_resize(net, dir, &total))
551 			xfrm_bydst_resize(net, dir);
552 	}
553 	if (xfrm_byidx_should_resize(net, total))
554 		xfrm_byidx_resize(net, total);
555 
556 	mutex_unlock(&hash_resize_mutex);
557 }
558 
559 static void xfrm_hash_rebuild(struct work_struct *work)
560 {
561 	struct net *net = container_of(work, struct net,
562 				       xfrm.policy_hthresh.work);
563 	unsigned int hmask;
564 	struct xfrm_policy *pol;
565 	struct xfrm_policy *policy;
566 	struct hlist_head *chain;
567 	struct hlist_head *odst;
568 	struct hlist_node *newpos;
569 	int i;
570 	int dir;
571 	unsigned int seq;
572 	u8 lbits4, rbits4, lbits6, rbits6;
573 
574 	mutex_lock(&hash_resize_mutex);
575 
576 	/* read selector prefixlen thresholds */
577 	do {
578 		seq = read_seqbegin(&net->xfrm.policy_hthresh.lock);
579 
580 		lbits4 = net->xfrm.policy_hthresh.lbits4;
581 		rbits4 = net->xfrm.policy_hthresh.rbits4;
582 		lbits6 = net->xfrm.policy_hthresh.lbits6;
583 		rbits6 = net->xfrm.policy_hthresh.rbits6;
584 	} while (read_seqretry(&net->xfrm.policy_hthresh.lock, seq));
585 
586 	spin_lock_bh(&net->xfrm.xfrm_policy_lock);
587 
588 	/* reset the bydst and inexact table in all directions */
589 	for (dir = 0; dir < XFRM_POLICY_MAX; dir++) {
590 		INIT_HLIST_HEAD(&net->xfrm.policy_inexact[dir]);
591 		hmask = net->xfrm.policy_bydst[dir].hmask;
592 		odst = net->xfrm.policy_bydst[dir].table;
593 		for (i = hmask; i >= 0; i--)
594 			INIT_HLIST_HEAD(odst + i);
595 		if ((dir & XFRM_POLICY_MASK) == XFRM_POLICY_OUT) {
596 			/* dir out => dst = remote, src = local */
597 			net->xfrm.policy_bydst[dir].dbits4 = rbits4;
598 			net->xfrm.policy_bydst[dir].sbits4 = lbits4;
599 			net->xfrm.policy_bydst[dir].dbits6 = rbits6;
600 			net->xfrm.policy_bydst[dir].sbits6 = lbits6;
601 		} else {
602 			/* dir in/fwd => dst = local, src = remote */
603 			net->xfrm.policy_bydst[dir].dbits4 = lbits4;
604 			net->xfrm.policy_bydst[dir].sbits4 = rbits4;
605 			net->xfrm.policy_bydst[dir].dbits6 = lbits6;
606 			net->xfrm.policy_bydst[dir].sbits6 = rbits6;
607 		}
608 	}
609 
610 	/* re-insert all policies by order of creation */
611 	list_for_each_entry_reverse(policy, &net->xfrm.policy_all, walk.all) {
612 		if (xfrm_policy_id2dir(policy->index) >= XFRM_POLICY_MAX) {
613 			/* skip socket policies */
614 			continue;
615 		}
616 		newpos = NULL;
617 		chain = policy_hash_bysel(net, &policy->selector,
618 					  policy->family,
619 					  xfrm_policy_id2dir(policy->index));
620 		hlist_for_each_entry(pol, chain, bydst) {
621 			if (policy->priority >= pol->priority)
622 				newpos = &pol->bydst;
623 			else
624 				break;
625 		}
626 		if (newpos)
627 			hlist_add_behind(&policy->bydst, newpos);
628 		else
629 			hlist_add_head(&policy->bydst, chain);
630 	}
631 
632 	spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
633 
634 	mutex_unlock(&hash_resize_mutex);
635 }
636 
637 void xfrm_policy_hash_rebuild(struct net *net)
638 {
639 	schedule_work(&net->xfrm.policy_hthresh.work);
640 }
641 EXPORT_SYMBOL(xfrm_policy_hash_rebuild);
642 
643 /* Generate a new index. KAME seems to generate them ordered by cost,
644  * at the price of completely unpredictable rule ordering. That will not do here. */
645 static u32 xfrm_gen_index(struct net *net, int dir, u32 index)
646 {
647 	static u32 idx_generator;
648 
649 	for (;;) {
650 		struct hlist_head *list;
651 		struct xfrm_policy *p;
652 		u32 idx;
653 		int found;
654 
655 		if (!index) {
656 			idx = (idx_generator | dir);
657 			idx_generator += 8;
658 		} else {
659 			idx = index;
660 			index = 0;
661 		}
662 
663 		if (idx == 0)
664 			idx = 8;
665 		list = net->xfrm.policy_byidx + idx_hash(net, idx);
666 		found = 0;
667 		hlist_for_each_entry(p, list, byidx) {
668 			if (p->index == idx) {
669 				found = 1;
670 				break;
671 			}
672 		}
673 		if (!found)
674 			return idx;
675 	}
676 }
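
/*
 * The generated index encodes the direction in its low three bits:
 * idx_generator advances in steps of 8, so idx_generator | dir is unique
 * per direction and xfrm_policy_id2dir() can recover dir from the index
 * alone. Illustrative sequence for dir == 1: 1, 9, 17, ... (skipping any
 * index already present in the byidx hash).
 */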
677 
678 static inline int selector_cmp(struct xfrm_selector *s1, struct xfrm_selector *s2)
679 {
680 	u32 *p1 = (u32 *) s1;
681 	u32 *p2 = (u32 *) s2;
682 	int len = sizeof(struct xfrm_selector) / sizeof(u32);
683 	int i;
684 
685 	for (i = 0; i < len; i++) {
686 		if (p1[i] != p2[i])
687 			return 1;
688 	}
689 
690 	return 0;
691 }
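
/*
 * selector_cmp() is a bitwise comparison, one 32-bit word at a time: it
 * returns 0 only when the two selectors are identical over the entire
 * structure, padding included.
 */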
692 
693 static void xfrm_policy_requeue(struct xfrm_policy *old,
694 				struct xfrm_policy *new)
695 {
696 	struct xfrm_policy_queue *pq = &old->polq;
697 	struct sk_buff_head list;
698 
699 	if (skb_queue_empty(&pq->hold_queue))
700 		return;
701 
702 	__skb_queue_head_init(&list);
703 
704 	spin_lock_bh(&pq->hold_queue.lock);
705 	skb_queue_splice_init(&pq->hold_queue, &list);
706 	if (del_timer(&pq->hold_timer))
707 		xfrm_pol_put(old);
708 	spin_unlock_bh(&pq->hold_queue.lock);
709 
710 	pq = &new->polq;
711 
712 	spin_lock_bh(&pq->hold_queue.lock);
713 	skb_queue_splice(&list, &pq->hold_queue);
714 	pq->timeout = XFRM_QUEUE_TMO_MIN;
715 	if (!mod_timer(&pq->hold_timer, jiffies))
716 		xfrm_pol_hold(new);
717 	spin_unlock_bh(&pq->hold_queue.lock);
718 }
719 
720 static bool xfrm_policy_mark_match(struct xfrm_policy *policy,
721 				   struct xfrm_policy *pol)
722 {
723 	u32 mark = policy->mark.v & policy->mark.m;
724 
725 	if (policy->mark.v == pol->mark.v && policy->mark.m == pol->mark.m)
726 		return true;
727 
728 	if ((mark & pol->mark.m) == pol->mark.v &&
729 	    policy->priority == pol->priority)
730 		return true;
731 
732 	return false;
733 }
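
/*
 * Two policies are treated as matching on mark either when their (v, m)
 * pairs are identical, or when the new policy's masked value also
 * satisfies the existing policy's mask/value at the same priority.
 * Illustrative example (annotation): v == 0x1, m == 0xf on the new policy
 * matches an existing policy with v == 0x1, m == 0x1 and equal priority.
 */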
734 
735 int xfrm_policy_insert(int dir, struct xfrm_policy *policy, int excl)
736 {
737 	struct net *net = xp_net(policy);
738 	struct xfrm_policy *pol;
739 	struct xfrm_policy *delpol;
740 	struct hlist_head *chain;
741 	struct hlist_node *newpos;
742 
743 	spin_lock_bh(&net->xfrm.xfrm_policy_lock);
744 	chain = policy_hash_bysel(net, &policy->selector, policy->family, dir);
745 	delpol = NULL;
746 	newpos = NULL;
747 	hlist_for_each_entry(pol, chain, bydst) {
748 		if (pol->type == policy->type &&
749 		    !selector_cmp(&pol->selector, &policy->selector) &&
750 		    xfrm_policy_mark_match(policy, pol) &&
751 		    xfrm_sec_ctx_match(pol->security, policy->security) &&
752 		    !WARN_ON(delpol)) {
753 			if (excl) {
754 				spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
755 				return -EEXIST;
756 			}
757 			delpol = pol;
758 			if (policy->priority > pol->priority)
759 				continue;
760 		} else if (policy->priority >= pol->priority) {
761 			newpos = &pol->bydst;
762 			continue;
763 		}
764 		if (delpol)
765 			break;
766 	}
767 	if (newpos)
768 		hlist_add_behind(&policy->bydst, newpos);
769 	else
770 		hlist_add_head(&policy->bydst, chain);
771 	__xfrm_policy_link(policy, dir);
772 
773 	/* After the previous checks, family can only be AF_INET or AF_INET6 */
774 	if (policy->family == AF_INET)
775 		rt_genid_bump_ipv4(net);
776 	else
777 		rt_genid_bump_ipv6(net);
778 
779 	if (delpol) {
780 		xfrm_policy_requeue(delpol, policy);
781 		__xfrm_policy_unlink(delpol, dir);
782 	}
783 	policy->index = delpol ? delpol->index : xfrm_gen_index(net, dir, policy->index);
784 	hlist_add_head(&policy->byidx, net->xfrm.policy_byidx+idx_hash(net, policy->index));
785 	policy->curlft.add_time = get_seconds();
786 	policy->curlft.use_time = 0;
787 	if (!mod_timer(&policy->timer, jiffies + HZ))
788 		xfrm_pol_hold(policy);
789 	spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
790 
791 	if (delpol)
792 		xfrm_policy_kill(delpol);
793 	else if (xfrm_bydst_should_resize(net, dir, NULL))
794 		schedule_work(&net->xfrm.policy_hash_work);
795 
796 	return 0;
797 }
798 EXPORT_SYMBOL(xfrm_policy_insert);
799 
800 struct xfrm_policy *xfrm_policy_bysel_ctx(struct net *net, u32 mark, u8 type,
801 					  int dir, struct xfrm_selector *sel,
802 					  struct xfrm_sec_ctx *ctx, int delete,
803 					  int *err)
804 {
805 	struct xfrm_policy *pol, *ret;
806 	struct hlist_head *chain;
807 
808 	*err = 0;
809 	spin_lock_bh(&net->xfrm.xfrm_policy_lock);
810 	chain = policy_hash_bysel(net, sel, sel->family, dir);
811 	ret = NULL;
812 	hlist_for_each_entry(pol, chain, bydst) {
813 		if (pol->type == type &&
814 		    (mark & pol->mark.m) == pol->mark.v &&
815 		    !selector_cmp(sel, &pol->selector) &&
816 		    xfrm_sec_ctx_match(ctx, pol->security)) {
817 			xfrm_pol_hold(pol);
818 			if (delete) {
819 				*err = security_xfrm_policy_delete(
820 								pol->security);
821 				if (*err) {
822 					spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
823 					return pol;
824 				}
825 				__xfrm_policy_unlink(pol, dir);
826 			}
827 			ret = pol;
828 			break;
829 		}
830 	}
831 	spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
832 
833 	if (ret && delete)
834 		xfrm_policy_kill(ret);
835 	return ret;
836 }
837 EXPORT_SYMBOL(xfrm_policy_bysel_ctx);
838 
839 struct xfrm_policy *xfrm_policy_byid(struct net *net, u32 mark, u8 type,
840 				     int dir, u32 id, int delete, int *err)
841 {
842 	struct xfrm_policy *pol, *ret;
843 	struct hlist_head *chain;
844 
845 	*err = -ENOENT;
846 	if (xfrm_policy_id2dir(id) != dir)
847 		return NULL;
848 
849 	*err = 0;
850 	spin_lock_bh(&net->xfrm.xfrm_policy_lock);
851 	chain = net->xfrm.policy_byidx + idx_hash(net, id);
852 	ret = NULL;
853 	hlist_for_each_entry(pol, chain, byidx) {
854 		if (pol->type == type && pol->index == id &&
855 		    (mark & pol->mark.m) == pol->mark.v) {
856 			xfrm_pol_hold(pol);
857 			if (delete) {
858 				*err = security_xfrm_policy_delete(
859 								pol->security);
860 				if (*err) {
861 					spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
862 					return pol;
863 				}
864 				__xfrm_policy_unlink(pol, dir);
865 			}
866 			ret = pol;
867 			break;
868 		}
869 	}
870 	spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
871 
872 	if (ret && delete)
873 		xfrm_policy_kill(ret);
874 	return ret;
875 }
876 EXPORT_SYMBOL(xfrm_policy_byid);
877 
878 #ifdef CONFIG_SECURITY_NETWORK_XFRM
879 static inline int
880 xfrm_policy_flush_secctx_check(struct net *net, u8 type, bool task_valid)
881 {
882 	int dir, err = 0;
883 
884 	for (dir = 0; dir < XFRM_POLICY_MAX; dir++) {
885 		struct xfrm_policy *pol;
886 		int i;
887 
888 		hlist_for_each_entry(pol,
889 				     &net->xfrm.policy_inexact[dir], bydst) {
890 			if (pol->type != type)
891 				continue;
892 			err = security_xfrm_policy_delete(pol->security);
893 			if (err) {
894 				xfrm_audit_policy_delete(pol, 0, task_valid);
895 				return err;
896 			}
897 		}
898 		for (i = net->xfrm.policy_bydst[dir].hmask; i >= 0; i--) {
899 			hlist_for_each_entry(pol,
900 					     net->xfrm.policy_bydst[dir].table + i,
901 					     bydst) {
902 				if (pol->type != type)
903 					continue;
904 				err = security_xfrm_policy_delete(
905 								pol->security);
906 				if (err) {
907 					xfrm_audit_policy_delete(pol, 0,
908 								 task_valid);
909 					return err;
910 				}
911 			}
912 		}
913 	}
914 	return err;
915 }
916 #else
917 static inline int
918 xfrm_policy_flush_secctx_check(struct net *net, u8 type, bool task_valid)
919 {
920 	return 0;
921 }
922 #endif
923 
924 int xfrm_policy_flush(struct net *net, u8 type, bool task_valid)
925 {
926 	int dir, err = 0, cnt = 0;
927 
928 	spin_lock_bh(&net->xfrm.xfrm_policy_lock);
929 
930 	err = xfrm_policy_flush_secctx_check(net, type, task_valid);
931 	if (err)
932 		goto out;
933 
934 	for (dir = 0; dir < XFRM_POLICY_MAX; dir++) {
935 		struct xfrm_policy *pol;
936 		int i;
937 
938 	again1:
939 		hlist_for_each_entry(pol,
940 				     &net->xfrm.policy_inexact[dir], bydst) {
941 			if (pol->type != type)
942 				continue;
943 			__xfrm_policy_unlink(pol, dir);
944 			spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
945 			cnt++;
946 
947 			xfrm_audit_policy_delete(pol, 1, task_valid);
948 
949 			xfrm_policy_kill(pol);
950 
951 			spin_lock_bh(&net->xfrm.xfrm_policy_lock);
952 			goto again1;
953 		}
954 
955 		for (i = net->xfrm.policy_bydst[dir].hmask; i >= 0; i--) {
956 	again2:
957 			hlist_for_each_entry(pol,
958 					     net->xfrm.policy_bydst[dir].table + i,
959 					     bydst) {
960 				if (pol->type != type)
961 					continue;
962 				__xfrm_policy_unlink(pol, dir);
963 				spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
964 				cnt++;
965 
966 				xfrm_audit_policy_delete(pol, 1, task_valid);
967 				xfrm_policy_kill(pol);
968 
969 				spin_lock_bh(&net->xfrm.xfrm_policy_lock);
970 				goto again2;
971 			}
972 		}
973 
974 	}
975 	if (!cnt)
976 		err = -ESRCH;
977 	else
978 		xfrm_policy_cache_flush();
979 out:
980 	spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
981 	return err;
982 }
983 EXPORT_SYMBOL(xfrm_policy_flush);
984 
985 int xfrm_policy_walk(struct net *net, struct xfrm_policy_walk *walk,
986 		     int (*func)(struct xfrm_policy *, int, int, void*),
987 		     void *data)
988 {
989 	struct xfrm_policy *pol;
990 	struct xfrm_policy_walk_entry *x;
991 	int error = 0;
992 
993 	if (walk->type >= XFRM_POLICY_TYPE_MAX &&
994 	    walk->type != XFRM_POLICY_TYPE_ANY)
995 		return -EINVAL;
996 
997 	if (list_empty(&walk->walk.all) && walk->seq != 0)
998 		return 0;
999 
1000 	spin_lock_bh(&net->xfrm.xfrm_policy_lock);
1001 	if (list_empty(&walk->walk.all))
1002 		x = list_first_entry(&net->xfrm.policy_all, struct xfrm_policy_walk_entry, all);
1003 	else
1004 		x = list_first_entry(&walk->walk.all,
1005 				     struct xfrm_policy_walk_entry, all);
1006 
1007 	list_for_each_entry_from(x, &net->xfrm.policy_all, all) {
1008 		if (x->dead)
1009 			continue;
1010 		pol = container_of(x, struct xfrm_policy, walk);
1011 		if (walk->type != XFRM_POLICY_TYPE_ANY &&
1012 		    walk->type != pol->type)
1013 			continue;
1014 		error = func(pol, xfrm_policy_id2dir(pol->index),
1015 			     walk->seq, data);
1016 		if (error) {
1017 			list_move_tail(&walk->walk.all, &x->all);
1018 			goto out;
1019 		}
1020 		walk->seq++;
1021 	}
1022 	if (walk->seq == 0) {
1023 		error = -ENOENT;
1024 		goto out;
1025 	}
1026 	list_del_init(&walk->walk.all);
1027 out:
1028 	spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
1029 	return error;
1030 }
1031 EXPORT_SYMBOL(xfrm_policy_walk);
1032 
1033 void xfrm_policy_walk_init(struct xfrm_policy_walk *walk, u8 type)
1034 {
1035 	INIT_LIST_HEAD(&walk->walk.all);
1036 	walk->walk.dead = 1;
1037 	walk->type = type;
1038 	walk->seq = 0;
1039 }
1040 EXPORT_SYMBOL(xfrm_policy_walk_init);
1041 
1042 void xfrm_policy_walk_done(struct xfrm_policy_walk *walk, struct net *net)
1043 {
1044 	if (list_empty(&walk->walk.all))
1045 		return;
1046 
1047 	spin_lock_bh(&net->xfrm.xfrm_policy_lock);
1048 	list_del(&walk->walk.all);
1049 	spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
1050 }
1051 EXPORT_SYMBOL(xfrm_policy_walk_done);
1052 
1053 /*
1054  * Find policy to apply to this flow.
1055  *
1056  * Returns 0 if the policy matches, otherwise a negative errno.
1057  */
1058 static int xfrm_policy_match(const struct xfrm_policy *pol,
1059 			     const struct flowi *fl,
1060 			     u8 type, u16 family, int dir)
1061 {
1062 	const struct xfrm_selector *sel = &pol->selector;
1063 	int ret = -ESRCH;
1064 	bool match;
1065 
1066 	if (pol->family != family ||
1067 	    (fl->flowi_mark & pol->mark.m) != pol->mark.v ||
1068 	    pol->type != type)
1069 		return ret;
1070 
1071 	match = xfrm_selector_match(sel, fl, family);
1072 	if (match)
1073 		ret = security_xfrm_policy_lookup(pol->security, fl->flowi_secid,
1074 						  dir);
1075 
1076 	return ret;
1077 }
1078 
1079 static struct xfrm_policy *xfrm_policy_lookup_bytype(struct net *net, u8 type,
1080 						     const struct flowi *fl,
1081 						     u16 family, u8 dir)
1082 {
1083 	int err;
1084 	struct xfrm_policy *pol, *ret;
1085 	const xfrm_address_t *daddr, *saddr;
1086 	struct hlist_head *chain;
1087 	unsigned int sequence;
1088 	u32 priority;
1089 
1090 	daddr = xfrm_flowi_daddr(fl, family);
1091 	saddr = xfrm_flowi_saddr(fl, family);
1092 	if (unlikely(!daddr || !saddr))
1093 		return NULL;
1094 
1095 	rcu_read_lock();
1096  retry:
1097 	do {
1098 		sequence = read_seqcount_begin(&xfrm_policy_hash_generation);
1099 		chain = policy_hash_direct(net, daddr, saddr, family, dir);
1100 	} while (read_seqcount_retry(&xfrm_policy_hash_generation, sequence));
1101 
1102 	priority = ~0U;
1103 	ret = NULL;
1104 	hlist_for_each_entry_rcu(pol, chain, bydst) {
1105 		err = xfrm_policy_match(pol, fl, type, family, dir);
1106 		if (err) {
1107 			if (err == -ESRCH)
1108 				continue;
1109 			else {
1110 				ret = ERR_PTR(err);
1111 				goto fail;
1112 			}
1113 		} else {
1114 			ret = pol;
1115 			priority = ret->priority;
1116 			break;
1117 		}
1118 	}
1119 	chain = &net->xfrm.policy_inexact[dir];
1120 	hlist_for_each_entry_rcu(pol, chain, bydst) {
1121 		if ((pol->priority >= priority) && ret)
1122 			break;
1123 
1124 		err = xfrm_policy_match(pol, fl, type, family, dir);
1125 		if (err) {
1126 			if (err == -ESRCH)
1127 				continue;
1128 			else {
1129 				ret = ERR_PTR(err);
1130 				goto fail;
1131 			}
1132 		} else {
1133 			ret = pol;
1134 			break;
1135 		}
1136 	}
1137 
1138 	if (read_seqcount_retry(&xfrm_policy_hash_generation, sequence))
1139 		goto retry;
1140 
1141 	if (ret && !xfrm_pol_hold_rcu(ret))
1142 		goto retry;
1143 fail:
1144 	rcu_read_unlock();
1145 
1146 	return ret;
1147 }
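
/*
 * Lookup order above: the hashed chain is scanned first (entries are kept
 * priority-ordered at insert time), then the inexact list is walked for a
 * match with a strictly lower (better) priority. The seqcount retry
 * guards against a concurrent hash table resize.
 */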
1148 
1149 static struct xfrm_policy *
1150 xfrm_policy_lookup(struct net *net, const struct flowi *fl, u16 family, u8 dir)
1151 {
1152 #ifdef CONFIG_XFRM_SUB_POLICY
1153 	struct xfrm_policy *pol;
1154 
1155 	pol = xfrm_policy_lookup_bytype(net, XFRM_POLICY_TYPE_SUB, fl, family, dir);
1156 	if (pol != NULL)
1157 		return pol;
1158 #endif
1159 	return xfrm_policy_lookup_bytype(net, XFRM_POLICY_TYPE_MAIN, fl, family, dir);
1160 }
1161 
1162 static struct xfrm_policy *xfrm_sk_policy_lookup(const struct sock *sk, int dir,
1163 						 const struct flowi *fl, u16 family)
1164 {
1165 	struct xfrm_policy *pol;
1166 
1167 	rcu_read_lock();
1168  again:
1169 	pol = rcu_dereference(sk->sk_policy[dir]);
1170 	if (pol != NULL) {
1171 		bool match;
1172 		int err = 0;
1173 
1174 		if (pol->family != family) {
1175 			pol = NULL;
1176 			goto out;
1177 		}
1178 
1179 		match = xfrm_selector_match(&pol->selector, fl, family);
1180 		if (match) {
1181 			if ((sk->sk_mark & pol->mark.m) != pol->mark.v) {
1182 				pol = NULL;
1183 				goto out;
1184 			}
1185 			err = security_xfrm_policy_lookup(pol->security,
1186 						      fl->flowi_secid,
1187 						      dir);
1188 			if (!err) {
1189 				if (!xfrm_pol_hold_rcu(pol))
1190 					goto again;
1191 			} else if (err == -ESRCH) {
1192 				pol = NULL;
1193 			} else {
1194 				pol = ERR_PTR(err);
1195 			}
1196 		} else
1197 			pol = NULL;
1198 	}
1199 out:
1200 	rcu_read_unlock();
1201 	return pol;
1202 }
1203 
1204 static void __xfrm_policy_link(struct xfrm_policy *pol, int dir)
1205 {
1206 	struct net *net = xp_net(pol);
1207 
1208 	list_add(&pol->walk.all, &net->xfrm.policy_all);
1209 	net->xfrm.policy_count[dir]++;
1210 	xfrm_pol_hold(pol);
1211 }
1212 
1213 static struct xfrm_policy *__xfrm_policy_unlink(struct xfrm_policy *pol,
1214 						int dir)
1215 {
1216 	struct net *net = xp_net(pol);
1217 
1218 	if (list_empty(&pol->walk.all))
1219 		return NULL;
1220 
1221 	/* Socket policies are not hashed. */
1222 	if (!hlist_unhashed(&pol->bydst)) {
1223 		hlist_del_rcu(&pol->bydst);
1224 		hlist_del(&pol->byidx);
1225 	}
1226 
1227 	list_del_init(&pol->walk.all);
1228 	net->xfrm.policy_count[dir]--;
1229 
1230 	return pol;
1231 }
1232 
1233 static void xfrm_sk_policy_link(struct xfrm_policy *pol, int dir)
1234 {
1235 	__xfrm_policy_link(pol, XFRM_POLICY_MAX + dir);
1236 }
1237 
1238 static void xfrm_sk_policy_unlink(struct xfrm_policy *pol, int dir)
1239 {
1240 	__xfrm_policy_unlink(pol, XFRM_POLICY_MAX + dir);
1241 }
1242 
1243 int xfrm_policy_delete(struct xfrm_policy *pol, int dir)
1244 {
1245 	struct net *net = xp_net(pol);
1246 
1247 	spin_lock_bh(&net->xfrm.xfrm_policy_lock);
1248 	pol = __xfrm_policy_unlink(pol, dir);
1249 	spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
1250 	if (pol) {
1251 		xfrm_policy_kill(pol);
1252 		return 0;
1253 	}
1254 	return -ENOENT;
1255 }
1256 EXPORT_SYMBOL(xfrm_policy_delete);
1257 
1258 int xfrm_sk_policy_insert(struct sock *sk, int dir, struct xfrm_policy *pol)
1259 {
1260 	struct net *net = xp_net(pol);
1261 	struct xfrm_policy *old_pol;
1262 
1263 #ifdef CONFIG_XFRM_SUB_POLICY
1264 	if (pol && pol->type != XFRM_POLICY_TYPE_MAIN)
1265 		return -EINVAL;
1266 #endif
1267 
1268 	spin_lock_bh(&net->xfrm.xfrm_policy_lock);
1269 	old_pol = rcu_dereference_protected(sk->sk_policy[dir],
1270 				lockdep_is_held(&net->xfrm.xfrm_policy_lock));
1271 	if (pol) {
1272 		pol->curlft.add_time = get_seconds();
1273 		pol->index = xfrm_gen_index(net, XFRM_POLICY_MAX+dir, 0);
1274 		xfrm_sk_policy_link(pol, dir);
1275 	}
1276 	rcu_assign_pointer(sk->sk_policy[dir], pol);
1277 	if (old_pol) {
1278 		if (pol)
1279 			xfrm_policy_requeue(old_pol, pol);
1280 
1281 		/* Unlinking always succeeds. This is the only function
1282 		 * allowed to delete or replace a socket policy.
1283 		 */
1284 		xfrm_sk_policy_unlink(old_pol, dir);
1285 	}
1286 	spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
1287 
1288 	if (old_pol) {
1289 		xfrm_policy_kill(old_pol);
1290 	}
1291 	return 0;
1292 }
1293 
1294 static struct xfrm_policy *clone_policy(const struct xfrm_policy *old, int dir)
1295 {
1296 	struct xfrm_policy *newp = xfrm_policy_alloc(xp_net(old), GFP_ATOMIC);
1297 	struct net *net = xp_net(old);
1298 
1299 	if (newp) {
1300 		newp->selector = old->selector;
1301 		if (security_xfrm_policy_clone(old->security,
1302 					       &newp->security)) {
1303 			kfree(newp);
1304 			return NULL;  /* ENOMEM */
1305 		}
1306 		newp->lft = old->lft;
1307 		newp->curlft = old->curlft;
1308 		newp->mark = old->mark;
1309 		newp->action = old->action;
1310 		newp->flags = old->flags;
1311 		newp->xfrm_nr = old->xfrm_nr;
1312 		newp->index = old->index;
1313 		newp->type = old->type;
1314 		newp->family = old->family;
1315 		memcpy(newp->xfrm_vec, old->xfrm_vec,
1316 		       newp->xfrm_nr*sizeof(struct xfrm_tmpl));
1317 		spin_lock_bh(&net->xfrm.xfrm_policy_lock);
1318 		xfrm_sk_policy_link(newp, dir);
1319 		spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
1320 		xfrm_pol_put(newp);
1321 	}
1322 	return newp;
1323 }
1324 
1325 int __xfrm_sk_clone_policy(struct sock *sk, const struct sock *osk)
1326 {
1327 	const struct xfrm_policy *p;
1328 	struct xfrm_policy *np;
1329 	int i, ret = 0;
1330 
1331 	rcu_read_lock();
1332 	for (i = 0; i < 2; i++) {
1333 		p = rcu_dereference(osk->sk_policy[i]);
1334 		if (p) {
1335 			np = clone_policy(p, i);
1336 			if (unlikely(!np)) {
1337 				ret = -ENOMEM;
1338 				break;
1339 			}
1340 			rcu_assign_pointer(sk->sk_policy[i], np);
1341 		}
1342 	}
1343 	rcu_read_unlock();
1344 	return ret;
1345 }
1346 
1347 static int
1348 xfrm_get_saddr(struct net *net, int oif, xfrm_address_t *local,
1349 	       xfrm_address_t *remote, unsigned short family, u32 mark)
1350 {
1351 	int err;
1352 	const struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
1353 
1354 	if (unlikely(afinfo == NULL))
1355 		return -EINVAL;
1356 	err = afinfo->get_saddr(net, oif, local, remote, mark);
1357 	rcu_read_unlock();
1358 	return err;
1359 }
1360 
1361 /* Resolve list of templates for the flow, given policy. */
1362 
1363 static int
1364 xfrm_tmpl_resolve_one(struct xfrm_policy *policy, const struct flowi *fl,
1365 		      struct xfrm_state **xfrm, unsigned short family)
1366 {
1367 	struct net *net = xp_net(policy);
1368 	int nx;
1369 	int i, error;
1370 	xfrm_address_t *daddr = xfrm_flowi_daddr(fl, family);
1371 	xfrm_address_t *saddr = xfrm_flowi_saddr(fl, family);
1372 	xfrm_address_t tmp;
1373 
1374 	for (nx = 0, i = 0; i < policy->xfrm_nr; i++) {
1375 		struct xfrm_state *x;
1376 		xfrm_address_t *remote = daddr;
1377 		xfrm_address_t *local  = saddr;
1378 		struct xfrm_tmpl *tmpl = &policy->xfrm_vec[i];
1379 
1380 		if (tmpl->mode == XFRM_MODE_TUNNEL ||
1381 		    tmpl->mode == XFRM_MODE_BEET) {
1382 			remote = &tmpl->id.daddr;
1383 			local = &tmpl->saddr;
1384 			if (xfrm_addr_any(local, tmpl->encap_family)) {
1385 				error = xfrm_get_saddr(net, fl->flowi_oif,
1386 						       &tmp, remote,
1387 						       tmpl->encap_family, 0);
1388 				if (error)
1389 					goto fail;
1390 				local = &tmp;
1391 			}
1392 		}
1393 
1394 		x = xfrm_state_find(remote, local, fl, tmpl, policy, &error, family);
1395 
1396 		if (x && x->km.state == XFRM_STATE_VALID) {
1397 			xfrm[nx++] = x;
1398 			daddr = remote;
1399 			saddr = local;
1400 			continue;
1401 		}
1402 		if (x) {
1403 			error = (x->km.state == XFRM_STATE_ERROR ?
1404 				 -EINVAL : -EAGAIN);
1405 			xfrm_state_put(x);
1406 		} else if (error == -ESRCH) {
1407 			error = -EAGAIN;
1408 		}
1409 
1410 		if (!tmpl->optional)
1411 			goto fail;
1412 	}
1413 	return nx;
1414 
1415 fail:
1416 	for (nx--; nx >= 0; nx--)
1417 		xfrm_state_put(xfrm[nx]);
1418 	return error;
1419 }
1420 
1421 static int
1422 xfrm_tmpl_resolve(struct xfrm_policy **pols, int npols, const struct flowi *fl,
1423 		  struct xfrm_state **xfrm, unsigned short family)
1424 {
1425 	struct xfrm_state *tp[XFRM_MAX_DEPTH];
1426 	struct xfrm_state **tpp = (npols > 1) ? tp : xfrm;
1427 	int cnx = 0;
1428 	int error;
1429 	int ret;
1430 	int i;
1431 
1432 	for (i = 0; i < npols; i++) {
1433 		if (cnx + pols[i]->xfrm_nr >= XFRM_MAX_DEPTH) {
1434 			error = -ENOBUFS;
1435 			goto fail;
1436 		}
1437 
1438 		ret = xfrm_tmpl_resolve_one(pols[i], fl, &tpp[cnx], family);
1439 		if (ret < 0) {
1440 			error = ret;
1441 			goto fail;
1442 		} else
1443 			cnx += ret;
1444 	}
1445 
1446 	/* found states are sorted for outbound processing */
1447 	if (npols > 1)
1448 		xfrm_state_sort(xfrm, tpp, cnx, family);
1449 
1450 	return cnx;
1451 
1452  fail:
1453 	for (cnx--; cnx >= 0; cnx--)
1454 		xfrm_state_put(tpp[cnx]);
1455 	return error;
1456 
1457 }
1458 
1459 static int xfrm_get_tos(const struct flowi *fl, int family)
1460 {
1461 	const struct xfrm_policy_afinfo *afinfo;
1462 	int tos = 0;
1463 
1464 	afinfo = xfrm_policy_get_afinfo(family);
1465 	tos = afinfo ? afinfo->get_tos(fl) : 0;
1466 
1467 	rcu_read_unlock();
1468 
1469 	return tos;
1470 }
1471 
1472 static inline struct xfrm_dst *xfrm_alloc_dst(struct net *net, int family)
1473 {
1474 	const struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
1475 	struct dst_ops *dst_ops;
1476 	struct xfrm_dst *xdst;
1477 
1478 	if (!afinfo)
1479 		return ERR_PTR(-EINVAL);
1480 
1481 	switch (family) {
1482 	case AF_INET:
1483 		dst_ops = &net->xfrm.xfrm4_dst_ops;
1484 		break;
1485 #if IS_ENABLED(CONFIG_IPV6)
1486 	case AF_INET6:
1487 		dst_ops = &net->xfrm.xfrm6_dst_ops;
1488 		break;
1489 #endif
1490 	default:
1491 		BUG();
1492 	}
1493 	xdst = dst_alloc(dst_ops, NULL, 1, DST_OBSOLETE_NONE, 0);
1494 
1495 	if (likely(xdst)) {
1496 		struct dst_entry *dst = &xdst->u.dst;
1497 
1498 		memset(dst + 1, 0, sizeof(*xdst) - sizeof(*dst));
1499 	} else
1500 		xdst = ERR_PTR(-ENOBUFS);
1501 
1502 	rcu_read_unlock();
1503 
1504 	return xdst;
1505 }
1506 
1507 static inline int xfrm_init_path(struct xfrm_dst *path, struct dst_entry *dst,
1508 				 int nfheader_len)
1509 {
1510 	const struct xfrm_policy_afinfo *afinfo =
1511 		xfrm_policy_get_afinfo(dst->ops->family);
1512 	int err;
1513 
1514 	if (!afinfo)
1515 		return -EINVAL;
1516 
1517 	err = afinfo->init_path(path, dst, nfheader_len);
1518 
1519 	rcu_read_unlock();
1520 
1521 	return err;
1522 }
1523 
1524 static inline int xfrm_fill_dst(struct xfrm_dst *xdst, struct net_device *dev,
1525 				const struct flowi *fl)
1526 {
1527 	const struct xfrm_policy_afinfo *afinfo =
1528 		xfrm_policy_get_afinfo(xdst->u.dst.ops->family);
1529 	int err;
1530 
1531 	if (!afinfo)
1532 		return -EINVAL;
1533 
1534 	err = afinfo->fill_dst(xdst, dev, fl);
1535 
1536 	rcu_read_unlock();
1537 
1538 	return err;
1539 }
1540 
1541 
1542 /* Allocate chain of dst_entry's, attach known xfrm's, calculate
1543  * all the metrics... In short, bundle a bundle.
1544  */
1545 
1546 static struct dst_entry *xfrm_bundle_create(struct xfrm_policy *policy,
1547 					    struct xfrm_state **xfrm, int nx,
1548 					    const struct flowi *fl,
1549 					    struct dst_entry *dst)
1550 {
1551 	struct net *net = xp_net(policy);
1552 	unsigned long now = jiffies;
1553 	struct net_device *dev;
1554 	struct xfrm_mode *inner_mode;
1555 	struct dst_entry *dst_prev = NULL;
1556 	struct dst_entry *dst0 = NULL;
1557 	int i = 0;
1558 	int err;
1559 	int header_len = 0;
1560 	int nfheader_len = 0;
1561 	int trailer_len = 0;
1562 	int tos;
1563 	int family = policy->selector.family;
1564 	xfrm_address_t saddr, daddr;
1565 
1566 	xfrm_flowi_addr_get(fl, &saddr, &daddr, family);
1567 
1568 	tos = xfrm_get_tos(fl, family);
1569 
1570 	dst_hold(dst);
1571 
1572 	for (; i < nx; i++) {
1573 		struct xfrm_dst *xdst = xfrm_alloc_dst(net, family);
1574 		struct dst_entry *dst1 = &xdst->u.dst;
1575 
1576 		err = PTR_ERR(xdst);
1577 		if (IS_ERR(xdst)) {
1578 			dst_release(dst);
1579 			goto put_states;
1580 		}
1581 
1582 		if (!dst_prev)
1583 			dst0 = dst1;
1584 		else
1585 			/* Ref count is taken during xfrm_alloc_dst();
1586 			 * no need to do dst_clone() on dst1.
1587 			 */
1588 			dst_prev->child = dst1;
1589 
1590 		if (xfrm[i]->sel.family == AF_UNSPEC) {
1591 			inner_mode = xfrm_ip2inner_mode(xfrm[i],
1592 							xfrm_af2proto(family));
1593 			if (!inner_mode) {
1594 				err = -EAFNOSUPPORT;
1595 				dst_release(dst);
1596 				goto put_states;
1597 			}
1598 		} else
1599 			inner_mode = xfrm[i]->inner_mode;
1600 
1601 		xdst->route = dst;
1602 		dst_copy_metrics(dst1, dst);
1603 
1604 		if (xfrm[i]->props.mode != XFRM_MODE_TRANSPORT) {
1605 			family = xfrm[i]->props.family;
1606 			dst = xfrm_dst_lookup(xfrm[i], tos, fl->flowi_oif,
1607 					      &saddr, &daddr, family,
1608 					      xfrm[i]->props.output_mark);
1609 			err = PTR_ERR(dst);
1610 			if (IS_ERR(dst))
1611 				goto put_states;
1612 		} else
1613 			dst_hold(dst);
1614 
1615 		dst1->xfrm = xfrm[i];
1616 		xdst->xfrm_genid = xfrm[i]->genid;
1617 
1618 		dst1->obsolete = DST_OBSOLETE_FORCE_CHK;
1619 		dst1->flags |= DST_HOST;
1620 		dst1->lastuse = now;
1621 
1622 		dst1->input = dst_discard;
1623 		dst1->output = inner_mode->afinfo->output;
1624 
1625 		dst1->next = dst_prev;
1626 		dst_prev = dst1;
1627 
1628 		header_len += xfrm[i]->props.header_len;
1629 		if (xfrm[i]->type->flags & XFRM_TYPE_NON_FRAGMENT)
1630 			nfheader_len += xfrm[i]->props.header_len;
1631 		trailer_len += xfrm[i]->props.trailer_len;
1632 	}
1633 
1634 	dst_prev->child = dst;
1635 	dst0->path = dst;
1636 
1637 	err = -ENODEV;
1638 	dev = dst->dev;
1639 	if (!dev)
1640 		goto free_dst;
1641 
1642 	xfrm_init_path((struct xfrm_dst *)dst0, dst, nfheader_len);
1643 	xfrm_init_pmtu(dst_prev);
1644 
1645 	for (dst_prev = dst0; dst_prev != dst; dst_prev = dst_prev->child) {
1646 		struct xfrm_dst *xdst = (struct xfrm_dst *)dst_prev;
1647 
1648 		err = xfrm_fill_dst(xdst, dev, fl);
1649 		if (err)
1650 			goto free_dst;
1651 
1652 		dst_prev->header_len = header_len;
1653 		dst_prev->trailer_len = trailer_len;
1654 		header_len -= xdst->u.dst.xfrm->props.header_len;
1655 		trailer_len -= xdst->u.dst.xfrm->props.trailer_len;
1656 	}
1657 
1658 out:
1659 	return dst0;
1660 
1661 put_states:
1662 	for (; i < nx; i++)
1663 		xfrm_state_put(xfrm[i]);
1664 free_dst:
1665 	if (dst0)
1666 		dst_release_immediate(dst0);
1667 	dst0 = ERR_PTR(err);
1668 	goto out;
1669 }
1670 
1671 static int xfrm_expand_policies(const struct flowi *fl, u16 family,
1672 				struct xfrm_policy **pols,
1673 				int *num_pols, int *num_xfrms)
1674 {
1675 	int i;
1676 
1677 	if (*num_pols == 0 || !pols[0]) {
1678 		*num_pols = 0;
1679 		*num_xfrms = 0;
1680 		return 0;
1681 	}
1682 	if (IS_ERR(pols[0]))
1683 		return PTR_ERR(pols[0]);
1684 
1685 	*num_xfrms = pols[0]->xfrm_nr;
1686 
1687 #ifdef CONFIG_XFRM_SUB_POLICY
1688 	if (pols[0] && pols[0]->action == XFRM_POLICY_ALLOW &&
1689 	    pols[0]->type != XFRM_POLICY_TYPE_MAIN) {
1690 		pols[1] = xfrm_policy_lookup_bytype(xp_net(pols[0]),
1691 						    XFRM_POLICY_TYPE_MAIN,
1692 						    fl, family,
1693 						    XFRM_POLICY_OUT);
1694 		if (pols[1]) {
1695 			if (IS_ERR(pols[1])) {
1696 				xfrm_pols_put(pols, *num_pols);
1697 				return PTR_ERR(pols[1]);
1698 			}
1699 			(*num_pols)++;
1700 			(*num_xfrms) += pols[1]->xfrm_nr;
1701 		}
1702 	}
1703 #endif
1704 	for (i = 0; i < *num_pols; i++) {
1705 		if (pols[i]->action != XFRM_POLICY_ALLOW) {
1706 			*num_xfrms = -1;
1707 			break;
1708 		}
1709 	}
1710 
1711 	return 0;
1712 
1713 }
1714 
1715 static void xfrm_last_dst_update(struct xfrm_dst *xdst, struct xfrm_dst *old)
1716 {
1717 	this_cpu_write(xfrm_last_dst, xdst);
1718 	if (old)
1719 		dst_release(&old->u.dst);
1720 }
1721 
1722 static void __xfrm_pcpu_work_fn(void)
1723 {
1724 	struct xfrm_dst *old;
1725 
1726 	old = this_cpu_read(xfrm_last_dst);
1727 	if (old && !xfrm_bundle_ok(old))
1728 		xfrm_last_dst_update(NULL, old);
1729 }
1730 
1731 static void xfrm_pcpu_work_fn(struct work_struct *work)
1732 {
1733 	local_bh_disable();
1734 	rcu_read_lock();
1735 	__xfrm_pcpu_work_fn();
1736 	rcu_read_unlock();
1737 	local_bh_enable();
1738 }
1739 
1740 void xfrm_policy_cache_flush(void)
1741 {
1742 	struct xfrm_dst *old;
1743 	bool found = false;
1744 	int cpu;
1745 
1746 	local_bh_disable();
1747 	rcu_read_lock();
1748 	for_each_possible_cpu(cpu) {
1749 		old = per_cpu(xfrm_last_dst, cpu);
1750 		if (old && !xfrm_bundle_ok(old)) {
1751 			if (smp_processor_id() == cpu) {
1752 				__xfrm_pcpu_work_fn();
1753 				continue;
1754 			}
1755 			found = true;
1756 			break;
1757 		}
1758 	}
1759 
1760 	rcu_read_unlock();
1761 	local_bh_enable();
1762 
1763 	if (!found)
1764 		return;
1765 
1766 	get_online_cpus();
1767 
1768 	for_each_possible_cpu(cpu) {
1769 		bool bundle_release;
1770 
1771 		rcu_read_lock();
1772 		old = per_cpu(xfrm_last_dst, cpu);
1773 		bundle_release = old && !xfrm_bundle_ok(old);
1774 		rcu_read_unlock();
1775 
1776 		if (!bundle_release)
1777 			continue;
1778 
1779 		if (cpu_online(cpu)) {
1780 			schedule_work_on(cpu, &xfrm_pcpu_work[cpu]);
1781 			continue;
1782 		}
1783 
1784 		rcu_read_lock();
1785 		old = per_cpu(xfrm_last_dst, cpu);
1786 		if (old && !xfrm_bundle_ok(old)) {
1787 			per_cpu(xfrm_last_dst, cpu) = NULL;
1788 			dst_release(&old->u.dst);
1789 		}
1790 		rcu_read_unlock();
1791 	}
1792 
1793 	put_online_cpus();
1794 }
1795 
1796 static bool xfrm_xdst_can_reuse(struct xfrm_dst *xdst,
1797 				struct xfrm_state * const xfrm[],
1798 				int num)
1799 {
1800 	const struct dst_entry *dst = &xdst->u.dst;
1801 	int i;
1802 
1803 	if (xdst->num_xfrms != num)
1804 		return false;
1805 
1806 	for (i = 0; i < num; i++) {
1807 		if (!dst || dst->xfrm != xfrm[i])
1808 			return false;
1809 		dst = dst->child;
1810 	}
1811 
1812 	return xfrm_bundle_ok(xdst);
1813 }
1814 
1815 static struct xfrm_dst *
1816 xfrm_resolve_and_create_bundle(struct xfrm_policy **pols, int num_pols,
1817 			       const struct flowi *fl, u16 family,
1818 			       struct dst_entry *dst_orig)
1819 {
1820 	struct net *net = xp_net(pols[0]);
1821 	struct xfrm_state *xfrm[XFRM_MAX_DEPTH];
1822 	struct xfrm_dst *xdst, *old;
1823 	struct dst_entry *dst;
1824 	int err;
1825 
1826 	/* Try to instantiate a bundle */
1827 	err = xfrm_tmpl_resolve(pols, num_pols, fl, xfrm, family);
1828 	if (err <= 0) {
1829 		if (err != 0 && err != -EAGAIN)
1830 			XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTPOLERROR);
1831 		return ERR_PTR(err);
1832 	}
1833 
1834 	xdst = this_cpu_read(xfrm_last_dst);
1835 	if (xdst &&
1836 	    xdst->u.dst.dev == dst_orig->dev &&
1837 	    xdst->num_pols == num_pols &&
1838 	    memcmp(xdst->pols, pols,
1839 		   sizeof(struct xfrm_policy *) * num_pols) == 0 &&
1840 	    xfrm_xdst_can_reuse(xdst, xfrm, err)) {
1841 		dst_hold(&xdst->u.dst);
1842 		xfrm_pols_put(pols, num_pols);
1843 		while (err > 0)
1844 			xfrm_state_put(xfrm[--err]);
1845 		return xdst;
1846 	}
1847 
1848 	old = xdst;
1849 
1850 	dst = xfrm_bundle_create(pols[0], xfrm, err, fl, dst_orig);
1851 	if (IS_ERR(dst)) {
1852 		XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTBUNDLEGENERROR);
1853 		return ERR_CAST(dst);
1854 	}
1855 
1856 	xdst = (struct xfrm_dst *)dst;
1857 	xdst->num_xfrms = err;
1858 	xdst->num_pols = num_pols;
1859 	memcpy(xdst->pols, pols, sizeof(struct xfrm_policy *) * num_pols);
1860 	xdst->policy_genid = atomic_read(&pols[0]->genid);
1861 
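	/* One reference for the caller, one parked in the per-cpu
	 * xfrm_last_dst cache by xfrm_last_dst_update() below.
	 */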
1862 	atomic_set(&xdst->u.dst.__refcnt, 2);
1863 	xfrm_last_dst_update(xdst, old);
1864 
1865 	return xdst;
1866 }
1867 
1868 static void xfrm_policy_queue_process(struct timer_list *t)
1869 {
1870 	struct sk_buff *skb;
1871 	struct sock *sk;
1872 	struct dst_entry *dst;
1873 	struct xfrm_policy *pol = from_timer(pol, t, polq.hold_timer);
1874 	struct net *net = xp_net(pol);
1875 	struct xfrm_policy_queue *pq = &pol->polq;
1876 	struct flowi fl;
1877 	struct sk_buff_head list;
1878 
1879 	spin_lock(&pq->hold_queue.lock);
1880 	skb = skb_peek(&pq->hold_queue);
1881 	if (!skb) {
1882 		spin_unlock(&pq->hold_queue.lock);
1883 		goto out;
1884 	}
1885 	dst = skb_dst(skb);
1886 	sk = skb->sk;
1887 	xfrm_decode_session(skb, &fl, dst->ops->family);
1888 	spin_unlock(&pq->hold_queue.lock);
1889 
1890 	dst_hold(dst->path);
1891 	dst = xfrm_lookup(net, dst->path, &fl, sk, 0);
1892 	if (IS_ERR(dst))
1893 		goto purge_queue;
1894 
1895 	if (dst->flags & DST_XFRM_QUEUE) {
1896 		dst_release(dst);
1897 
1898 		if (pq->timeout >= XFRM_QUEUE_TMO_MAX)
1899 			goto purge_queue;
1900 
1901 		pq->timeout = pq->timeout << 1;
1902 		if (!mod_timer(&pq->hold_timer, jiffies + pq->timeout))
1903 			xfrm_pol_hold(pol);
1904 		goto out;
1905 	}
1906 
1907 	dst_release(dst);
1908 
1909 	__skb_queue_head_init(&list);
1910 
1911 	spin_lock(&pq->hold_queue.lock);
1912 	pq->timeout = 0;
1913 	skb_queue_splice_init(&pq->hold_queue, &list);
1914 	spin_unlock(&pq->hold_queue.lock);
1915 
1916 	while (!skb_queue_empty(&list)) {
1917 		skb = __skb_dequeue(&list);
1918 
1919 		xfrm_decode_session(skb, &fl, skb_dst(skb)->ops->family);
1920 		dst_hold(skb_dst(skb)->path);
1921 		dst = xfrm_lookup(net, skb_dst(skb)->path, &fl, skb->sk, 0);
1922 		if (IS_ERR(dst)) {
1923 			kfree_skb(skb);
1924 			continue;
1925 		}
1926 
1927 		nf_reset(skb);
1928 		skb_dst_drop(skb);
1929 		skb_dst_set(skb, dst);
1930 
1931 		dst_output(net, skb->sk, skb);
1932 	}
1933 
1934 out:
1935 	xfrm_pol_put(pol);
1936 	return;
1937 
1938 purge_queue:
1939 	pq->timeout = 0;
1940 	skb_queue_purge(&pq->hold_queue);
1941 	xfrm_pol_put(pol);
1942 }
1943 
1944 static int xdst_queue_output(struct net *net, struct sock *sk, struct sk_buff *skb)
1945 {
1946 	unsigned long sched_next;
1947 	struct dst_entry *dst = skb_dst(skb);
1948 	struct xfrm_dst *xdst = (struct xfrm_dst *) dst;
1949 	struct xfrm_policy *pol = xdst->pols[0];
1950 	struct xfrm_policy_queue *pq = &pol->polq;
1951 
1952 	if (unlikely(skb_fclone_busy(sk, skb))) {
1953 		kfree_skb(skb);
1954 		return 0;
1955 	}
1956 
1957 	if (pq->hold_queue.qlen > XFRM_MAX_QUEUE_LEN) {
1958 		kfree_skb(skb);
1959 		return -EAGAIN;
1960 	}
1961 
1962 	skb_dst_force(skb);
1963 
1964 	spin_lock_bh(&pq->hold_queue.lock);
1965 
1966 	if (!pq->timeout)
1967 		pq->timeout = XFRM_QUEUE_TMO_MIN;
1968 
1969 	sched_next = jiffies + pq->timeout;
1970 
1971 	if (del_timer(&pq->hold_timer)) {
1972 		if (time_before(pq->hold_timer.expires, sched_next))
1973 			sched_next = pq->hold_timer.expires;
1974 		xfrm_pol_put(pol);
1975 	}
1976 
1977 	__skb_queue_tail(&pq->hold_queue, skb);
1978 	if (!mod_timer(&pq->hold_timer, sched_next))
1979 		xfrm_pol_hold(pol);
1980 
1981 	spin_unlock_bh(&pq->hold_queue.lock);
1982 
1983 	return 0;
1984 }
1985 
1986 static struct xfrm_dst *xfrm_create_dummy_bundle(struct net *net,
1987 						 struct xfrm_flo *xflo,
1988 						 const struct flowi *fl,
1989 						 int num_xfrms,
1990 						 u16 family)
1991 {
1992 	int err;
1993 	struct net_device *dev;
1994 	struct dst_entry *dst;
1995 	struct dst_entry *dst1;
1996 	struct xfrm_dst *xdst;
1997 
1998 	xdst = xfrm_alloc_dst(net, family);
1999 	if (IS_ERR(xdst))
2000 		return xdst;
2001 
2002 	if (!(xflo->flags & XFRM_LOOKUP_QUEUE) ||
2003 	    net->xfrm.sysctl_larval_drop ||
2004 	    num_xfrms <= 0)
2005 		return xdst;
2006 
2007 	dst = xflo->dst_orig;
2008 	dst1 = &xdst->u.dst;
2009 	dst_hold(dst);
2010 	xdst->route = dst;
2011 
2012 	dst_copy_metrics(dst1, dst);
2013 
2014 	dst1->obsolete = DST_OBSOLETE_FORCE_CHK;
2015 	dst1->flags |= DST_HOST | DST_XFRM_QUEUE;
2016 	dst1->lastuse = jiffies;
2017 
2018 	dst1->input = dst_discard;
2019 	dst1->output = xdst_queue_output;
2020 
2021 	dst_hold(dst);
2022 	dst1->child = dst;
2023 	dst1->path = dst;
2024 
2025 	xfrm_init_path((struct xfrm_dst *)dst1, dst, 0);
2026 
2027 	err = -ENODEV;
2028 	dev = dst->dev;
2029 	if (!dev)
2030 		goto free_dst;
2031 
2032 	err = xfrm_fill_dst(xdst, dev, fl);
2033 	if (err)
2034 		goto free_dst;
2035 
2036 out:
2037 	return xdst;
2038 
2039 free_dst:
2040 	dst_release(dst1);
2041 	xdst = ERR_PTR(err);
2042 	goto out;
2043 }
2044 
2045 static struct xfrm_dst *
2046 xfrm_bundle_lookup(struct net *net, const struct flowi *fl, u16 family, u8 dir, struct xfrm_flo *xflo)
2047 {
2048 	struct xfrm_policy *pols[XFRM_POLICY_TYPE_MAX];
2049 	int num_pols = 0, num_xfrms = 0, err;
2050 	struct xfrm_dst *xdst;
2051 
2052 	/* Resolve policies to use if we couldn't get them from
2053 	 * the previous cache entry */
2054 	num_pols = 1;
2055 	pols[0] = xfrm_policy_lookup(net, fl, family, dir);
2056 	err = xfrm_expand_policies(fl, family, pols,
2057 					   &num_pols, &num_xfrms);
2058 	if (err < 0)
2059 		goto inc_error;
2060 	if (num_pols == 0)
2061 		return NULL;
2062 	if (num_xfrms <= 0)
2063 		goto make_dummy_bundle;
2064 
2065 	xdst = xfrm_resolve_and_create_bundle(pols, num_pols, fl, family,
2066 						  xflo->dst_orig);
2067 	if (IS_ERR(xdst)) {
2068 		err = PTR_ERR(xdst);
2069 		if (err != -EAGAIN)
2070 			goto error;
2071 		goto make_dummy_bundle;
2072 	} else if (xdst == NULL) {
2073 		num_xfrms = 0;
2074 		goto make_dummy_bundle;
2075 	}
2076 
2077 	return xdst;
2078 
2079 make_dummy_bundle:
2080 	/* We found policies, but there are no bundles to instantiate:
2081 	 * either because the policy blocks, has no transformations, or
2082 	 * we could not build a template (no xfrm_states). */
2083 	xdst = xfrm_create_dummy_bundle(net, xflo, fl, num_xfrms, family);
2084 	if (IS_ERR(xdst)) {
2085 		xfrm_pols_put(pols, num_pols);
2086 		return ERR_CAST(xdst);
2087 	}
2088 	xdst->num_pols = num_pols;
2089 	xdst->num_xfrms = num_xfrms;
2090 	memcpy(xdst->pols, pols, sizeof(struct xfrm_policy *) * num_pols);
2091 
2092 	return xdst;
2093 
2094 inc_error:
2095 	XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTPOLERROR);
2096 error:
2097 	xfrm_pols_put(pols, num_pols);
2098 	return ERR_PTR(err);
2099 }
2100 
2101 static struct dst_entry *make_blackhole(struct net *net, u16 family,
2102 					struct dst_entry *dst_orig)
2103 {
2104 	const struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
2105 	struct dst_entry *ret;
2106 
2107 	if (!afinfo) {
2108 		dst_release(dst_orig);
2109 		return ERR_PTR(-EINVAL);
2110 	} else {
2111 		ret = afinfo->blackhole_route(net, dst_orig);
2112 	}
2113 	rcu_read_unlock();
2114 
2115 	return ret;
2116 }
2117 
2118 /* Main function: finds/creates a bundle for given flow.
2119  *
2120  * At the moment we consume ("eat") a reference to the raw IP route,
2121  * mostly to speed up lookups on interfaces with IPsec disabled.
2122  */
2123 struct dst_entry *xfrm_lookup(struct net *net, struct dst_entry *dst_orig,
2124 			      const struct flowi *fl,
2125 			      const struct sock *sk, int flags)
2126 {
2127 	struct xfrm_policy *pols[XFRM_POLICY_TYPE_MAX];
2128 	struct xfrm_dst *xdst;
2129 	struct dst_entry *dst, *route;
2130 	u16 family = dst_orig->ops->family;
2131 	u8 dir = XFRM_POLICY_OUT;
2132 	int i, err, num_pols, num_xfrms = 0, drop_pols = 0;
2133 
2134 	dst = NULL;
2135 	xdst = NULL;
2136 	route = NULL;
2137 
2138 	sk = sk_const_to_full_sk(sk);
2139 	if (sk && sk->sk_policy[XFRM_POLICY_OUT]) {
2140 		num_pols = 1;
2141 		pols[0] = xfrm_sk_policy_lookup(sk, XFRM_POLICY_OUT, fl, family);
2142 		err = xfrm_expand_policies(fl, family, pols,
2143 					   &num_pols, &num_xfrms);
2144 		if (err < 0)
2145 			goto dropdst;
2146 
2147 		if (num_pols) {
2148 			if (num_xfrms <= 0) {
2149 				drop_pols = num_pols;
2150 				goto no_transform;
2151 			}
2152 
2153 			xdst = xfrm_resolve_and_create_bundle(
2154 					pols, num_pols, fl,
2155 					family, dst_orig);
2156 			if (IS_ERR(xdst)) {
2157 				xfrm_pols_put(pols, num_pols);
2158 				err = PTR_ERR(xdst);
2159 				goto dropdst;
2160 			} else if (xdst == NULL) {
2161 				num_xfrms = 0;
2162 				drop_pols = num_pols;
2163 				goto no_transform;
2164 			}
2165 
2166 			route = xdst->route;
2167 		}
2168 	}
2169 
2170 	if (xdst == NULL) {
2171 		struct xfrm_flo xflo;
2172 
2173 		xflo.dst_orig = dst_orig;
2174 		xflo.flags = flags;
2175 
2176 		/* To accelerate a bit...  */
2177 		if ((dst_orig->flags & DST_NOXFRM) ||
2178 		    !net->xfrm.policy_count[XFRM_POLICY_OUT])
2179 			goto nopol;
2180 
2181 		xdst = xfrm_bundle_lookup(net, fl, family, dir, &xflo);
2182 		if (xdst == NULL)
2183 			goto nopol;
2184 		if (IS_ERR(xdst)) {
2185 			err = PTR_ERR(xdst);
2186 			goto dropdst;
2187 		}
2188 
2189 		num_pols = xdst->num_pols;
2190 		num_xfrms = xdst->num_xfrms;
2191 		memcpy(pols, xdst->pols, sizeof(struct xfrm_policy *) * num_pols);
2192 		route = xdst->route;
2193 	}
2194 
2195 	dst = &xdst->u.dst;
2196 	if (route == NULL && num_xfrms > 0) {
2197 		/* The only case when xfrm_bundle_lookup() returns a
2198 		 * bundle with a null route is when the template could
2199 		 * not be resolved. It means policies are there, but the
2200 		 * bundle could not be created, since we don't yet
2201 		 * have the xfrm_states. We need to wait for the KM to
2202 		 * negotiate new SAs or bail out with an error. */
2203 		if (net->xfrm.sysctl_larval_drop) {
2204 			XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTNOSTATES);
2205 			err = -EREMOTE;
2206 			goto error;
2207 		}
2208 
2209 		err = -EAGAIN;
2210 
2211 		XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTNOSTATES);
2212 		goto error;
2213 	}
2214 
2215 no_transform:
2216 	if (num_pols == 0)
2217 		goto nopol;
2218 
2219 	if ((flags & XFRM_LOOKUP_ICMP) &&
2220 	    !(pols[0]->flags & XFRM_POLICY_ICMP)) {
2221 		err = -ENOENT;
2222 		goto error;
2223 	}
2224 
2225 	for (i = 0; i < num_pols; i++)
2226 		pols[i]->curlft.use_time = get_seconds();
2227 
2228 	if (num_xfrms < 0) {
2229 		/* Prohibit the flow */
2230 		XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTPOLBLOCK);
2231 		err = -EPERM;
2232 		goto error;
2233 	} else if (num_xfrms > 0) {
2234 		/* Flow transformed */
2235 		dst_release(dst_orig);
2236 	} else {
2237 		/* Flow passes untransformed */
2238 		dst_release(dst);
2239 		dst = dst_orig;
2240 	}
2241 ok:
2242 	xfrm_pols_put(pols, drop_pols);
2243 	if (dst && dst->xfrm &&
2244 	    dst->xfrm->props.mode == XFRM_MODE_TUNNEL)
2245 		dst->flags |= DST_XFRM_TUNNEL;
2246 	return dst;
2247 
2248 nopol:
2249 	if (!(flags & XFRM_LOOKUP_ICMP)) {
2250 		dst = dst_orig;
2251 		goto ok;
2252 	}
2253 	err = -ENOENT;
2254 error:
2255 	dst_release(dst);
2256 dropdst:
2257 	if (!(flags & XFRM_LOOKUP_KEEP_DST_REF))
2258 		dst_release(dst_orig);
2259 	xfrm_pols_put(pols, drop_pols);
2260 	return ERR_PTR(err);
2261 }
2262 EXPORT_SYMBOL(xfrm_lookup);
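
/*
 * Illustrative sketch (not a verbatim caller): a typical output path
 * hands its plain route to xfrm_lookup() and transmits through whatever
 * dst comes back:
 *
 *	dst = xfrm_lookup(net, &rt->dst, &fl, sk, 0);
 *	if (IS_ERR(dst))
 *		return PTR_ERR(dst);
 *	skb_dst_set(skb, dst);
 *	return dst_output(net, sk, skb);
 *
 * Without XFRM_LOOKUP_KEEP_DST_REF the reference to rt->dst has already
 * been dropped on the error path; on success it is either consumed by
 * the bundle or handed back as-is when the flow passes untransformed.
 */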
2263 
2264 /* Callers of xfrm_lookup_route() must ensure a call to dst_output().
2265  * Otherwise we may send out blackholed packets.
2266  */
2267 struct dst_entry *xfrm_lookup_route(struct net *net, struct dst_entry *dst_orig,
2268 				    const struct flowi *fl,
2269 				    const struct sock *sk, int flags)
2270 {
2271 	struct dst_entry *dst = xfrm_lookup(net, dst_orig, fl, sk,
2272 					    flags | XFRM_LOOKUP_QUEUE |
2273 					    XFRM_LOOKUP_KEEP_DST_REF);
2274 
2275 	if (IS_ERR(dst) && PTR_ERR(dst) == -EREMOTE)
2276 		return make_blackhole(net, dst_orig->ops->family, dst_orig);
2277 
2278 	return dst;
2279 }
2280 EXPORT_SYMBOL(xfrm_lookup_route);
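
/*
 * Note (illustrative): when xfrm_lookup() fails with -EREMOTE (larval
 * drop while the KM is still negotiating states), the wrapper above
 * substitutes a per-family blackhole route instead of an error, so
 * callers such as the IPv4 output path can proceed as if routing
 * succeeded.  The blackhole only takes effect once the packet reaches
 * dst_output(); a caller that transmits through any other path could
 * leak packets that were supposed to be dropped, hence the rule above.
 */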
2281 
2282 static inline int
2283 xfrm_secpath_reject(int idx, struct sk_buff *skb, const struct flowi *fl)
2284 {
2285 	struct xfrm_state *x;
2286 
2287 	if (!skb->sp || idx < 0 || idx >= skb->sp->len)
2288 		return 0;
2289 	x = skb->sp->xvec[idx];
2290 	if (!x->type->reject)
2291 		return 0;
2292 	return x->type->reject(x, skb, fl);
2293 }
2294 
2295 /* When skb is transformed back to its "native" form, we have to
2296  * check policy restrictions. At the moment we do this in a maximally
2297  * stupid way. Shame on me. :-) Of course, connected sockets must
2298  * have policy cached at them.
2299  */
2300 
2301 static inline int
2302 xfrm_state_ok(const struct xfrm_tmpl *tmpl, const struct xfrm_state *x,
2303 	      unsigned short family)
2304 {
2305 	if (xfrm_state_kern(x))
2306 		return tmpl->optional && !xfrm_state_addr_cmp(tmpl, x, tmpl->encap_family);
2307 	return	x->id.proto == tmpl->id.proto &&
2308 		(x->id.spi == tmpl->id.spi || !tmpl->id.spi) &&
2309 		(x->props.reqid == tmpl->reqid || !tmpl->reqid) &&
2310 		x->props.mode == tmpl->mode &&
2311 		(tmpl->allalgs || (tmpl->aalgos & (1<<x->props.aalgo)) ||
2312 		 !(xfrm_id_proto_match(tmpl->id.proto, IPSEC_PROTO_ANY))) &&
2313 		!(x->props.mode != XFRM_MODE_TRANSPORT &&
2314 		  xfrm_state_addr_cmp(tmpl, x, family));
2315 }
2316 
2317 /*
2318  * 0 or a positive value is returned when validation succeeds: either the
2319  * start index is passed through (bypass for an optional transport-mode
2320  * template), or the index following the matched secpath state is returned.
2321  * -1 is returned when no matching template is found.
2322  * Otherwise "-2 - errored_index" is returned.
2323  */
2324 static inline int
2325 xfrm_policy_ok(const struct xfrm_tmpl *tmpl, const struct sec_path *sp, int start,
2326 	       unsigned short family)
2327 {
2328 	int idx = start;
2329 
2330 	if (tmpl->optional) {
2331 		if (tmpl->mode == XFRM_MODE_TRANSPORT)
2332 			return start;
2333 	} else
2334 		start = -1;
2335 	for (; idx < sp->len; idx++) {
2336 		if (xfrm_state_ok(tmpl, sp->xvec[idx], family))
2337 			return ++idx;
2338 		if (sp->xvec[idx]->props.mode != XFRM_MODE_TRANSPORT) {
2339 			if (start == -1)
2340 				start = -2-idx;
2341 			break;
2342 		}
2343 	}
2344 	return start;
2345 }
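
/*
 * Worked example (illustrative): take a required tunnel-mode ESP
 * template and a secpath { xvec[0] = transport AH, xvec[1] = tunnel ESP }
 * with start == 0.  xvec[0] does not satisfy the template but is a
 * transport state, so the scan continues; xvec[1] matches, and 2 (the
 * next index) is returned.  Had xvec[1] been tunnel AH instead, the scan
 * would stop at the non-transport mismatch and return -2 - 1 == -3, from
 * which the caller in __xfrm_policy_check() recovers errored index 1 via
 * xerr_idx = -(2 + k).
 */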
2346 
2347 int __xfrm_decode_session(struct sk_buff *skb, struct flowi *fl,
2348 			  unsigned int family, int reverse)
2349 {
2350 	const struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
2351 	int err;
2352 
2353 	if (unlikely(afinfo == NULL))
2354 		return -EAFNOSUPPORT;
2355 
2356 	afinfo->decode_session(skb, fl, reverse);
2357 	err = security_xfrm_decode_session(skb, &fl->flowi_secid);
2358 	rcu_read_unlock();
2359 	return err;
2360 }
2361 EXPORT_SYMBOL(__xfrm_decode_session);
2362 
2363 static inline int secpath_has_nontransport(const struct sec_path *sp, int k, int *idxp)
2364 {
2365 	for (; k < sp->len; k++) {
2366 		if (sp->xvec[k]->props.mode != XFRM_MODE_TRANSPORT) {
2367 			*idxp = k;
2368 			return 1;
2369 		}
2370 	}
2371 
2372 	return 0;
2373 }
2374 
2375 int __xfrm_policy_check(struct sock *sk, int dir, struct sk_buff *skb,
2376 			unsigned short family)
2377 {
2378 	struct net *net = dev_net(skb->dev);
2379 	struct xfrm_policy *pol;
2380 	struct xfrm_policy *pols[XFRM_POLICY_TYPE_MAX];
2381 	int npols = 0;
2382 	int xfrm_nr;
2383 	int pi;
2384 	int reverse;
2385 	struct flowi fl;
2386 	int xerr_idx = -1;
2387 
2388 	reverse = dir & ~XFRM_POLICY_MASK;
2389 	dir &= XFRM_POLICY_MASK;
2390 
2391 	if (__xfrm_decode_session(skb, &fl, family, reverse) < 0) {
2392 		XFRM_INC_STATS(net, LINUX_MIB_XFRMINHDRERROR);
2393 		return 0;
2394 	}
2395 
2396 	nf_nat_decode_session(skb, &fl, family);
2397 
2398 	/* First, check used SA against their selectors. */
2399 	if (skb->sp) {
2400 		int i;
2401 
2402 		for (i = skb->sp->len-1; i >= 0; i--) {
2403 			struct xfrm_state *x = skb->sp->xvec[i];
2404 			if (!xfrm_selector_match(&x->sel, &fl, family)) {
2405 				XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATEMISMATCH);
2406 				return 0;
2407 			}
2408 		}
2409 	}
2410 
2411 	pol = NULL;
2412 	sk = sk_to_full_sk(sk);
2413 	if (sk && sk->sk_policy[dir]) {
2414 		pol = xfrm_sk_policy_lookup(sk, dir, &fl, family);
2415 		if (IS_ERR(pol)) {
2416 			XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLERROR);
2417 			return 0;
2418 		}
2419 	}
2420 
2421 	if (!pol)
2422 		pol = xfrm_policy_lookup(net, &fl, family, dir);
2423 
2424 	if (IS_ERR(pol)) {
2425 		XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLERROR);
2426 		return 0;
2427 	}
2428 
2429 	if (!pol) {
2430 		if (skb->sp && secpath_has_nontransport(skb->sp, 0, &xerr_idx)) {
2431 			xfrm_secpath_reject(xerr_idx, skb, &fl);
2432 			XFRM_INC_STATS(net, LINUX_MIB_XFRMINNOPOLS);
2433 			return 0;
2434 		}
2435 		return 1;
2436 	}
2437 
2438 	pol->curlft.use_time = get_seconds();
2439 
2440 	pols[0] = pol;
2441 	npols++;
2442 #ifdef CONFIG_XFRM_SUB_POLICY
2443 	if (pols[0]->type != XFRM_POLICY_TYPE_MAIN) {
2444 		pols[1] = xfrm_policy_lookup_bytype(net, XFRM_POLICY_TYPE_MAIN,
2445 						    &fl, family,
2446 						    XFRM_POLICY_IN);
2447 		if (pols[1]) {
2448 			if (IS_ERR(pols[1])) {
2449 				XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLERROR);
2450 				return 0;
2451 			}
2452 			pols[1]->curlft.use_time = get_seconds();
2453 			npols++;
2454 		}
2455 	}
2456 #endif
2457 
2458 	if (pol->action == XFRM_POLICY_ALLOW) {
2459 		struct sec_path *sp;
2460 		static struct sec_path dummy;
2461 		struct xfrm_tmpl *tp[XFRM_MAX_DEPTH];
2462 		struct xfrm_tmpl *stp[XFRM_MAX_DEPTH];
2463 		struct xfrm_tmpl **tpp = tp;
2464 		int ti = 0;
2465 		int i, k;
2466 
2467 		if ((sp = skb->sp) == NULL)
2468 			sp = &dummy;
2469 
2470 		for (pi = 0; pi < npols; pi++) {
2471 			if (pols[pi] != pol &&
2472 			    pols[pi]->action != XFRM_POLICY_ALLOW) {
2473 				XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLBLOCK);
2474 				goto reject;
2475 			}
2476 			if (ti + pols[pi]->xfrm_nr >= XFRM_MAX_DEPTH) {
2477 				XFRM_INC_STATS(net, LINUX_MIB_XFRMINBUFFERERROR);
2478 				goto reject_error;
2479 			}
2480 			for (i = 0; i < pols[pi]->xfrm_nr; i++)
2481 				tpp[ti++] = &pols[pi]->xfrm_vec[i];
2482 		}
2483 		xfrm_nr = ti;
2484 		if (npols > 1) {
2485 			xfrm_tmpl_sort(stp, tpp, xfrm_nr, family, net);
2486 			tpp = stp;
2487 		}
2488 
2489 		/* For each tunnel xfrm, find the first matching tmpl.
2490 		 * For each tmpl before that, find the corresponding xfrm.
2491 		 * Order is _important_. Later we will implement
2492 		 * some barriers, but at the moment barriers
2493 		 * are implied between every two transformations.
2494 		 */
2495 		for (i = xfrm_nr-1, k = 0; i >= 0; i--) {
2496 			k = xfrm_policy_ok(tpp[i], sp, k, family);
2497 			if (k < 0) {
2498 				if (k < -1)
2499 					/* "-2 - errored_index" returned */
2500 					xerr_idx = -(2+k);
2501 				XFRM_INC_STATS(net, LINUX_MIB_XFRMINTMPLMISMATCH);
2502 				goto reject;
2503 			}
2504 		}
2505 
2506 		if (secpath_has_nontransport(sp, k, &xerr_idx)) {
2507 			XFRM_INC_STATS(net, LINUX_MIB_XFRMINTMPLMISMATCH);
2508 			goto reject;
2509 		}
2510 
2511 		xfrm_pols_put(pols, npols);
2512 		return 1;
2513 	}
2514 	XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLBLOCK);
2515 
2516 reject:
2517 	xfrm_secpath_reject(xerr_idx, skb, &fl);
2518 reject_error:
2519 	xfrm_pols_put(pols, npols);
2520 	return 0;
2521 }
2522 EXPORT_SYMBOL(__xfrm_policy_check);
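
/*
 * Illustrative sketch of a typical caller: protocol input handlers gate
 * delivery on this check through per-family inline wrappers, along the
 * lines of
 *
 *	if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
 *		goto discard;
 *
 * A return value of 1 means the packet's secpath satisfies every
 * applicable IN policy; 0 means the packet must be dropped.
 */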
2523 
2524 int __xfrm_route_forward(struct sk_buff *skb, unsigned short family)
2525 {
2526 	struct net *net = dev_net(skb->dev);
2527 	struct flowi fl;
2528 	struct dst_entry *dst;
2529 	int res = 1;
2530 
2531 	if (xfrm_decode_session(skb, &fl, family) < 0) {
2532 		XFRM_INC_STATS(net, LINUX_MIB_XFRMFWDHDRERROR);
2533 		return 0;
2534 	}
2535 
2536 	skb_dst_force(skb);
2537 
2538 	dst = xfrm_lookup(net, skb_dst(skb), &fl, NULL, XFRM_LOOKUP_QUEUE);
2539 	if (IS_ERR(dst)) {
2540 		res = 0;
2541 		dst = NULL;
2542 	}
2543 	skb_dst_set(skb, dst);
2544 	return res;
2545 }
2546 EXPORT_SYMBOL(__xfrm_route_forward);
2547 
2548 /* Optimize later using cookies and generation ids. */
2549 
2550 static struct dst_entry *xfrm_dst_check(struct dst_entry *dst, u32 cookie)
2551 {
2552 	/* Code (such as __xfrm4_bundle_create()) sets dst->obsolete
2553 	 * to DST_OBSOLETE_FORCE_CHK to force all XFRM destinations to
2554 	 * get validated by dst_ops->check on every use.  We do this
2555 	 * because when a normal route referenced by an XFRM dst is
2556 	 * obsoleted we do not go looking around for all parent
2557 	 * referencing XFRM dsts so that we can invalidate them.  It
2558 	 * is just too much work.  Instead we make the checks here on
2559 	 * every use.  For example:
2560 	 *
2561 	 *	XFRM dst A --> IPv4 dst X
2562 	 *
2563 	 * X is the "xdst->route" of A (X is also the "dst->path" of A
2564 	 * in this example).  If X is marked obsolete, "A" will not
2565 	 * notice.  That's what we are validating here via the
2566 	 * stale_bundle() check.
2567 	 *
2568 	 * When a dst is removed from the fib tree, it is marked with
2569 	 * DST_OBSOLETE_DEAD.
2570 	 * This forces stale_bundle() to fail on any xdst bundle that has
2571 	 * this dst linked into it.
2572 	 */
2573 	if (dst->obsolete < 0 && !stale_bundle(dst))
2574 		return dst;
2575 
2576 	return NULL;
2577 }
2578 
2579 static int stale_bundle(struct dst_entry *dst)
2580 {
2581 	return !xfrm_bundle_ok((struct xfrm_dst *)dst);
2582 }
2583 
2584 void xfrm_dst_ifdown(struct dst_entry *dst, struct net_device *dev)
2585 {
2586 	while ((dst = dst->child) && dst->xfrm && dst->dev == dev) {
2587 		dst->dev = dev_net(dev)->loopback_dev;
2588 		dev_hold(dst->dev);
2589 		dev_put(dev);
2590 	}
2591 }
2592 EXPORT_SYMBOL(xfrm_dst_ifdown);
2593 
2594 static void xfrm_link_failure(struct sk_buff *skb)
2595 {
2596 	/* Impossible. Such a dst must be popped before it reaches the point of failure. */
2597 }
2598 
2599 static struct dst_entry *xfrm_negative_advice(struct dst_entry *dst)
2600 {
2601 	if (dst) {
2602 		if (dst->obsolete) {
2603 			dst_release(dst);
2604 			dst = NULL;
2605 		}
2606 	}
2607 	return dst;
2608 }
2609 
2610 static void xfrm_init_pmtu(struct dst_entry *dst)
2611 {
2612 	do {
2613 		struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
2614 		u32 pmtu, route_mtu_cached;
2615 
2616 		pmtu = dst_mtu(dst->child);
2617 		xdst->child_mtu_cached = pmtu;
2618 
2619 		pmtu = xfrm_state_mtu(dst->xfrm, pmtu);
2620 
2621 		route_mtu_cached = dst_mtu(xdst->route);
2622 		xdst->route_mtu_cached = route_mtu_cached;
2623 
2624 		if (pmtu > route_mtu_cached)
2625 			pmtu = route_mtu_cached;
2626 
2627 		dst_metric_set(dst, RTAX_MTU, pmtu);
2628 	} while ((dst = dst->next));
2629 }
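
/*
 * Worked example (illustrative): for a single ESP tunnel whose inner
 * (child) and outer (route) MTUs are both 1500, xfrm_state_mtu()
 * subtracts the per-state overhead (outer IP header, ESP header and
 * trailer, IV, ICV, padding), so RTAX_MTU ends up at the reduced value,
 * typically somewhere around 1400-1450 bytes depending on the
 * algorithms in use; the clamp above guarantees it can never exceed the
 * cached route MTU.
 */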
2630 
2631 /* Check that the bundle accepts the flow and its components are
2632  * still valid.
2633  */
2634 
2635 static int xfrm_bundle_ok(struct xfrm_dst *first)
2636 {
2637 	struct dst_entry *dst = &first->u.dst;
2638 	struct xfrm_dst *last;
2639 	u32 mtu;
2640 
2641 	if (!dst_check(dst->path, ((struct xfrm_dst *)dst)->path_cookie) ||
2642 	    (dst->dev && !netif_running(dst->dev)))
2643 		return 0;
2644 
2645 	if (dst->flags & DST_XFRM_QUEUE)
2646 		return 1;
2647 
2648 	last = NULL;
2649 
2650 	do {
2651 		struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
2652 
2653 		if (dst->xfrm->km.state != XFRM_STATE_VALID)
2654 			return 0;
2655 		if (xdst->xfrm_genid != dst->xfrm->genid)
2656 			return 0;
2657 		if (xdst->num_pols > 0 &&
2658 		    xdst->policy_genid != atomic_read(&xdst->pols[0]->genid))
2659 			return 0;
2660 
2661 		mtu = dst_mtu(dst->child);
2662 		if (xdst->child_mtu_cached != mtu) {
2663 			last = xdst;
2664 			xdst->child_mtu_cached = mtu;
2665 		}
2666 
2667 		if (!dst_check(xdst->route, xdst->route_cookie))
2668 			return 0;
2669 		mtu = dst_mtu(xdst->route);
2670 		if (xdst->route_mtu_cached != mtu) {
2671 			last = xdst;
2672 			xdst->route_mtu_cached = mtu;
2673 		}
2674 
2675 		dst = dst->child;
2676 	} while (dst->xfrm);
2677 
2678 	if (likely(!last))
2679 		return 1;
2680 
2681 	mtu = last->child_mtu_cached;
2682 	for (;;) {
2683 		dst = &last->u.dst;
2684 
2685 		mtu = xfrm_state_mtu(dst->xfrm, mtu);
2686 		if (mtu > last->route_mtu_cached)
2687 			mtu = last->route_mtu_cached;
2688 		dst_metric_set(dst, RTAX_MTU, mtu);
2689 
2690 		if (last == first)
2691 			break;
2692 
2693 		last = (struct xfrm_dst *)last->u.dst.next;
2694 		last->child_mtu_cached = mtu;
2695 	}
2696 
2697 	return 1;
2698 }
2699 
2700 static unsigned int xfrm_default_advmss(const struct dst_entry *dst)
2701 {
2702 	return dst_metric_advmss(dst->path);
2703 }
2704 
2705 static unsigned int xfrm_mtu(const struct dst_entry *dst)
2706 {
2707 	unsigned int mtu = dst_metric_raw(dst, RTAX_MTU);
2708 
2709 	return mtu ? : dst_mtu(dst->path);
2710 }
2711 
2712 static const void *xfrm_get_dst_nexthop(const struct dst_entry *dst,
2713 					const void *daddr)
2714 {
2715 	const struct dst_entry *path = dst->path;
2716 
2717 	for (; dst != path; dst = dst->child) {
2718 		const struct xfrm_state *xfrm = dst->xfrm;
2719 
2720 		if (xfrm->props.mode == XFRM_MODE_TRANSPORT)
2721 			continue;
2722 		if (xfrm->type->flags & XFRM_TYPE_REMOTE_COADDR)
2723 			daddr = xfrm->coaddr;
2724 		else if (!(xfrm->type->flags & XFRM_TYPE_LOCAL_COADDR))
2725 			daddr = &xfrm->id.daddr;
2726 	}
2727 	return daddr;
2728 }
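
/*
 * Worked example (illustrative): transport-mode levels leave daddr
 * untouched, while every tunnel-mode level rewrites it, so for a bundle
 * of the form "tunnel ESP -> plain route" the neighbour is resolved
 * against the tunnel endpoint xfrm->id.daddr rather than the inner
 * flow's destination.
 */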
2729 
2730 static struct neighbour *xfrm_neigh_lookup(const struct dst_entry *dst,
2731 					   struct sk_buff *skb,
2732 					   const void *daddr)
2733 {
2734 	const struct dst_entry *path = dst->path;
2735 
2736 	if (!skb)
2737 		daddr = xfrm_get_dst_nexthop(dst, daddr);
2738 	return path->ops->neigh_lookup(path, skb, daddr);
2739 }
2740 
2741 static void xfrm_confirm_neigh(const struct dst_entry *dst, const void *daddr)
2742 {
2743 	const struct dst_entry *path = dst->path;
2744 
2745 	daddr = xfrm_get_dst_nexthop(dst, daddr);
2746 	path->ops->confirm_neigh(path, daddr);
2747 }
2748 
2749 int xfrm_policy_register_afinfo(const struct xfrm_policy_afinfo *afinfo, int family)
2750 {
2751 	int err = 0;
2752 
2753 	if (WARN_ON(family >= ARRAY_SIZE(xfrm_policy_afinfo)))
2754 		return -EAFNOSUPPORT;
2755 
2756 	spin_lock(&xfrm_policy_afinfo_lock);
2757 	if (unlikely(xfrm_policy_afinfo[family] != NULL))
2758 		err = -EEXIST;
2759 	else {
2760 		struct dst_ops *dst_ops = afinfo->dst_ops;
2761 		if (likely(dst_ops->kmem_cachep == NULL))
2762 			dst_ops->kmem_cachep = xfrm_dst_cache;
2763 		if (likely(dst_ops->check == NULL))
2764 			dst_ops->check = xfrm_dst_check;
2765 		if (likely(dst_ops->default_advmss == NULL))
2766 			dst_ops->default_advmss = xfrm_default_advmss;
2767 		if (likely(dst_ops->mtu == NULL))
2768 			dst_ops->mtu = xfrm_mtu;
2769 		if (likely(dst_ops->negative_advice == NULL))
2770 			dst_ops->negative_advice = xfrm_negative_advice;
2771 		if (likely(dst_ops->link_failure == NULL))
2772 			dst_ops->link_failure = xfrm_link_failure;
2773 		if (likely(dst_ops->neigh_lookup == NULL))
2774 			dst_ops->neigh_lookup = xfrm_neigh_lookup;
2775 		if (likely(!dst_ops->confirm_neigh))
2776 			dst_ops->confirm_neigh = xfrm_confirm_neigh;
2777 		rcu_assign_pointer(xfrm_policy_afinfo[family], afinfo);
2778 	}
2779 	spin_unlock(&xfrm_policy_afinfo_lock);
2780 
2781 	return err;
2782 }
2783 EXPORT_SYMBOL(xfrm_policy_register_afinfo);
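
/*
 * Illustrative sketch (field set abridged; names as used by the IPv4
 * xfrm module of this era): an address family registers its hooks once
 * at init time, e.g.
 *
 *	static const struct xfrm_policy_afinfo xfrm4_policy_afinfo = {
 *		.dst_ops	 = &xfrm4_dst_ops_template,
 *		.dst_lookup	 = xfrm4_dst_lookup,
 *		.decode_session	 = _decode_session4,
 *		.blackhole_route = ipv4_blackhole_route,
 *	};
 *
 *	xfrm_policy_register_afinfo(&xfrm4_policy_afinfo, AF_INET);
 *
 * Any dst_ops methods the family leaves NULL are filled in with the
 * xfrm defaults installed above.
 */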
2784 
2785 void xfrm_policy_unregister_afinfo(const struct xfrm_policy_afinfo *afinfo)
2786 {
2787 	struct dst_ops *dst_ops = afinfo->dst_ops;
2788 	int i;
2789 
2790 	for (i = 0; i < ARRAY_SIZE(xfrm_policy_afinfo); i++) {
2791 		if (xfrm_policy_afinfo[i] != afinfo)
2792 			continue;
2793 		RCU_INIT_POINTER(xfrm_policy_afinfo[i], NULL);
2794 		break;
2795 	}
2796 
2797 	synchronize_rcu();
2798 
2799 	dst_ops->kmem_cachep = NULL;
2800 	dst_ops->check = NULL;
2801 	dst_ops->negative_advice = NULL;
2802 	dst_ops->link_failure = NULL;
2803 }
2804 EXPORT_SYMBOL(xfrm_policy_unregister_afinfo);
2805 
2806 #ifdef CONFIG_XFRM_STATISTICS
2807 static int __net_init xfrm_statistics_init(struct net *net)
2808 {
2809 	int rv;
2810 	net->mib.xfrm_statistics = alloc_percpu(struct linux_xfrm_mib);
2811 	if (!net->mib.xfrm_statistics)
2812 		return -ENOMEM;
2813 	rv = xfrm_proc_init(net);
2814 	if (rv < 0)
2815 		free_percpu(net->mib.xfrm_statistics);
2816 	return rv;
2817 }
2818 
2819 static void xfrm_statistics_fini(struct net *net)
2820 {
2821 	xfrm_proc_fini(net);
2822 	free_percpu(net->mib.xfrm_statistics);
2823 }
2824 #else
2825 static int __net_init xfrm_statistics_init(struct net *net)
2826 {
2827 	return 0;
2828 }
2829 
2830 static void xfrm_statistics_fini(struct net *net)
2831 {
2832 }
2833 #endif
2834 
2835 static int __net_init xfrm_policy_init(struct net *net)
2836 {
2837 	unsigned int hmask, sz;
2838 	int dir;
2839 
2840 	if (net_eq(net, &init_net))
2841 		xfrm_dst_cache = kmem_cache_create("xfrm_dst_cache",
2842 					   sizeof(struct xfrm_dst),
2843 					   0, SLAB_HWCACHE_ALIGN|SLAB_PANIC,
2844 					   NULL);
2845 
2846 	hmask = 8 - 1;
2847 	sz = (hmask+1) * sizeof(struct hlist_head);
2848 
2849 	net->xfrm.policy_byidx = xfrm_hash_alloc(sz);
2850 	if (!net->xfrm.policy_byidx)
2851 		goto out_byidx;
2852 	net->xfrm.policy_idx_hmask = hmask;
2853 
2854 	for (dir = 0; dir < XFRM_POLICY_MAX; dir++) {
2855 		struct xfrm_policy_hash *htab;
2856 
2857 		net->xfrm.policy_count[dir] = 0;
2858 		net->xfrm.policy_count[XFRM_POLICY_MAX + dir] = 0;
2859 		INIT_HLIST_HEAD(&net->xfrm.policy_inexact[dir]);
2860 
2861 		htab = &net->xfrm.policy_bydst[dir];
2862 		htab->table = xfrm_hash_alloc(sz);
2863 		if (!htab->table)
2864 			goto out_bydst;
2865 		htab->hmask = hmask;
2866 		htab->dbits4 = 32;
2867 		htab->sbits4 = 32;
2868 		htab->dbits6 = 128;
2869 		htab->sbits6 = 128;
2870 	}
2871 	net->xfrm.policy_hthresh.lbits4 = 32;
2872 	net->xfrm.policy_hthresh.rbits4 = 32;
2873 	net->xfrm.policy_hthresh.lbits6 = 128;
2874 	net->xfrm.policy_hthresh.rbits6 = 128;
2875 
2876 	seqlock_init(&net->xfrm.policy_hthresh.lock);
2877 
2878 	INIT_LIST_HEAD(&net->xfrm.policy_all);
2879 	INIT_WORK(&net->xfrm.policy_hash_work, xfrm_hash_resize);
2880 	INIT_WORK(&net->xfrm.policy_hthresh.work, xfrm_hash_rebuild);
2881 	if (net_eq(net, &init_net))
2882 		xfrm_dev_init();
2883 	return 0;
2884 
2885 out_bydst:
2886 	for (dir--; dir >= 0; dir--) {
2887 		struct xfrm_policy_hash *htab;
2888 
2889 		htab = &net->xfrm.policy_bydst[dir];
2890 		xfrm_hash_free(htab->table, sz);
2891 	}
2892 	xfrm_hash_free(net->xfrm.policy_byidx, sz);
2893 out_byidx:
2894 	return -ENOMEM;
2895 }
2896 
2897 static void xfrm_policy_fini(struct net *net)
2898 {
2899 	unsigned int sz;
2900 	int dir;
2901 
2902 	flush_work(&net->xfrm.policy_hash_work);
2903 #ifdef CONFIG_XFRM_SUB_POLICY
2904 	xfrm_policy_flush(net, XFRM_POLICY_TYPE_SUB, false);
2905 #endif
2906 	xfrm_policy_flush(net, XFRM_POLICY_TYPE_MAIN, false);
2907 
2908 	WARN_ON(!list_empty(&net->xfrm.policy_all));
2909 
2910 	for (dir = 0; dir < XFRM_POLICY_MAX; dir++) {
2911 		struct xfrm_policy_hash *htab;
2912 
2913 		WARN_ON(!hlist_empty(&net->xfrm.policy_inexact[dir]));
2914 
2915 		htab = &net->xfrm.policy_bydst[dir];
2916 		sz = (htab->hmask + 1) * sizeof(struct hlist_head);
2917 		WARN_ON(!hlist_empty(htab->table));
2918 		xfrm_hash_free(htab->table, sz);
2919 	}
2920 
2921 	sz = (net->xfrm.policy_idx_hmask + 1) * sizeof(struct hlist_head);
2922 	WARN_ON(!hlist_empty(net->xfrm.policy_byidx));
2923 	xfrm_hash_free(net->xfrm.policy_byidx, sz);
2924 }
2925 
2926 static int __net_init xfrm_net_init(struct net *net)
2927 {
2928 	int rv;
2929 
2930 	/* Initialize the per-net locks here */
2931 	spin_lock_init(&net->xfrm.xfrm_state_lock);
2932 	spin_lock_init(&net->xfrm.xfrm_policy_lock);
2933 	mutex_init(&net->xfrm.xfrm_cfg_mutex);
2934 
2935 	rv = xfrm_statistics_init(net);
2936 	if (rv < 0)
2937 		goto out_statistics;
2938 	rv = xfrm_state_init(net);
2939 	if (rv < 0)
2940 		goto out_state;
2941 	rv = xfrm_policy_init(net);
2942 	if (rv < 0)
2943 		goto out_policy;
2944 	rv = xfrm_sysctl_init(net);
2945 	if (rv < 0)
2946 		goto out_sysctl;
2947 
2948 	return 0;
2949 
2950 out_sysctl:
2951 	xfrm_policy_fini(net);
2952 out_policy:
2953 	xfrm_state_fini(net);
2954 out_state:
2955 	xfrm_statistics_fini(net);
2956 out_statistics:
2957 	return rv;
2958 }
2959 
2960 static void __net_exit xfrm_net_exit(struct net *net)
2961 {
2962 	xfrm_sysctl_fini(net);
2963 	xfrm_policy_fini(net);
2964 	xfrm_state_fini(net);
2965 	xfrm_statistics_fini(net);
2966 }
2967 
2968 static struct pernet_operations __net_initdata xfrm_net_ops = {
2969 	.init = xfrm_net_init,
2970 	.exit = xfrm_net_exit,
2971 };
2972 
2973 void __init xfrm_init(void)
2974 {
2975 	int i;
2976 
2977 	xfrm_pcpu_work = kmalloc_array(NR_CPUS, sizeof(*xfrm_pcpu_work),
2978 				       GFP_KERNEL);
2979 	BUG_ON(!xfrm_pcpu_work);
2980 
2981 	for (i = 0; i < NR_CPUS; i++)
2982 		INIT_WORK(&xfrm_pcpu_work[i], xfrm_pcpu_work_fn);
2983 
2984 	register_pernet_subsys(&xfrm_net_ops);
2985 	seqcount_init(&xfrm_policy_hash_generation);
2986 	xfrm_input_init();
2987 }
2988 
2989 #ifdef CONFIG_AUDITSYSCALL
2990 static void xfrm_audit_common_policyinfo(struct xfrm_policy *xp,
2991 					 struct audit_buffer *audit_buf)
2992 {
2993 	struct xfrm_sec_ctx *ctx = xp->security;
2994 	struct xfrm_selector *sel = &xp->selector;
2995 
2996 	if (ctx)
2997 		audit_log_format(audit_buf, " sec_alg=%u sec_doi=%u sec_obj=%s",
2998 				 ctx->ctx_alg, ctx->ctx_doi, ctx->ctx_str);
2999 
3000 	switch (sel->family) {
3001 	case AF_INET:
3002 		audit_log_format(audit_buf, " src=%pI4", &sel->saddr.a4);
3003 		if (sel->prefixlen_s != 32)
3004 			audit_log_format(audit_buf, " src_prefixlen=%d",
3005 					 sel->prefixlen_s);
3006 		audit_log_format(audit_buf, " dst=%pI4", &sel->daddr.a4);
3007 		if (sel->prefixlen_d != 32)
3008 			audit_log_format(audit_buf, " dst_prefixlen=%d",
3009 					 sel->prefixlen_d);
3010 		break;
3011 	case AF_INET6:
3012 		audit_log_format(audit_buf, " src=%pI6", sel->saddr.a6);
3013 		if (sel->prefixlen_s != 128)
3014 			audit_log_format(audit_buf, " src_prefixlen=%d",
3015 					 sel->prefixlen_s);
3016 		audit_log_format(audit_buf, " dst=%pI6", sel->daddr.a6);
3017 		if (sel->prefixlen_d != 128)
3018 			audit_log_format(audit_buf, " dst_prefixlen=%d",
3019 					 sel->prefixlen_d);
3020 		break;
3021 	}
3022 }
3023 
3024 void xfrm_audit_policy_add(struct xfrm_policy *xp, int result, bool task_valid)
3025 {
3026 	struct audit_buffer *audit_buf;
3027 
3028 	audit_buf = xfrm_audit_start("SPD-add");
3029 	if (audit_buf == NULL)
3030 		return;
3031 	xfrm_audit_helper_usrinfo(task_valid, audit_buf);
3032 	audit_log_format(audit_buf, " res=%u", result);
3033 	xfrm_audit_common_policyinfo(xp, audit_buf);
3034 	audit_log_end(audit_buf);
3035 }
3036 EXPORT_SYMBOL_GPL(xfrm_audit_policy_add);
3037 
3038 void xfrm_audit_policy_delete(struct xfrm_policy *xp, int result,
3039 			      bool task_valid)
3040 {
3041 	struct audit_buffer *audit_buf;
3042 
3043 	audit_buf = xfrm_audit_start("SPD-delete");
3044 	if (audit_buf == NULL)
3045 		return;
3046 	xfrm_audit_helper_usrinfo(task_valid, audit_buf);
3047 	audit_log_format(audit_buf, " res=%u", result);
3048 	xfrm_audit_common_policyinfo(xp, audit_buf);
3049 	audit_log_end(audit_buf);
3050 }
3051 EXPORT_SYMBOL_GPL(xfrm_audit_policy_delete);
3052 #endif
3053 
3054 #ifdef CONFIG_XFRM_MIGRATE
3055 static bool xfrm_migrate_selector_match(const struct xfrm_selector *sel_cmp,
3056 					const struct xfrm_selector *sel_tgt)
3057 {
3058 	if (sel_cmp->proto == IPSEC_ULPROTO_ANY) {
3059 		if (sel_tgt->family == sel_cmp->family &&
3060 		    xfrm_addr_equal(&sel_tgt->daddr, &sel_cmp->daddr,
3061 				    sel_cmp->family) &&
3062 		    xfrm_addr_equal(&sel_tgt->saddr, &sel_cmp->saddr,
3063 				    sel_cmp->family) &&
3064 		    sel_tgt->prefixlen_d == sel_cmp->prefixlen_d &&
3065 		    sel_tgt->prefixlen_s == sel_cmp->prefixlen_s) {
3066 			return true;
3067 		}
3068 	} else {
3069 		if (memcmp(sel_tgt, sel_cmp, sizeof(*sel_tgt)) == 0) {
3070 			return true;
3071 		}
3072 	}
3073 	return false;
3074 }
3075 
3076 static struct xfrm_policy *xfrm_migrate_policy_find(const struct xfrm_selector *sel,
3077 						    u8 dir, u8 type, struct net *net)
3078 {
3079 	struct xfrm_policy *pol, *ret = NULL;
3080 	struct hlist_head *chain;
3081 	u32 priority = ~0U;
3082 
3083 	spin_lock_bh(&net->xfrm.xfrm_policy_lock);
3084 	chain = policy_hash_direct(net, &sel->daddr, &sel->saddr, sel->family, dir);
3085 	hlist_for_each_entry(pol, chain, bydst) {
3086 		if (xfrm_migrate_selector_match(sel, &pol->selector) &&
3087 		    pol->type == type) {
3088 			ret = pol;
3089 			priority = ret->priority;
3090 			break;
3091 		}
3092 	}
3093 	chain = &net->xfrm.policy_inexact[dir];
3094 	hlist_for_each_entry(pol, chain, bydst) {
3095 		if ((pol->priority >= priority) && ret)
3096 			break;
3097 
3098 		if (xfrm_migrate_selector_match(sel, &pol->selector) &&
3099 		    pol->type == type) {
3100 			ret = pol;
3101 			break;
3102 		}
3103 	}
3104 
3105 	xfrm_pol_hold(ret);
3106 
3107 	spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
3108 
3109 	return ret;
3110 }
3111 
3112 static int migrate_tmpl_match(const struct xfrm_migrate *m, const struct xfrm_tmpl *t)
3113 {
3114 	int match = 0;
3115 
3116 	if (t->mode == m->mode && t->id.proto == m->proto &&
3117 	    (m->reqid == 0 || t->reqid == m->reqid)) {
3118 		switch (t->mode) {
3119 		case XFRM_MODE_TUNNEL:
3120 		case XFRM_MODE_BEET:
3121 			if (xfrm_addr_equal(&t->id.daddr, &m->old_daddr,
3122 					    m->old_family) &&
3123 			    xfrm_addr_equal(&t->saddr, &m->old_saddr,
3124 					    m->old_family)) {
3125 				match = 1;
3126 			}
3127 			break;
3128 		case XFRM_MODE_TRANSPORT:
3129 			/* in case of transport mode, the template does not
3130 			   store any IP addresses, hence we just compare mode
3131 			   and protocol */
3132 			match = 1;
3133 			break;
3134 		default:
3135 			break;
3136 		}
3137 	}
3138 	return match;
3139 }
3140 
3141 /* update endpoint address(es) of template(s) */
3142 static int xfrm_policy_migrate(struct xfrm_policy *pol,
3143 			       struct xfrm_migrate *m, int num_migrate)
3144 {
3145 	struct xfrm_migrate *mp;
3146 	int i, j, n = 0;
3147 
3148 	write_lock_bh(&pol->lock);
3149 	if (unlikely(pol->walk.dead)) {
3150 		/* target policy has been deleted */
3151 		write_unlock_bh(&pol->lock);
3152 		return -ENOENT;
3153 	}
3154 
3155 	for (i = 0; i < pol->xfrm_nr; i++) {
3156 		for (j = 0, mp = m; j < num_migrate; j++, mp++) {
3157 			if (!migrate_tmpl_match(mp, &pol->xfrm_vec[i]))
3158 				continue;
3159 			n++;
3160 			if (pol->xfrm_vec[i].mode != XFRM_MODE_TUNNEL &&
3161 			    pol->xfrm_vec[i].mode != XFRM_MODE_BEET)
3162 				continue;
3163 			/* update endpoints */
3164 			memcpy(&pol->xfrm_vec[i].id.daddr, &mp->new_daddr,
3165 			       sizeof(pol->xfrm_vec[i].id.daddr));
3166 			memcpy(&pol->xfrm_vec[i].saddr, &mp->new_saddr,
3167 			       sizeof(pol->xfrm_vec[i].saddr));
3168 			pol->xfrm_vec[i].encap_family = mp->new_family;
3169 			/* flush bundles */
3170 			atomic_inc(&pol->genid);
3171 		}
3172 	}
3173 
3174 	write_unlock_bh(&pol->lock);
3175 
3176 	if (!n)
3177 		return -ENODATA;
3178 
3179 	return 0;
3180 }
3181 
3182 static int xfrm_migrate_check(const struct xfrm_migrate *m, int num_migrate)
3183 {
3184 	int i, j;
3185 
3186 	if (num_migrate < 1 || num_migrate > XFRM_MAX_DEPTH)
3187 		return -EINVAL;
3188 
3189 	for (i = 0; i < num_migrate; i++) {
3190 		if (xfrm_addr_any(&m[i].new_daddr, m[i].new_family) ||
3191 		    xfrm_addr_any(&m[i].new_saddr, m[i].new_family))
3192 			return -EINVAL;
3193 
3194 		/* check if there is any duplicated entry */
3195 		for (j = i + 1; j < num_migrate; j++) {
3196 			if (!memcmp(&m[i].old_daddr, &m[j].old_daddr,
3197 				    sizeof(m[i].old_daddr)) &&
3198 			    !memcmp(&m[i].old_saddr, &m[j].old_saddr,
3199 				    sizeof(m[i].old_saddr)) &&
3200 			    m[i].proto == m[j].proto &&
3201 			    m[i].mode == m[j].mode &&
3202 			    m[i].reqid == m[j].reqid &&
3203 			    m[i].old_family == m[j].old_family)
3204 				return -EINVAL;
3205 		}
3206 	}
3207 
3208 	return 0;
3209 }
3210 
3211 int xfrm_migrate(const struct xfrm_selector *sel, u8 dir, u8 type,
3212 		 struct xfrm_migrate *m, int num_migrate,
3213 		 struct xfrm_kmaddress *k, struct net *net,
3214 		 struct xfrm_encap_tmpl *encap)
3215 {
3216 	int i, err, nx_cur = 0, nx_new = 0;
3217 	struct xfrm_policy *pol = NULL;
3218 	struct xfrm_state *x, *xc;
3219 	struct xfrm_state *x_cur[XFRM_MAX_DEPTH];
3220 	struct xfrm_state *x_new[XFRM_MAX_DEPTH];
3221 	struct xfrm_migrate *mp;
3222 
3223 	/* Stage 0 - sanity checks */
3224 	if ((err = xfrm_migrate_check(m, num_migrate)) < 0)
3225 		goto out;
3226 
3227 	if (dir >= XFRM_POLICY_MAX) {
3228 		err = -EINVAL;
3229 		goto out;
3230 	}
3231 
3232 	/* Stage 1 - find policy */
3233 	if ((pol = xfrm_migrate_policy_find(sel, dir, type, net)) == NULL) {
3234 		err = -ENOENT;
3235 		goto out;
3236 	}
3237 
3238 	/* Stage 2 - find and update state(s) */
3239 	for (i = 0, mp = m; i < num_migrate; i++, mp++) {
3240 		if ((x = xfrm_migrate_state_find(mp, net))) {
3241 			x_cur[nx_cur] = x;
3242 			nx_cur++;
3243 			xc = xfrm_state_migrate(x, mp, encap);
3244 			if (xc) {
3245 				x_new[nx_new] = xc;
3246 				nx_new++;
3247 			} else {
3248 				err = -ENODATA;
3249 				goto restore_state;
3250 			}
3251 		}
3252 	}
3253 
3254 	/* Stage 3 - update policy */
3255 	if ((err = xfrm_policy_migrate(pol, m, num_migrate)) < 0)
3256 		goto restore_state;
3257 
3258 	/* Stage 4 - delete old state(s) */
3259 	if (nx_cur) {
3260 		xfrm_states_put(x_cur, nx_cur);
3261 		xfrm_states_delete(x_cur, nx_cur);
3262 	}
3263 
3264 	/* Stage 5 - announce */
3265 	km_migrate(sel, dir, type, m, num_migrate, k, encap);
3266 
3267 	xfrm_pol_put(pol);
3268 
3269 	return 0;
3270 out:
3271 	return err;
3272 
3273 restore_state:
3274 	if (pol)
3275 		xfrm_pol_put(pol);
3276 	if (nx_cur)
3277 		xfrm_states_put(x_cur, nx_cur);
3278 	if (nx_new)
3279 		xfrm_states_delete(x_new, nx_new);
3280 
3281 	return err;
3282 }
3283 EXPORT_SYMBOL(xfrm_migrate);
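
/*
 * Illustrative sketch: migration is driven from a userspace key manager,
 * e.g. the XFRM_MSG_MIGRATE netlink message or PF_KEY's SADB_X_MIGRATE,
 * which ends up in a call along the lines of
 *
 *	struct xfrm_migrate m = {
 *		.proto	    = IPPROTO_ESP,
 *		.mode	    = XFRM_MODE_TUNNEL,
 *		.old_family = AF_INET,
 *		.new_family = AF_INET,
 *	};	(old/new saddr and daddr filled in from the request)
 *
 *	err = xfrm_migrate(&sel, XFRM_POLICY_OUT, XFRM_POLICY_TYPE_MAIN,
 *			   &m, 1, NULL, net, NULL);
 *
 * so the matching policy templates and states are rewritten to the new
 * endpoints in one operation.
 */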
3284 #endif
3285