xref: /openbmc/linux/net/xfrm/xfrm_policy.c (revision e285d5bf)
1 /*
2  * xfrm_policy.c
3  *
4  * Changes:
5  *	Mitsuru KANDA @USAGI
6  * 	Kazunori MIYAZAWA @USAGI
7  * 	Kunihiro Ishiguro <kunihiro@ipinfusion.com>
8  * 		IPv6 support
9  * 	Kazunori MIYAZAWA @USAGI
10  * 	YOSHIFUJI Hideaki
11  * 		Split up af-specific portion
12  *	Derek Atkins <derek@ihtfp.com>		Add the post_input processor
13  *
14  */
15 
16 #include <linux/err.h>
17 #include <linux/slab.h>
18 #include <linux/kmod.h>
19 #include <linux/list.h>
20 #include <linux/spinlock.h>
21 #include <linux/workqueue.h>
22 #include <linux/notifier.h>
23 #include <linux/netdevice.h>
24 #include <linux/netfilter.h>
25 #include <linux/module.h>
26 #include <linux/cache.h>
27 #include <linux/cpu.h>
28 #include <linux/audit.h>
29 #include <net/dst.h>
30 #include <net/flow.h>
31 #include <net/xfrm.h>
32 #include <net/ip.h>
33 #ifdef CONFIG_XFRM_STATISTICS
34 #include <net/snmp.h>
35 #endif
36 
37 #include "xfrm_hash.h"
38 
39 #define XFRM_QUEUE_TMO_MIN ((unsigned)(HZ/10))
40 #define XFRM_QUEUE_TMO_MAX ((unsigned)(60*HZ))
41 #define XFRM_MAX_QUEUE_LEN	100
42 
43 struct xfrm_flo {
44 	struct dst_entry *dst_orig;
45 	u8 flags;
46 };
47 
48 static DEFINE_SPINLOCK(xfrm_if_cb_lock);
49 static struct xfrm_if_cb const __rcu *xfrm_if_cb __read_mostly;
50 
51 static DEFINE_SPINLOCK(xfrm_policy_afinfo_lock);
52 static struct xfrm_policy_afinfo const __rcu *xfrm_policy_afinfo[AF_INET6 + 1]
53 						__read_mostly;
54 
55 static struct kmem_cache *xfrm_dst_cache __ro_after_init;
56 static __read_mostly seqcount_t xfrm_policy_hash_generation;
57 
58 static void xfrm_init_pmtu(struct xfrm_dst **bundle, int nr);
59 static int stale_bundle(struct dst_entry *dst);
60 static int xfrm_bundle_ok(struct xfrm_dst *xdst);
61 static void xfrm_policy_queue_process(struct timer_list *t);
62 
63 static void __xfrm_policy_link(struct xfrm_policy *pol, int dir);
64 static struct xfrm_policy *__xfrm_policy_unlink(struct xfrm_policy *pol,
65 						int dir);
66 
67 static inline bool xfrm_pol_hold_rcu(struct xfrm_policy *policy)
68 {
69 	return refcount_inc_not_zero(&policy->refcnt);
70 }
71 
72 static inline bool
73 __xfrm4_selector_match(const struct xfrm_selector *sel, const struct flowi *fl)
74 {
75 	const struct flowi4 *fl4 = &fl->u.ip4;
76 
77 	return  addr4_match(fl4->daddr, sel->daddr.a4, sel->prefixlen_d) &&
78 		addr4_match(fl4->saddr, sel->saddr.a4, sel->prefixlen_s) &&
79 		!((xfrm_flowi_dport(fl, &fl4->uli) ^ sel->dport) & sel->dport_mask) &&
80 		!((xfrm_flowi_sport(fl, &fl4->uli) ^ sel->sport) & sel->sport_mask) &&
81 		(fl4->flowi4_proto == sel->proto || !sel->proto) &&
82 		(fl4->flowi4_oif == sel->ifindex || !sel->ifindex);
83 }
84 
85 static inline bool
86 __xfrm6_selector_match(const struct xfrm_selector *sel, const struct flowi *fl)
87 {
88 	const struct flowi6 *fl6 = &fl->u.ip6;
89 
90 	return  addr_match(&fl6->daddr, &sel->daddr, sel->prefixlen_d) &&
91 		addr_match(&fl6->saddr, &sel->saddr, sel->prefixlen_s) &&
92 		!((xfrm_flowi_dport(fl, &fl6->uli) ^ sel->dport) & sel->dport_mask) &&
93 		!((xfrm_flowi_sport(fl, &fl6->uli) ^ sel->sport) & sel->sport_mask) &&
94 		(fl6->flowi6_proto == sel->proto || !sel->proto) &&
95 		(fl6->flowi6_oif == sel->ifindex || !sel->ifindex);
96 }
97 
98 bool xfrm_selector_match(const struct xfrm_selector *sel, const struct flowi *fl,
99 			 unsigned short family)
100 {
101 	switch (family) {
102 	case AF_INET:
103 		return __xfrm4_selector_match(sel, fl);
104 	case AF_INET6:
105 		return __xfrm6_selector_match(sel, fl);
106 	}
107 	return false;
108 }
109 
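/* Return the per-family policy afinfo, or NULL if the family is not
 * registered.  On success the RCU read lock is left held; the caller
 * must drop it with rcu_read_unlock() when done with the afinfo.
 */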
110 static const struct xfrm_policy_afinfo *xfrm_policy_get_afinfo(unsigned short family)
111 {
112 	const struct xfrm_policy_afinfo *afinfo;
113 
114 	if (unlikely(family >= ARRAY_SIZE(xfrm_policy_afinfo)))
115 		return NULL;
116 	rcu_read_lock();
117 	afinfo = rcu_dereference(xfrm_policy_afinfo[family]);
118 	if (unlikely(!afinfo))
119 		rcu_read_unlock();
120 	return afinfo;
121 }
122 
123 /* Called with rcu_read_lock(). */
124 static const struct xfrm_if_cb *xfrm_if_get_cb(void)
125 {
126 	return rcu_dereference(xfrm_if_cb);
127 }
128 
129 struct dst_entry *__xfrm_dst_lookup(struct net *net, int tos, int oif,
130 				    const xfrm_address_t *saddr,
131 				    const xfrm_address_t *daddr,
132 				    int family, u32 mark)
133 {
134 	const struct xfrm_policy_afinfo *afinfo;
135 	struct dst_entry *dst;
136 
137 	afinfo = xfrm_policy_get_afinfo(family);
138 	if (unlikely(afinfo == NULL))
139 		return ERR_PTR(-EAFNOSUPPORT);
140 
141 	dst = afinfo->dst_lookup(net, tos, oif, saddr, daddr, mark);
142 
143 	rcu_read_unlock();
144 
145 	return dst;
146 }
147 EXPORT_SYMBOL(__xfrm_dst_lookup);
148 
149 static inline struct dst_entry *xfrm_dst_lookup(struct xfrm_state *x,
150 						int tos, int oif,
151 						xfrm_address_t *prev_saddr,
152 						xfrm_address_t *prev_daddr,
153 						int family, u32 mark)
154 {
155 	struct net *net = xs_net(x);
156 	xfrm_address_t *saddr = &x->props.saddr;
157 	xfrm_address_t *daddr = &x->id.daddr;
158 	struct dst_entry *dst;
159 
160 	if (x->type->flags & XFRM_TYPE_LOCAL_COADDR) {
161 		saddr = x->coaddr;
162 		daddr = prev_daddr;
163 	}
164 	if (x->type->flags & XFRM_TYPE_REMOTE_COADDR) {
165 		saddr = prev_saddr;
166 		daddr = x->coaddr;
167 	}
168 
169 	dst = __xfrm_dst_lookup(net, tos, oif, saddr, daddr, family, mark);
170 
171 	if (!IS_ERR(dst)) {
172 		if (prev_saddr != saddr)
173 			memcpy(prev_saddr, saddr,  sizeof(*prev_saddr));
174 		if (prev_daddr != daddr)
175 			memcpy(prev_daddr, daddr,  sizeof(*prev_daddr));
176 	}
177 
178 	return dst;
179 }
180 
181 static inline unsigned long make_jiffies(long secs)
182 {
183 	if (secs >= (MAX_SCHEDULE_TIMEOUT-1)/HZ)
184 		return MAX_SCHEDULE_TIMEOUT-1;
185 	else
186 		return secs*HZ;
187 }
188 
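/* Per-policy lifetime timer: send soft-expiry notifications via
 * km_policy_expired() and delete the policy once a hard add/use limit
 * has passed, re-arming the timer for the next earliest event.
 */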
189 static void xfrm_policy_timer(struct timer_list *t)
190 {
191 	struct xfrm_policy *xp = from_timer(xp, t, timer);
192 	time64_t now = ktime_get_real_seconds();
193 	time64_t next = TIME64_MAX;
194 	int warn = 0;
195 	int dir;
196 
197 	read_lock(&xp->lock);
198 
199 	if (unlikely(xp->walk.dead))
200 		goto out;
201 
202 	dir = xfrm_policy_id2dir(xp->index);
203 
204 	if (xp->lft.hard_add_expires_seconds) {
205 		time64_t tmo = xp->lft.hard_add_expires_seconds +
206 			xp->curlft.add_time - now;
207 		if (tmo <= 0)
208 			goto expired;
209 		if (tmo < next)
210 			next = tmo;
211 	}
212 	if (xp->lft.hard_use_expires_seconds) {
213 		time64_t tmo = xp->lft.hard_use_expires_seconds +
214 			(xp->curlft.use_time ? : xp->curlft.add_time) - now;
215 		if (tmo <= 0)
216 			goto expired;
217 		if (tmo < next)
218 			next = tmo;
219 	}
220 	if (xp->lft.soft_add_expires_seconds) {
221 		time64_t tmo = xp->lft.soft_add_expires_seconds +
222 			xp->curlft.add_time - now;
223 		if (tmo <= 0) {
224 			warn = 1;
225 			tmo = XFRM_KM_TIMEOUT;
226 		}
227 		if (tmo < next)
228 			next = tmo;
229 	}
230 	if (xp->lft.soft_use_expires_seconds) {
231 		time64_t tmo = xp->lft.soft_use_expires_seconds +
232 			(xp->curlft.use_time ? : xp->curlft.add_time) - now;
233 		if (tmo <= 0) {
234 			warn = 1;
235 			tmo = XFRM_KM_TIMEOUT;
236 		}
237 		if (tmo < next)
238 			next = tmo;
239 	}
240 
241 	if (warn)
242 		km_policy_expired(xp, dir, 0, 0);
243 	if (next != TIME64_MAX &&
244 	    !mod_timer(&xp->timer, jiffies + make_jiffies(next)))
245 		xfrm_pol_hold(xp);
246 
247 out:
248 	read_unlock(&xp->lock);
249 	xfrm_pol_put(xp);
250 	return;
251 
252 expired:
253 	read_unlock(&xp->lock);
254 	if (!xfrm_policy_delete(xp, dir))
255 		km_policy_expired(xp, dir, 1, 0);
256 	xfrm_pol_put(xp);
257 }
258 
259 /* Allocate xfrm_policy. Not used here, it is supposed to be used by pfkeyv2
260  * SPD calls.
261  */
262 
263 struct xfrm_policy *xfrm_policy_alloc(struct net *net, gfp_t gfp)
264 {
265 	struct xfrm_policy *policy;
266 
267 	policy = kzalloc(sizeof(struct xfrm_policy), gfp);
268 
269 	if (policy) {
270 		write_pnet(&policy->xp_net, net);
271 		INIT_LIST_HEAD(&policy->walk.all);
272 		INIT_HLIST_NODE(&policy->bydst);
273 		INIT_HLIST_NODE(&policy->byidx);
274 		rwlock_init(&policy->lock);
275 		refcount_set(&policy->refcnt, 1);
276 		skb_queue_head_init(&policy->polq.hold_queue);
277 		timer_setup(&policy->timer, xfrm_policy_timer, 0);
278 		timer_setup(&policy->polq.hold_timer,
279 			    xfrm_policy_queue_process, 0);
280 	}
281 	return policy;
282 }
283 EXPORT_SYMBOL(xfrm_policy_alloc);
284 
285 static void xfrm_policy_destroy_rcu(struct rcu_head *head)
286 {
287 	struct xfrm_policy *policy = container_of(head, struct xfrm_policy, rcu);
288 
289 	security_xfrm_policy_free(policy->security);
290 	kfree(policy);
291 }
292 
293 /* Destroy xfrm_policy: descendant resources must have been released by this point. */
294 
295 void xfrm_policy_destroy(struct xfrm_policy *policy)
296 {
297 	BUG_ON(!policy->walk.dead);
298 
299 	if (del_timer(&policy->timer) || del_timer(&policy->polq.hold_timer))
300 		BUG();
301 
302 	call_rcu(&policy->rcu, xfrm_policy_destroy_rcu);
303 }
304 EXPORT_SYMBOL(xfrm_policy_destroy);
305 
306 /* Rule must be locked. Release descendant resources, announce the
307  * entry dead. The rule must already have been unlinked from all lists.
308  */
309 
310 static void xfrm_policy_kill(struct xfrm_policy *policy)
311 {
312 	policy->walk.dead = 1;
313 
314 	atomic_inc(&policy->genid);
315 
316 	if (del_timer(&policy->polq.hold_timer))
317 		xfrm_pol_put(policy);
318 	skb_queue_purge(&policy->polq.hold_queue);
319 
320 	if (del_timer(&policy->timer))
321 		xfrm_pol_put(policy);
322 
323 	xfrm_pol_put(policy);
324 }
325 
326 static unsigned int xfrm_policy_hashmax __read_mostly = 1 * 1024 * 1024;
327 
328 static inline unsigned int idx_hash(struct net *net, u32 index)
329 {
330 	return __idx_hash(index, net->xfrm.policy_idx_hmask);
331 }
332 
333 /* look up the configured policy hash prefix-length thresholds for this family and direction */
334 static void __get_hash_thresh(struct net *net,
335 			      unsigned short family, int dir,
336 			      u8 *dbits, u8 *sbits)
337 {
338 	switch (family) {
339 	case AF_INET:
340 		*dbits = net->xfrm.policy_bydst[dir].dbits4;
341 		*sbits = net->xfrm.policy_bydst[dir].sbits4;
342 		break;
343 
344 	case AF_INET6:
345 		*dbits = net->xfrm.policy_bydst[dir].dbits6;
346 		*sbits = net->xfrm.policy_bydst[dir].sbits6;
347 		break;
348 
349 	default:
350 		*dbits = 0;
351 		*sbits = 0;
352 	}
353 }
354 
355 static struct hlist_head *policy_hash_bysel(struct net *net,
356 					    const struct xfrm_selector *sel,
357 					    unsigned short family, int dir)
358 {
359 	unsigned int hmask = net->xfrm.policy_bydst[dir].hmask;
360 	unsigned int hash;
361 	u8 dbits;
362 	u8 sbits;
363 
364 	__get_hash_thresh(net, family, dir, &dbits, &sbits);
365 	hash = __sel_hash(sel, family, hmask, dbits, sbits);
366 
367 	if (hash == hmask + 1)
368 		return &net->xfrm.policy_inexact[dir];
369 
370 	return rcu_dereference_check(net->xfrm.policy_bydst[dir].table,
371 		     lockdep_is_held(&net->xfrm.xfrm_policy_lock)) + hash;
372 }
373 
374 static struct hlist_head *policy_hash_direct(struct net *net,
375 					     const xfrm_address_t *daddr,
376 					     const xfrm_address_t *saddr,
377 					     unsigned short family, int dir)
378 {
379 	unsigned int hmask = net->xfrm.policy_bydst[dir].hmask;
380 	unsigned int hash;
381 	u8 dbits;
382 	u8 sbits;
383 
384 	__get_hash_thresh(net, family, dir, &dbits, &sbits);
385 	hash = __addr_hash(daddr, saddr, family, hmask, dbits, sbits);
386 
387 	return rcu_dereference_check(net->xfrm.policy_bydst[dir].table,
388 		     lockdep_is_held(&net->xfrm.xfrm_policy_lock)) + hash;
389 }
390 
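/* Move every policy on one old bydst chain into the new hash table.
 * Entries that land in the same new bucket are transferred as a group
 * so that their relative order (and thus priority resolution) is kept.
 */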
391 static void xfrm_dst_hash_transfer(struct net *net,
392 				   struct hlist_head *list,
393 				   struct hlist_head *ndsttable,
394 				   unsigned int nhashmask,
395 				   int dir)
396 {
397 	struct hlist_node *tmp, *entry0 = NULL;
398 	struct xfrm_policy *pol;
399 	unsigned int h0 = 0;
400 	u8 dbits;
401 	u8 sbits;
402 
403 redo:
404 	hlist_for_each_entry_safe(pol, tmp, list, bydst) {
405 		unsigned int h;
406 
407 		__get_hash_thresh(net, pol->family, dir, &dbits, &sbits);
408 		h = __addr_hash(&pol->selector.daddr, &pol->selector.saddr,
409 				pol->family, nhashmask, dbits, sbits);
410 		if (!entry0) {
411 			hlist_del_rcu(&pol->bydst);
412 			hlist_add_head_rcu(&pol->bydst, ndsttable + h);
413 			h0 = h;
414 		} else {
415 			if (h != h0)
416 				continue;
417 			hlist_del_rcu(&pol->bydst);
418 			hlist_add_behind_rcu(&pol->bydst, entry0);
419 		}
420 		entry0 = &pol->bydst;
421 	}
422 	if (!hlist_empty(list)) {
423 		entry0 = NULL;
424 		goto redo;
425 	}
426 }
427 
428 static void xfrm_idx_hash_transfer(struct hlist_head *list,
429 				   struct hlist_head *nidxtable,
430 				   unsigned int nhashmask)
431 {
432 	struct hlist_node *tmp;
433 	struct xfrm_policy *pol;
434 
435 	hlist_for_each_entry_safe(pol, tmp, list, byidx) {
436 		unsigned int h;
437 
438 		h = __idx_hash(pol->index, nhashmask);
439 		hlist_add_head(&pol->byidx, nidxtable+h);
440 	}
441 }
442 
443 static unsigned long xfrm_new_hash_mask(unsigned int old_hmask)
444 {
445 	return ((old_hmask + 1) << 1) - 1;
446 }
447 
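/* Double the by-destination hash table for one direction and rehash
 * all policies under the policy lock; lockless readers are fenced by
 * the xfrm_policy_hash_generation seqcount.
 */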
448 static void xfrm_bydst_resize(struct net *net, int dir)
449 {
450 	unsigned int hmask = net->xfrm.policy_bydst[dir].hmask;
451 	unsigned int nhashmask = xfrm_new_hash_mask(hmask);
452 	unsigned int nsize = (nhashmask + 1) * sizeof(struct hlist_head);
453 	struct hlist_head *ndst = xfrm_hash_alloc(nsize);
454 	struct hlist_head *odst;
455 	int i;
456 
457 	if (!ndst)
458 		return;
459 
460 	spin_lock_bh(&net->xfrm.xfrm_policy_lock);
461 	write_seqcount_begin(&xfrm_policy_hash_generation);
462 
463 	odst = rcu_dereference_protected(net->xfrm.policy_bydst[dir].table,
464 				lockdep_is_held(&net->xfrm.xfrm_policy_lock));
468 
469 	for (i = hmask; i >= 0; i--)
470 		xfrm_dst_hash_transfer(net, odst + i, ndst, nhashmask, dir);
471 
472 	rcu_assign_pointer(net->xfrm.policy_bydst[dir].table, ndst);
473 	net->xfrm.policy_bydst[dir].hmask = nhashmask;
474 
475 	write_seqcount_end(&xfrm_policy_hash_generation);
476 	spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
477 
478 	synchronize_rcu();
479 
480 	xfrm_hash_free(odst, (hmask + 1) * sizeof(struct hlist_head));
481 }
482 
483 static void xfrm_byidx_resize(struct net *net, int total)
484 {
485 	unsigned int hmask = net->xfrm.policy_idx_hmask;
486 	unsigned int nhashmask = xfrm_new_hash_mask(hmask);
487 	unsigned int nsize = (nhashmask + 1) * sizeof(struct hlist_head);
488 	struct hlist_head *oidx = net->xfrm.policy_byidx;
489 	struct hlist_head *nidx = xfrm_hash_alloc(nsize);
490 	int i;
491 
492 	if (!nidx)
493 		return;
494 
495 	spin_lock_bh(&net->xfrm.xfrm_policy_lock);
496 
497 	for (i = hmask; i >= 0; i--)
498 		xfrm_idx_hash_transfer(oidx + i, nidx, nhashmask);
499 
500 	net->xfrm.policy_byidx = nidx;
501 	net->xfrm.policy_idx_hmask = nhashmask;
502 
503 	spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
504 
505 	xfrm_hash_free(oidx, (hmask + 1) * sizeof(struct hlist_head));
506 }
507 
508 static inline int xfrm_bydst_should_resize(struct net *net, int dir, int *total)
509 {
510 	unsigned int cnt = net->xfrm.policy_count[dir];
511 	unsigned int hmask = net->xfrm.policy_bydst[dir].hmask;
512 
513 	if (total)
514 		*total += cnt;
515 
516 	if ((hmask + 1) < xfrm_policy_hashmax &&
517 	    cnt > hmask)
518 		return 1;
519 
520 	return 0;
521 }
522 
523 static inline int xfrm_byidx_should_resize(struct net *net, int total)
524 {
525 	unsigned int hmask = net->xfrm.policy_idx_hmask;
526 
527 	if ((hmask + 1) < xfrm_policy_hashmax &&
528 	    total > hmask)
529 		return 1;
530 
531 	return 0;
532 }
533 
534 void xfrm_spd_getinfo(struct net *net, struct xfrmk_spdinfo *si)
535 {
536 	si->incnt = net->xfrm.policy_count[XFRM_POLICY_IN];
537 	si->outcnt = net->xfrm.policy_count[XFRM_POLICY_OUT];
538 	si->fwdcnt = net->xfrm.policy_count[XFRM_POLICY_FWD];
539 	si->inscnt = net->xfrm.policy_count[XFRM_POLICY_IN+XFRM_POLICY_MAX];
540 	si->outscnt = net->xfrm.policy_count[XFRM_POLICY_OUT+XFRM_POLICY_MAX];
541 	si->fwdscnt = net->xfrm.policy_count[XFRM_POLICY_FWD+XFRM_POLICY_MAX];
542 	si->spdhcnt = net->xfrm.policy_idx_hmask;
543 	si->spdhmcnt = xfrm_policy_hashmax;
544 }
545 EXPORT_SYMBOL(xfrm_spd_getinfo);
546 
547 static DEFINE_MUTEX(hash_resize_mutex);
548 static void xfrm_hash_resize(struct work_struct *work)
549 {
550 	struct net *net = container_of(work, struct net, xfrm.policy_hash_work);
551 	int dir, total;
552 
553 	mutex_lock(&hash_resize_mutex);
554 
555 	total = 0;
556 	for (dir = 0; dir < XFRM_POLICY_MAX; dir++) {
557 		if (xfrm_bydst_should_resize(net, dir, &total))
558 			xfrm_bydst_resize(net, dir);
559 	}
560 	if (xfrm_byidx_should_resize(net, total))
561 		xfrm_byidx_resize(net, total);
562 
563 	mutex_unlock(&hash_resize_mutex);
564 }
565 
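/* Apply new selector prefix-length thresholds: empty all bydst and
 * inexact chains and re-insert every policy (in creation order,
 * honouring priority) using the updated hashing parameters.
 */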
566 static void xfrm_hash_rebuild(struct work_struct *work)
567 {
568 	struct net *net = container_of(work, struct net,
569 				       xfrm.policy_hthresh.work);
570 	unsigned int hmask;
571 	struct xfrm_policy *pol;
572 	struct xfrm_policy *policy;
573 	struct hlist_head *chain;
574 	struct hlist_head *odst;
575 	struct hlist_node *newpos;
576 	int i;
577 	int dir;
578 	unsigned seq;
579 	u8 lbits4, rbits4, lbits6, rbits6;
580 
581 	mutex_lock(&hash_resize_mutex);
582 
583 	/* read selector prefixlen thresholds */
584 	do {
585 		seq = read_seqbegin(&net->xfrm.policy_hthresh.lock);
586 
587 		lbits4 = net->xfrm.policy_hthresh.lbits4;
588 		rbits4 = net->xfrm.policy_hthresh.rbits4;
589 		lbits6 = net->xfrm.policy_hthresh.lbits6;
590 		rbits6 = net->xfrm.policy_hthresh.rbits6;
591 	} while (read_seqretry(&net->xfrm.policy_hthresh.lock, seq));
592 
593 	spin_lock_bh(&net->xfrm.xfrm_policy_lock);
594 
595 	/* reset the bydst and inexact table in all directions */
596 	for (dir = 0; dir < XFRM_POLICY_MAX; dir++) {
597 		INIT_HLIST_HEAD(&net->xfrm.policy_inexact[dir]);
598 		hmask = net->xfrm.policy_bydst[dir].hmask;
599 		odst = net->xfrm.policy_bydst[dir].table;
600 		for (i = hmask; i >= 0; i--)
601 			INIT_HLIST_HEAD(odst + i);
602 		if ((dir & XFRM_POLICY_MASK) == XFRM_POLICY_OUT) {
603 			/* dir out => dst = remote, src = local */
604 			net->xfrm.policy_bydst[dir].dbits4 = rbits4;
605 			net->xfrm.policy_bydst[dir].sbits4 = lbits4;
606 			net->xfrm.policy_bydst[dir].dbits6 = rbits6;
607 			net->xfrm.policy_bydst[dir].sbits6 = lbits6;
608 		} else {
609 			/* dir in/fwd => dst = local, src = remote */
610 			net->xfrm.policy_bydst[dir].dbits4 = lbits4;
611 			net->xfrm.policy_bydst[dir].sbits4 = rbits4;
612 			net->xfrm.policy_bydst[dir].dbits6 = lbits6;
613 			net->xfrm.policy_bydst[dir].sbits6 = rbits6;
614 		}
615 	}
616 
617 	/* re-insert all policies by order of creation */
618 	list_for_each_entry_reverse(policy, &net->xfrm.policy_all, walk.all) {
619 		if (policy->walk.dead ||
620 		    xfrm_policy_id2dir(policy->index) >= XFRM_POLICY_MAX) {
621 			/* skip socket policies */
622 			continue;
623 		}
624 		newpos = NULL;
625 		chain = policy_hash_bysel(net, &policy->selector,
626 					  policy->family,
627 					  xfrm_policy_id2dir(policy->index));
628 		hlist_for_each_entry(pol, chain, bydst) {
629 			if (policy->priority >= pol->priority)
630 				newpos = &pol->bydst;
631 			else
632 				break;
633 		}
634 		if (newpos)
635 			hlist_add_behind(&policy->bydst, newpos);
636 		else
637 			hlist_add_head(&policy->bydst, chain);
638 	}
639 
640 	spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
641 
642 	mutex_unlock(&hash_resize_mutex);
643 }
644 
645 void xfrm_policy_hash_rebuild(struct net *net)
646 {
647 	schedule_work(&net->xfrm.policy_hthresh.work);
648 }
649 EXPORT_SYMBOL(xfrm_policy_hash_rebuild);
650 
651 /* Generate a new index... KAME seems to generate them ordered by cost,
652  * at the price of completely unpredictable rule ordering. That will not do here. */
653 static u32 xfrm_gen_index(struct net *net, int dir, u32 index)
654 {
655 	static u32 idx_generator;
656 
657 	for (;;) {
658 		struct hlist_head *list;
659 		struct xfrm_policy *p;
660 		u32 idx;
661 		int found;
662 
663 		if (!index) {
664 			idx = (idx_generator | dir);
665 			idx_generator += 8;
666 		} else {
667 			idx = index;
668 			index = 0;
669 		}
670 
671 		if (idx == 0)
672 			idx = 8;
673 		list = net->xfrm.policy_byidx + idx_hash(net, idx);
674 		found = 0;
675 		hlist_for_each_entry(p, list, byidx) {
676 			if (p->index == idx) {
677 				found = 1;
678 				break;
679 			}
680 		}
681 		if (!found)
682 			return idx;
683 	}
684 }
685 
686 static inline int selector_cmp(struct xfrm_selector *s1, struct xfrm_selector *s2)
687 {
688 	u32 *p1 = (u32 *) s1;
689 	u32 *p2 = (u32 *) s2;
690 	int len = sizeof(struct xfrm_selector) / sizeof(u32);
691 	int i;
692 
693 	for (i = 0; i < len; i++) {
694 		if (p1[i] != p2[i])
695 			return 1;
696 	}
697 
698 	return 0;
699 }
700 
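/* Move packets queued on the old policy's hold queue over to the
 * policy that replaces it and kick the new policy's hold timer.
 */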
701 static void xfrm_policy_requeue(struct xfrm_policy *old,
702 				struct xfrm_policy *new)
703 {
704 	struct xfrm_policy_queue *pq = &old->polq;
705 	struct sk_buff_head list;
706 
707 	if (skb_queue_empty(&pq->hold_queue))
708 		return;
709 
710 	__skb_queue_head_init(&list);
711 
712 	spin_lock_bh(&pq->hold_queue.lock);
713 	skb_queue_splice_init(&pq->hold_queue, &list);
714 	if (del_timer(&pq->hold_timer))
715 		xfrm_pol_put(old);
716 	spin_unlock_bh(&pq->hold_queue.lock);
717 
718 	pq = &new->polq;
719 
720 	spin_lock_bh(&pq->hold_queue.lock);
721 	skb_queue_splice(&list, &pq->hold_queue);
722 	pq->timeout = XFRM_QUEUE_TMO_MIN;
723 	if (!mod_timer(&pq->hold_timer, jiffies))
724 		xfrm_pol_hold(new);
725 	spin_unlock_bh(&pq->hold_queue.lock);
726 }
727 
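/* Decide whether @policy may replace @pol: either both use the exact
 * same mark value/mask pair, or @policy's effective mark matches @pol
 * and the two policies have equal priority.
 */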
728 static bool xfrm_policy_mark_match(struct xfrm_policy *policy,
729 				   struct xfrm_policy *pol)
730 {
731 	u32 mark = policy->mark.v & policy->mark.m;
732 
733 	if (policy->mark.v == pol->mark.v && policy->mark.m == pol->mark.m)
734 		return true;
735 
736 	if ((mark & pol->mark.m) == pol->mark.v &&
737 	    policy->priority == pol->priority)
738 		return true;
739 
740 	return false;
741 }
742 
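/* Insert a policy into the SPD.  An existing policy with the same
 * selector, mark and security context is replaced (unless @excl is
 * set), and any packets it had queued are moved to the new policy.
 */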
743 int xfrm_policy_insert(int dir, struct xfrm_policy *policy, int excl)
744 {
745 	struct net *net = xp_net(policy);
746 	struct xfrm_policy *pol;
747 	struct xfrm_policy *delpol;
748 	struct hlist_head *chain;
749 	struct hlist_node *newpos;
750 
751 	spin_lock_bh(&net->xfrm.xfrm_policy_lock);
752 	chain = policy_hash_bysel(net, &policy->selector, policy->family, dir);
753 	delpol = NULL;
754 	newpos = NULL;
755 	hlist_for_each_entry(pol, chain, bydst) {
756 		if (pol->type == policy->type &&
757 		    pol->if_id == policy->if_id &&
758 		    !selector_cmp(&pol->selector, &policy->selector) &&
759 		    xfrm_policy_mark_match(policy, pol) &&
760 		    xfrm_sec_ctx_match(pol->security, policy->security) &&
761 		    !WARN_ON(delpol)) {
762 			if (excl) {
763 				spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
764 				return -EEXIST;
765 			}
766 			delpol = pol;
767 			if (policy->priority > pol->priority)
768 				continue;
769 		} else if (policy->priority >= pol->priority) {
770 			newpos = &pol->bydst;
771 			continue;
772 		}
773 		if (delpol)
774 			break;
775 	}
776 	if (newpos)
777 		hlist_add_behind(&policy->bydst, newpos);
778 	else
779 		hlist_add_head(&policy->bydst, chain);
780 	__xfrm_policy_link(policy, dir);
781 
782 	/* After previous checking, family can either be AF_INET or AF_INET6 */
783 	if (policy->family == AF_INET)
784 		rt_genid_bump_ipv4(net);
785 	else
786 		rt_genid_bump_ipv6(net);
787 
788 	if (delpol) {
789 		xfrm_policy_requeue(delpol, policy);
790 		__xfrm_policy_unlink(delpol, dir);
791 	}
792 	policy->index = delpol ? delpol->index : xfrm_gen_index(net, dir, policy->index);
793 	hlist_add_head(&policy->byidx, net->xfrm.policy_byidx+idx_hash(net, policy->index));
794 	policy->curlft.add_time = ktime_get_real_seconds();
795 	policy->curlft.use_time = 0;
796 	if (!mod_timer(&policy->timer, jiffies + HZ))
797 		xfrm_pol_hold(policy);
798 	spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
799 
800 	if (delpol)
801 		xfrm_policy_kill(delpol);
802 	else if (xfrm_bydst_should_resize(net, dir, NULL))
803 		schedule_work(&net->xfrm.policy_hash_work);
804 
805 	return 0;
806 }
807 EXPORT_SYMBOL(xfrm_policy_insert);
808 
809 struct xfrm_policy *xfrm_policy_bysel_ctx(struct net *net, u32 mark, u32 if_id,
810 					  u8 type, int dir,
811 					  struct xfrm_selector *sel,
812 					  struct xfrm_sec_ctx *ctx, int delete,
813 					  int *err)
814 {
815 	struct xfrm_policy *pol, *ret;
816 	struct hlist_head *chain;
817 
818 	*err = 0;
819 	spin_lock_bh(&net->xfrm.xfrm_policy_lock);
820 	chain = policy_hash_bysel(net, sel, sel->family, dir);
821 	ret = NULL;
822 	hlist_for_each_entry(pol, chain, bydst) {
823 		if (pol->type == type &&
824 		    pol->if_id == if_id &&
825 		    (mark & pol->mark.m) == pol->mark.v &&
826 		    !selector_cmp(sel, &pol->selector) &&
827 		    xfrm_sec_ctx_match(ctx, pol->security)) {
828 			xfrm_pol_hold(pol);
829 			if (delete) {
830 				*err = security_xfrm_policy_delete(
831 								pol->security);
832 				if (*err) {
833 					spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
834 					return pol;
835 				}
836 				__xfrm_policy_unlink(pol, dir);
837 			}
838 			ret = pol;
839 			break;
840 		}
841 	}
842 	spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
843 
844 	if (ret && delete)
845 		xfrm_policy_kill(ret);
846 	return ret;
847 }
848 EXPORT_SYMBOL(xfrm_policy_bysel_ctx);
849 
850 struct xfrm_policy *xfrm_policy_byid(struct net *net, u32 mark, u32 if_id,
851 				     u8 type, int dir, u32 id, int delete,
852 				     int *err)
853 {
854 	struct xfrm_policy *pol, *ret;
855 	struct hlist_head *chain;
856 
857 	*err = -ENOENT;
858 	if (xfrm_policy_id2dir(id) != dir)
859 		return NULL;
860 
861 	*err = 0;
862 	spin_lock_bh(&net->xfrm.xfrm_policy_lock);
863 	chain = net->xfrm.policy_byidx + idx_hash(net, id);
864 	ret = NULL;
865 	hlist_for_each_entry(pol, chain, byidx) {
866 		if (pol->type == type && pol->index == id &&
867 		    pol->if_id == if_id &&
868 		    (mark & pol->mark.m) == pol->mark.v) {
869 			xfrm_pol_hold(pol);
870 			if (delete) {
871 				*err = security_xfrm_policy_delete(
872 								pol->security);
873 				if (*err) {
874 					spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
875 					return pol;
876 				}
877 				__xfrm_policy_unlink(pol, dir);
878 			}
879 			ret = pol;
880 			break;
881 		}
882 	}
883 	spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
884 
885 	if (ret && delete)
886 		xfrm_policy_kill(ret);
887 	return ret;
888 }
889 EXPORT_SYMBOL(xfrm_policy_byid);
890 
891 #ifdef CONFIG_SECURITY_NETWORK_XFRM
892 static inline int
893 xfrm_policy_flush_secctx_check(struct net *net, u8 type, bool task_valid)
894 {
895 	int dir, err = 0;
896 
897 	for (dir = 0; dir < XFRM_POLICY_MAX; dir++) {
898 		struct xfrm_policy *pol;
899 		int i;
900 
901 		hlist_for_each_entry(pol,
902 				     &net->xfrm.policy_inexact[dir], bydst) {
903 			if (pol->type != type)
904 				continue;
905 			err = security_xfrm_policy_delete(pol->security);
906 			if (err) {
907 				xfrm_audit_policy_delete(pol, 0, task_valid);
908 				return err;
909 			}
910 		}
911 		for (i = net->xfrm.policy_bydst[dir].hmask; i >= 0; i--) {
912 			hlist_for_each_entry(pol,
913 					     net->xfrm.policy_bydst[dir].table + i,
914 					     bydst) {
915 				if (pol->type != type)
916 					continue;
917 				err = security_xfrm_policy_delete(
918 								pol->security);
919 				if (err) {
920 					xfrm_audit_policy_delete(pol, 0,
921 								 task_valid);
922 					return err;
923 				}
924 			}
925 		}
926 	}
927 	return err;
928 }
929 #else
930 static inline int
931 xfrm_policy_flush_secctx_check(struct net *net, u8 type, bool task_valid)
932 {
933 	return 0;
934 }
935 #endif
936 
937 int xfrm_policy_flush(struct net *net, u8 type, bool task_valid)
938 {
939 	int dir, err = 0, cnt = 0;
940 
941 	spin_lock_bh(&net->xfrm.xfrm_policy_lock);
942 
943 	err = xfrm_policy_flush_secctx_check(net, type, task_valid);
944 	if (err)
945 		goto out;
946 
947 	for (dir = 0; dir < XFRM_POLICY_MAX; dir++) {
948 		struct xfrm_policy *pol;
949 		int i;
950 
951 	again1:
952 		hlist_for_each_entry(pol,
953 				     &net->xfrm.policy_inexact[dir], bydst) {
954 			if (pol->type != type)
955 				continue;
956 			__xfrm_policy_unlink(pol, dir);
957 			spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
958 			cnt++;
959 
960 			xfrm_audit_policy_delete(pol, 1, task_valid);
961 
962 			xfrm_policy_kill(pol);
963 
964 			spin_lock_bh(&net->xfrm.xfrm_policy_lock);
965 			goto again1;
966 		}
967 
968 		for (i = net->xfrm.policy_bydst[dir].hmask; i >= 0; i--) {
969 	again2:
970 			hlist_for_each_entry(pol,
971 					     net->xfrm.policy_bydst[dir].table + i,
972 					     bydst) {
973 				if (pol->type != type)
974 					continue;
975 				__xfrm_policy_unlink(pol, dir);
976 				spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
977 				cnt++;
978 
979 				xfrm_audit_policy_delete(pol, 1, task_valid);
980 				xfrm_policy_kill(pol);
981 
982 				spin_lock_bh(&net->xfrm.xfrm_policy_lock);
983 				goto again2;
984 			}
985 		}
986 
987 	}
988 	if (!cnt)
989 		err = -ESRCH;
990 out:
991 	spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
992 	return err;
993 }
994 EXPORT_SYMBOL(xfrm_policy_flush);
995 
996 int xfrm_policy_walk(struct net *net, struct xfrm_policy_walk *walk,
997 		     int (*func)(struct xfrm_policy *, int, int, void*),
998 		     void *data)
999 {
1000 	struct xfrm_policy *pol;
1001 	struct xfrm_policy_walk_entry *x;
1002 	int error = 0;
1003 
1004 	if (walk->type >= XFRM_POLICY_TYPE_MAX &&
1005 	    walk->type != XFRM_POLICY_TYPE_ANY)
1006 		return -EINVAL;
1007 
1008 	if (list_empty(&walk->walk.all) && walk->seq != 0)
1009 		return 0;
1010 
1011 	spin_lock_bh(&net->xfrm.xfrm_policy_lock);
1012 	if (list_empty(&walk->walk.all))
1013 		x = list_first_entry(&net->xfrm.policy_all, struct xfrm_policy_walk_entry, all);
1014 	else
1015 		x = list_first_entry(&walk->walk.all,
1016 				     struct xfrm_policy_walk_entry, all);
1017 
1018 	list_for_each_entry_from(x, &net->xfrm.policy_all, all) {
1019 		if (x->dead)
1020 			continue;
1021 		pol = container_of(x, struct xfrm_policy, walk);
1022 		if (walk->type != XFRM_POLICY_TYPE_ANY &&
1023 		    walk->type != pol->type)
1024 			continue;
1025 		error = func(pol, xfrm_policy_id2dir(pol->index),
1026 			     walk->seq, data);
1027 		if (error) {
1028 			list_move_tail(&walk->walk.all, &x->all);
1029 			goto out;
1030 		}
1031 		walk->seq++;
1032 	}
1033 	if (walk->seq == 0) {
1034 		error = -ENOENT;
1035 		goto out;
1036 	}
1037 	list_del_init(&walk->walk.all);
1038 out:
1039 	spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
1040 	return error;
1041 }
1042 EXPORT_SYMBOL(xfrm_policy_walk);
1043 
1044 void xfrm_policy_walk_init(struct xfrm_policy_walk *walk, u8 type)
1045 {
1046 	INIT_LIST_HEAD(&walk->walk.all);
1047 	walk->walk.dead = 1;
1048 	walk->type = type;
1049 	walk->seq = 0;
1050 }
1051 EXPORT_SYMBOL(xfrm_policy_walk_init);
1052 
1053 void xfrm_policy_walk_done(struct xfrm_policy_walk *walk, struct net *net)
1054 {
1055 	if (list_empty(&walk->walk.all))
1056 		return;
1057 
1058 	spin_lock_bh(&net->xfrm.xfrm_policy_lock); /*FIXME where is net? */
1059 	list_del(&walk->walk.all);
1060 	spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
1061 }
1062 EXPORT_SYMBOL(xfrm_policy_walk_done);
1063 
1064 /*
1065  * Check whether a policy matches this flow.
1066  *
1067  * Returns 0 on a match, else a negative errno (-ESRCH if it does not match).
1068  */
1069 static int xfrm_policy_match(const struct xfrm_policy *pol,
1070 			     const struct flowi *fl,
1071 			     u8 type, u16 family, int dir, u32 if_id)
1072 {
1073 	const struct xfrm_selector *sel = &pol->selector;
1074 	int ret = -ESRCH;
1075 	bool match;
1076 
1077 	if (pol->family != family ||
1078 	    pol->if_id != if_id ||
1079 	    (fl->flowi_mark & pol->mark.m) != pol->mark.v ||
1080 	    pol->type != type)
1081 		return ret;
1082 
1083 	match = xfrm_selector_match(sel, fl, family);
1084 	if (match)
1085 		ret = security_xfrm_policy_lookup(pol->security, fl->flowi_secid,
1086 						  dir);
1087 
1088 	return ret;
1089 }
1090 
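/* Find the best matching policy of the given type for this flow:
 * search the hashed (exact) chain first, then the inexact list,
 * retrying if the hash tables were resized underneath us.
 */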
1091 static struct xfrm_policy *xfrm_policy_lookup_bytype(struct net *net, u8 type,
1092 						     const struct flowi *fl,
1093 						     u16 family, u8 dir,
1094 						     u32 if_id)
1095 {
1096 	int err;
1097 	struct xfrm_policy *pol, *ret;
1098 	const xfrm_address_t *daddr, *saddr;
1099 	struct hlist_head *chain;
1100 	unsigned int sequence;
1101 	u32 priority;
1102 
1103 	daddr = xfrm_flowi_daddr(fl, family);
1104 	saddr = xfrm_flowi_saddr(fl, family);
1105 	if (unlikely(!daddr || !saddr))
1106 		return NULL;
1107 
1108 	rcu_read_lock();
1109  retry:
1110 	do {
1111 		sequence = read_seqcount_begin(&xfrm_policy_hash_generation);
1112 		chain = policy_hash_direct(net, daddr, saddr, family, dir);
1113 	} while (read_seqcount_retry(&xfrm_policy_hash_generation, sequence));
1114 
1115 	priority = ~0U;
1116 	ret = NULL;
1117 	hlist_for_each_entry_rcu(pol, chain, bydst) {
1118 		err = xfrm_policy_match(pol, fl, type, family, dir, if_id);
1119 		if (err) {
1120 			if (err == -ESRCH)
1121 				continue;
1122 			else {
1123 				ret = ERR_PTR(err);
1124 				goto fail;
1125 			}
1126 		} else {
1127 			ret = pol;
1128 			priority = ret->priority;
1129 			break;
1130 		}
1131 	}
1132 	chain = &net->xfrm.policy_inexact[dir];
1133 	hlist_for_each_entry_rcu(pol, chain, bydst) {
1134 		if ((pol->priority >= priority) && ret)
1135 			break;
1136 
1137 		err = xfrm_policy_match(pol, fl, type, family, dir, if_id);
1138 		if (err) {
1139 			if (err == -ESRCH)
1140 				continue;
1141 			else {
1142 				ret = ERR_PTR(err);
1143 				goto fail;
1144 			}
1145 		} else {
1146 			ret = pol;
1147 			break;
1148 		}
1149 	}
1150 
1151 	if (read_seqcount_retry(&xfrm_policy_hash_generation, sequence))
1152 		goto retry;
1153 
1154 	if (ret && !xfrm_pol_hold_rcu(ret))
1155 		goto retry;
1156 fail:
1157 	rcu_read_unlock();
1158 
1159 	return ret;
1160 }
1161 
1162 static struct xfrm_policy *xfrm_policy_lookup(struct net *net,
1163 					      const struct flowi *fl,
1164 					      u16 family, u8 dir, u32 if_id)
1165 {
1166 #ifdef CONFIG_XFRM_SUB_POLICY
1167 	struct xfrm_policy *pol;
1168 
1169 	pol = xfrm_policy_lookup_bytype(net, XFRM_POLICY_TYPE_SUB, fl, family,
1170 					dir, if_id);
1171 	if (pol != NULL)
1172 		return pol;
1173 #endif
1174 	return xfrm_policy_lookup_bytype(net, XFRM_POLICY_TYPE_MAIN, fl, family,
1175 					 dir, if_id);
1176 }
1177 
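/* Look up a per-socket policy for this flow under RCU.  Returns the
 * held policy on a match, NULL when nothing applies, or an ERR_PTR
 * if the security lookup fails.
 */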
1178 static struct xfrm_policy *xfrm_sk_policy_lookup(const struct sock *sk, int dir,
1179 						 const struct flowi *fl,
1180 						 u16 family, u32 if_id)
1181 {
1182 	struct xfrm_policy *pol;
1183 
1184 	rcu_read_lock();
1185  again:
1186 	pol = rcu_dereference(sk->sk_policy[dir]);
1187 	if (pol != NULL) {
1188 		bool match;
1189 		int err = 0;
1190 
1191 		if (pol->family != family) {
1192 			pol = NULL;
1193 			goto out;
1194 		}
1195 
1196 		match = xfrm_selector_match(&pol->selector, fl, family);
1197 		if (match) {
1198 			if ((sk->sk_mark & pol->mark.m) != pol->mark.v ||
1199 			    pol->if_id != if_id) {
1200 				pol = NULL;
1201 				goto out;
1202 			}
1203 			err = security_xfrm_policy_lookup(pol->security,
1204 						      fl->flowi_secid,
1205 						      dir);
1206 			if (!err) {
1207 				if (!xfrm_pol_hold_rcu(pol))
1208 					goto again;
1209 			} else if (err == -ESRCH) {
1210 				pol = NULL;
1211 			} else {
1212 				pol = ERR_PTR(err);
1213 			}
1214 		} else
1215 			pol = NULL;
1216 	}
1217 out:
1218 	rcu_read_unlock();
1219 	return pol;
1220 }
1221 
1222 static void __xfrm_policy_link(struct xfrm_policy *pol, int dir)
1223 {
1224 	struct net *net = xp_net(pol);
1225 
1226 	list_add(&pol->walk.all, &net->xfrm.policy_all);
1227 	net->xfrm.policy_count[dir]++;
1228 	xfrm_pol_hold(pol);
1229 }
1230 
1231 static struct xfrm_policy *__xfrm_policy_unlink(struct xfrm_policy *pol,
1232 						int dir)
1233 {
1234 	struct net *net = xp_net(pol);
1235 
1236 	if (list_empty(&pol->walk.all))
1237 		return NULL;
1238 
1239 	/* Socket policies are not hashed. */
1240 	if (!hlist_unhashed(&pol->bydst)) {
1241 		hlist_del_rcu(&pol->bydst);
1242 		hlist_del(&pol->byidx);
1243 	}
1244 
1245 	list_del_init(&pol->walk.all);
1246 	net->xfrm.policy_count[dir]--;
1247 
1248 	return pol;
1249 }
1250 
1251 static void xfrm_sk_policy_link(struct xfrm_policy *pol, int dir)
1252 {
1253 	__xfrm_policy_link(pol, XFRM_POLICY_MAX + dir);
1254 }
1255 
1256 static void xfrm_sk_policy_unlink(struct xfrm_policy *pol, int dir)
1257 {
1258 	__xfrm_policy_unlink(pol, XFRM_POLICY_MAX + dir);
1259 }
1260 
1261 int xfrm_policy_delete(struct xfrm_policy *pol, int dir)
1262 {
1263 	struct net *net = xp_net(pol);
1264 
1265 	spin_lock_bh(&net->xfrm.xfrm_policy_lock);
1266 	pol = __xfrm_policy_unlink(pol, dir);
1267 	spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
1268 	if (pol) {
1269 		xfrm_policy_kill(pol);
1270 		return 0;
1271 	}
1272 	return -ENOENT;
1273 }
1274 EXPORT_SYMBOL(xfrm_policy_delete);
1275 
1276 int xfrm_sk_policy_insert(struct sock *sk, int dir, struct xfrm_policy *pol)
1277 {
1278 	struct net *net = sock_net(sk);
1279 	struct xfrm_policy *old_pol;
1280 
1281 #ifdef CONFIG_XFRM_SUB_POLICY
1282 	if (pol && pol->type != XFRM_POLICY_TYPE_MAIN)
1283 		return -EINVAL;
1284 #endif
1285 
1286 	spin_lock_bh(&net->xfrm.xfrm_policy_lock);
1287 	old_pol = rcu_dereference_protected(sk->sk_policy[dir],
1288 				lockdep_is_held(&net->xfrm.xfrm_policy_lock));
1289 	if (pol) {
1290 		pol->curlft.add_time = ktime_get_real_seconds();
1291 		pol->index = xfrm_gen_index(net, XFRM_POLICY_MAX+dir, 0);
1292 		xfrm_sk_policy_link(pol, dir);
1293 	}
1294 	rcu_assign_pointer(sk->sk_policy[dir], pol);
1295 	if (old_pol) {
1296 		if (pol)
1297 			xfrm_policy_requeue(old_pol, pol);
1298 
1299 		/* Unlinking always succeeds. This is the only function
1300 		 * allowed to delete or replace a socket policy.
1301 		 */
1302 		xfrm_sk_policy_unlink(old_pol, dir);
1303 	}
1304 	spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
1305 
1306 	if (old_pol) {
1307 		xfrm_policy_kill(old_pol);
1308 	}
1309 	return 0;
1310 }
1311 
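/* Duplicate a socket policy for a newly cloned socket.  The copy gets
 * its own security context and is linked in as a socket policy; NULL
 * is returned on allocation or security failure.
 */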
1312 static struct xfrm_policy *clone_policy(const struct xfrm_policy *old, int dir)
1313 {
1314 	struct xfrm_policy *newp = xfrm_policy_alloc(xp_net(old), GFP_ATOMIC);
1315 	struct net *net = xp_net(old);
1316 
1317 	if (newp) {
1318 		newp->selector = old->selector;
1319 		if (security_xfrm_policy_clone(old->security,
1320 					       &newp->security)) {
1321 			kfree(newp);
1322 			return NULL;  /* ENOMEM */
1323 		}
1324 		newp->lft = old->lft;
1325 		newp->curlft = old->curlft;
1326 		newp->mark = old->mark;
1327 		newp->if_id = old->if_id;
1328 		newp->action = old->action;
1329 		newp->flags = old->flags;
1330 		newp->xfrm_nr = old->xfrm_nr;
1331 		newp->index = old->index;
1332 		newp->type = old->type;
1333 		newp->family = old->family;
1334 		memcpy(newp->xfrm_vec, old->xfrm_vec,
1335 		       newp->xfrm_nr*sizeof(struct xfrm_tmpl));
1336 		spin_lock_bh(&net->xfrm.xfrm_policy_lock);
1337 		xfrm_sk_policy_link(newp, dir);
1338 		spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
1339 		xfrm_pol_put(newp);
1340 	}
1341 	return newp;
1342 }
1343 
1344 int __xfrm_sk_clone_policy(struct sock *sk, const struct sock *osk)
1345 {
1346 	const struct xfrm_policy *p;
1347 	struct xfrm_policy *np;
1348 	int i, ret = 0;
1349 
1350 	rcu_read_lock();
1351 	for (i = 0; i < 2; i++) {
1352 		p = rcu_dereference(osk->sk_policy[i]);
1353 		if (p) {
1354 			np = clone_policy(p, i);
1355 			if (unlikely(!np)) {
1356 				ret = -ENOMEM;
1357 				break;
1358 			}
1359 			rcu_assign_pointer(sk->sk_policy[i], np);
1360 		}
1361 	}
1362 	rcu_read_unlock();
1363 	return ret;
1364 }
1365 
1366 static int
1367 xfrm_get_saddr(struct net *net, int oif, xfrm_address_t *local,
1368 	       xfrm_address_t *remote, unsigned short family, u32 mark)
1369 {
1370 	int err;
1371 	const struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
1372 
1373 	if (unlikely(afinfo == NULL))
1374 		return -EINVAL;
1375 	err = afinfo->get_saddr(net, oif, local, remote, mark);
1376 	rcu_read_unlock();
1377 	return err;
1378 }
1379 
1380 /* Resolve list of templates for the flow, given policy. */
1381 
1382 static int
1383 xfrm_tmpl_resolve_one(struct xfrm_policy *policy, const struct flowi *fl,
1384 		      struct xfrm_state **xfrm, unsigned short family)
1385 {
1386 	struct net *net = xp_net(policy);
1387 	int nx;
1388 	int i, error;
1389 	xfrm_address_t *daddr = xfrm_flowi_daddr(fl, family);
1390 	xfrm_address_t *saddr = xfrm_flowi_saddr(fl, family);
1391 	xfrm_address_t tmp;
1392 
1393 	for (nx = 0, i = 0; i < policy->xfrm_nr; i++) {
1394 		struct xfrm_state *x;
1395 		xfrm_address_t *remote = daddr;
1396 		xfrm_address_t *local  = saddr;
1397 		struct xfrm_tmpl *tmpl = &policy->xfrm_vec[i];
1398 
1399 		if (tmpl->mode == XFRM_MODE_TUNNEL ||
1400 		    tmpl->mode == XFRM_MODE_BEET) {
1401 			remote = &tmpl->id.daddr;
1402 			local = &tmpl->saddr;
1403 			if (xfrm_addr_any(local, tmpl->encap_family)) {
1404 				error = xfrm_get_saddr(net, fl->flowi_oif,
1405 						       &tmp, remote,
1406 						       tmpl->encap_family, 0);
1407 				if (error)
1408 					goto fail;
1409 				local = &tmp;
1410 			}
1411 		}
1412 
1413 		x = xfrm_state_find(remote, local, fl, tmpl, policy, &error,
1414 				    family, policy->if_id);
1415 
1416 		if (x && x->km.state == XFRM_STATE_VALID) {
1417 			xfrm[nx++] = x;
1418 			daddr = remote;
1419 			saddr = local;
1420 			continue;
1421 		}
1422 		if (x) {
1423 			error = (x->km.state == XFRM_STATE_ERROR ?
1424 				 -EINVAL : -EAGAIN);
1425 			xfrm_state_put(x);
1426 		} else if (error == -ESRCH) {
1427 			error = -EAGAIN;
1428 		}
1429 
1430 		if (!tmpl->optional)
1431 			goto fail;
1432 	}
1433 	return nx;
1434 
1435 fail:
1436 	for (nx--; nx >= 0; nx--)
1437 		xfrm_state_put(xfrm[nx]);
1438 	return error;
1439 }
1440 
1441 static int
1442 xfrm_tmpl_resolve(struct xfrm_policy **pols, int npols, const struct flowi *fl,
1443 		  struct xfrm_state **xfrm, unsigned short family)
1444 {
1445 	struct xfrm_state *tp[XFRM_MAX_DEPTH];
1446 	struct xfrm_state **tpp = (npols > 1) ? tp : xfrm;
1447 	int cnx = 0;
1448 	int error;
1449 	int ret;
1450 	int i;
1451 
1452 	for (i = 0; i < npols; i++) {
1453 		if (cnx + pols[i]->xfrm_nr >= XFRM_MAX_DEPTH) {
1454 			error = -ENOBUFS;
1455 			goto fail;
1456 		}
1457 
1458 		ret = xfrm_tmpl_resolve_one(pols[i], fl, &tpp[cnx], family);
1459 		if (ret < 0) {
1460 			error = ret;
1461 			goto fail;
1462 		} else
1463 			cnx += ret;
1464 	}
1465 
1466 	/* found states are sorted for outbound processing */
1467 	if (npols > 1)
1468 		xfrm_state_sort(xfrm, tpp, cnx, family);
1469 
1470 	return cnx;
1471 
1472  fail:
1473 	for (cnx--; cnx >= 0; cnx--)
1474 		xfrm_state_put(tpp[cnx]);
1475 	return error;
1476 
1477 }
1478 
1479 static int xfrm_get_tos(const struct flowi *fl, int family)
1480 {
1481 	const struct xfrm_policy_afinfo *afinfo;
1482 	int tos;
1483 
1484 	afinfo = xfrm_policy_get_afinfo(family);
1485 	if (!afinfo)
1486 		return 0;
1487 
1488 	tos = afinfo->get_tos(fl);
1489 
1490 	rcu_read_unlock();
1491 
1492 	return tos;
1493 }
1494 
1495 static inline struct xfrm_dst *xfrm_alloc_dst(struct net *net, int family)
1496 {
1497 	const struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
1498 	struct dst_ops *dst_ops;
1499 	struct xfrm_dst *xdst;
1500 
1501 	if (!afinfo)
1502 		return ERR_PTR(-EINVAL);
1503 
1504 	switch (family) {
1505 	case AF_INET:
1506 		dst_ops = &net->xfrm.xfrm4_dst_ops;
1507 		break;
1508 #if IS_ENABLED(CONFIG_IPV6)
1509 	case AF_INET6:
1510 		dst_ops = &net->xfrm.xfrm6_dst_ops;
1511 		break;
1512 #endif
1513 	default:
1514 		BUG();
1515 	}
1516 	xdst = dst_alloc(dst_ops, NULL, 1, DST_OBSOLETE_NONE, 0);
1517 
1518 	if (likely(xdst)) {
1519 		struct dst_entry *dst = &xdst->u.dst;
1520 
1521 		memset(dst + 1, 0, sizeof(*xdst) - sizeof(*dst));
1522 	} else
1523 		xdst = ERR_PTR(-ENOBUFS);
1524 
1525 	rcu_read_unlock();
1526 
1527 	return xdst;
1528 }
1529 
1530 static inline int xfrm_init_path(struct xfrm_dst *path, struct dst_entry *dst,
1531 				 int nfheader_len)
1532 {
1533 	const struct xfrm_policy_afinfo *afinfo =
1534 		xfrm_policy_get_afinfo(dst->ops->family);
1535 	int err;
1536 
1537 	if (!afinfo)
1538 		return -EINVAL;
1539 
1540 	err = afinfo->init_path(path, dst, nfheader_len);
1541 
1542 	rcu_read_unlock();
1543 
1544 	return err;
1545 }
1546 
1547 static inline int xfrm_fill_dst(struct xfrm_dst *xdst, struct net_device *dev,
1548 				const struct flowi *fl)
1549 {
1550 	const struct xfrm_policy_afinfo *afinfo =
1551 		xfrm_policy_get_afinfo(xdst->u.dst.ops->family);
1552 	int err;
1553 
1554 	if (!afinfo)
1555 		return -EINVAL;
1556 
1557 	err = afinfo->fill_dst(xdst, dev, fl);
1558 
1559 	rcu_read_unlock();
1560 
1561 	return err;
1562 }
1563 
1564 
1565 /* Allocate a chain of dst_entry's, attach known xfrm's, calculate
1566  * all the metrics... In short, bundle a bundle.
1567  */
1568 
1569 static struct dst_entry *xfrm_bundle_create(struct xfrm_policy *policy,
1570 					    struct xfrm_state **xfrm,
1571 					    struct xfrm_dst **bundle,
1572 					    int nx,
1573 					    const struct flowi *fl,
1574 					    struct dst_entry *dst)
1575 {
1576 	struct net *net = xp_net(policy);
1577 	unsigned long now = jiffies;
1578 	struct net_device *dev;
1579 	struct xfrm_mode *inner_mode;
1580 	struct xfrm_dst *xdst_prev = NULL;
1581 	struct xfrm_dst *xdst0 = NULL;
1582 	int i = 0;
1583 	int err;
1584 	int header_len = 0;
1585 	int nfheader_len = 0;
1586 	int trailer_len = 0;
1587 	int tos;
1588 	int family = policy->selector.family;
1589 	xfrm_address_t saddr, daddr;
1590 
1591 	xfrm_flowi_addr_get(fl, &saddr, &daddr, family);
1592 
1593 	tos = xfrm_get_tos(fl, family);
1594 
1595 	dst_hold(dst);
1596 
1597 	for (; i < nx; i++) {
1598 		struct xfrm_dst *xdst = xfrm_alloc_dst(net, family);
1599 		struct dst_entry *dst1 = &xdst->u.dst;
1600 
1601 		err = PTR_ERR(xdst);
1602 		if (IS_ERR(xdst)) {
1603 			dst_release(dst);
1604 			goto put_states;
1605 		}
1606 
1607 		bundle[i] = xdst;
1608 		if (!xdst_prev)
1609 			xdst0 = xdst;
1610 		else
1611 			/* Ref count is taken during xfrm_alloc_dst()
1612 			 * No need to do dst_clone() on dst1
1613 			 */
1614 			xfrm_dst_set_child(xdst_prev, &xdst->u.dst);
1615 
1616 		if (xfrm[i]->sel.family == AF_UNSPEC) {
1617 			inner_mode = xfrm_ip2inner_mode(xfrm[i],
1618 							xfrm_af2proto(family));
1619 			if (!inner_mode) {
1620 				err = -EAFNOSUPPORT;
1621 				dst_release(dst);
1622 				goto put_states;
1623 			}
1624 		} else
1625 			inner_mode = xfrm[i]->inner_mode;
1626 
1627 		xdst->route = dst;
1628 		dst_copy_metrics(dst1, dst);
1629 
1630 		if (xfrm[i]->props.mode != XFRM_MODE_TRANSPORT) {
1631 			__u32 mark = xfrm_smark_get(fl->flowi_mark, xfrm[i]);
1632 
1633 			family = xfrm[i]->props.family;
1634 			dst = xfrm_dst_lookup(xfrm[i], tos, fl->flowi_oif,
1635 					      &saddr, &daddr, family, mark);
1636 			err = PTR_ERR(dst);
1637 			if (IS_ERR(dst))
1638 				goto put_states;
1639 		} else
1640 			dst_hold(dst);
1641 
1642 		dst1->xfrm = xfrm[i];
1643 		xdst->xfrm_genid = xfrm[i]->genid;
1644 
1645 		dst1->obsolete = DST_OBSOLETE_FORCE_CHK;
1646 		dst1->flags |= DST_HOST;
1647 		dst1->lastuse = now;
1648 
1649 		dst1->input = dst_discard;
1650 		dst1->output = inner_mode->afinfo->output;
1651 
1652 		xdst_prev = xdst;
1653 
1654 		header_len += xfrm[i]->props.header_len;
1655 		if (xfrm[i]->type->flags & XFRM_TYPE_NON_FRAGMENT)
1656 			nfheader_len += xfrm[i]->props.header_len;
1657 		trailer_len += xfrm[i]->props.trailer_len;
1658 	}
1659 
1660 	xfrm_dst_set_child(xdst_prev, dst);
1661 	xdst0->path = dst;
1662 
1663 	err = -ENODEV;
1664 	dev = dst->dev;
1665 	if (!dev)
1666 		goto free_dst;
1667 
1668 	xfrm_init_path(xdst0, dst, nfheader_len);
1669 	xfrm_init_pmtu(bundle, nx);
1670 
1671 	for (xdst_prev = xdst0; xdst_prev != (struct xfrm_dst *)dst;
1672 	     xdst_prev = (struct xfrm_dst *) xfrm_dst_child(&xdst_prev->u.dst)) {
1673 		err = xfrm_fill_dst(xdst_prev, dev, fl);
1674 		if (err)
1675 			goto free_dst;
1676 
1677 		xdst_prev->u.dst.header_len = header_len;
1678 		xdst_prev->u.dst.trailer_len = trailer_len;
1679 		header_len -= xdst_prev->u.dst.xfrm->props.header_len;
1680 		trailer_len -= xdst_prev->u.dst.xfrm->props.trailer_len;
1681 	}
1682 
1683 	return &xdst0->u.dst;
1684 
1685 put_states:
1686 	for (; i < nx; i++)
1687 		xfrm_state_put(xfrm[i]);
1688 free_dst:
1689 	if (xdst0)
1690 		dst_release_immediate(&xdst0->u.dst);
1691 
1692 	return ERR_PTR(err);
1693 }
1694 
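/* Expand pols[0] into the full set of applicable policies (adding the
 * main policy when a sub-policy matched) and count their templates;
 * *num_xfrms is set to -1 if any policy is not XFRM_POLICY_ALLOW.
 */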
1695 static int xfrm_expand_policies(const struct flowi *fl, u16 family,
1696 				struct xfrm_policy **pols,
1697 				int *num_pols, int *num_xfrms)
1698 {
1699 	int i;
1700 
1701 	if (*num_pols == 0 || !pols[0]) {
1702 		*num_pols = 0;
1703 		*num_xfrms = 0;
1704 		return 0;
1705 	}
1706 	if (IS_ERR(pols[0]))
1707 		return PTR_ERR(pols[0]);
1708 
1709 	*num_xfrms = pols[0]->xfrm_nr;
1710 
1711 #ifdef CONFIG_XFRM_SUB_POLICY
1712 	if (pols[0] && pols[0]->action == XFRM_POLICY_ALLOW &&
1713 	    pols[0]->type != XFRM_POLICY_TYPE_MAIN) {
1714 		pols[1] = xfrm_policy_lookup_bytype(xp_net(pols[0]),
1715 						    XFRM_POLICY_TYPE_MAIN,
1716 						    fl, family,
1717 						    XFRM_POLICY_OUT,
1718 						    pols[0]->if_id);
1719 		if (pols[1]) {
1720 			if (IS_ERR(pols[1])) {
1721 				xfrm_pols_put(pols, *num_pols);
1722 				return PTR_ERR(pols[1]);
1723 			}
1724 			(*num_pols)++;
1725 			(*num_xfrms) += pols[1]->xfrm_nr;
1726 		}
1727 	}
1728 #endif
1729 	for (i = 0; i < *num_pols; i++) {
1730 		if (pols[i]->action != XFRM_POLICY_ALLOW) {
1731 			*num_xfrms = -1;
1732 			break;
1733 		}
1734 	}
1735 
1736 	return 0;
1737 
1738 }
1739 
1740 static struct xfrm_dst *
1741 xfrm_resolve_and_create_bundle(struct xfrm_policy **pols, int num_pols,
1742 			       const struct flowi *fl, u16 family,
1743 			       struct dst_entry *dst_orig)
1744 {
1745 	struct net *net = xp_net(pols[0]);
1746 	struct xfrm_state *xfrm[XFRM_MAX_DEPTH];
1747 	struct xfrm_dst *bundle[XFRM_MAX_DEPTH];
1748 	struct xfrm_dst *xdst;
1749 	struct dst_entry *dst;
1750 	int err;
1751 
1752 	/* Try to instantiate a bundle */
1753 	err = xfrm_tmpl_resolve(pols, num_pols, fl, xfrm, family);
1754 	if (err <= 0) {
1755 		if (err == 0)
1756 			return NULL;
1757 
1758 		if (err != -EAGAIN)
1759 			XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTPOLERROR);
1760 		return ERR_PTR(err);
1761 	}
1762 
1763 	dst = xfrm_bundle_create(pols[0], xfrm, bundle, err, fl, dst_orig);
1764 	if (IS_ERR(dst)) {
1765 		XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTBUNDLEGENERROR);
1766 		return ERR_CAST(dst);
1767 	}
1768 
1769 	xdst = (struct xfrm_dst *)dst;
1770 	xdst->num_xfrms = err;
1771 	xdst->num_pols = num_pols;
1772 	memcpy(xdst->pols, pols, sizeof(struct xfrm_policy *) * num_pols);
1773 	xdst->policy_genid = atomic_read(&pols[0]->genid);
1774 
1775 	return xdst;
1776 }
1777 
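/* Hold-timer callback: retry the xfrm lookup for queued packets and
 * either transmit them, re-arm the timer with exponential backoff, or
 * purge the queue once XFRM_QUEUE_TMO_MAX has been exceeded.
 */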
1778 static void xfrm_policy_queue_process(struct timer_list *t)
1779 {
1780 	struct sk_buff *skb;
1781 	struct sock *sk;
1782 	struct dst_entry *dst;
1783 	struct xfrm_policy *pol = from_timer(pol, t, polq.hold_timer);
1784 	struct net *net = xp_net(pol);
1785 	struct xfrm_policy_queue *pq = &pol->polq;
1786 	struct flowi fl;
1787 	struct sk_buff_head list;
1788 
1789 	spin_lock(&pq->hold_queue.lock);
1790 	skb = skb_peek(&pq->hold_queue);
1791 	if (!skb) {
1792 		spin_unlock(&pq->hold_queue.lock);
1793 		goto out;
1794 	}
1795 	dst = skb_dst(skb);
1796 	sk = skb->sk;
1797 	xfrm_decode_session(skb, &fl, dst->ops->family);
1798 	spin_unlock(&pq->hold_queue.lock);
1799 
1800 	dst_hold(xfrm_dst_path(dst));
1801 	dst = xfrm_lookup(net, xfrm_dst_path(dst), &fl, sk, XFRM_LOOKUP_QUEUE);
1802 	if (IS_ERR(dst))
1803 		goto purge_queue;
1804 
1805 	if (dst->flags & DST_XFRM_QUEUE) {
1806 		dst_release(dst);
1807 
1808 		if (pq->timeout >= XFRM_QUEUE_TMO_MAX)
1809 			goto purge_queue;
1810 
1811 		pq->timeout = pq->timeout << 1;
1812 		if (!mod_timer(&pq->hold_timer, jiffies + pq->timeout))
1813 			xfrm_pol_hold(pol);
1814 		goto out;
1815 	}
1816 
1817 	dst_release(dst);
1818 
1819 	__skb_queue_head_init(&list);
1820 
1821 	spin_lock(&pq->hold_queue.lock);
1822 	pq->timeout = 0;
1823 	skb_queue_splice_init(&pq->hold_queue, &list);
1824 	spin_unlock(&pq->hold_queue.lock);
1825 
1826 	while (!skb_queue_empty(&list)) {
1827 		skb = __skb_dequeue(&list);
1828 
1829 		xfrm_decode_session(skb, &fl, skb_dst(skb)->ops->family);
1830 		dst_hold(xfrm_dst_path(skb_dst(skb)));
1831 		dst = xfrm_lookup(net, xfrm_dst_path(skb_dst(skb)), &fl, skb->sk, 0);
1832 		if (IS_ERR(dst)) {
1833 			kfree_skb(skb);
1834 			continue;
1835 		}
1836 
1837 		nf_reset(skb);
1838 		skb_dst_drop(skb);
1839 		skb_dst_set(skb, dst);
1840 
1841 		dst_output(net, skb->sk, skb);
1842 	}
1843 
1844 out:
1845 	xfrm_pol_put(pol);
1846 	return;
1847 
1848 purge_queue:
1849 	pq->timeout = 0;
1850 	skb_queue_purge(&pq->hold_queue);
1851 	xfrm_pol_put(pol);
1852 }
1853 
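/* Output function of a dummy (DST_XFRM_QUEUE) bundle: park the skb on
 * the policy's hold queue, bounded by XFRM_MAX_QUEUE_LEN, until the
 * required states become available.
 */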
1854 static int xdst_queue_output(struct net *net, struct sock *sk, struct sk_buff *skb)
1855 {
1856 	unsigned long sched_next;
1857 	struct dst_entry *dst = skb_dst(skb);
1858 	struct xfrm_dst *xdst = (struct xfrm_dst *) dst;
1859 	struct xfrm_policy *pol = xdst->pols[0];
1860 	struct xfrm_policy_queue *pq = &pol->polq;
1861 
1862 	if (unlikely(skb_fclone_busy(sk, skb))) {
1863 		kfree_skb(skb);
1864 		return 0;
1865 	}
1866 
1867 	if (pq->hold_queue.qlen > XFRM_MAX_QUEUE_LEN) {
1868 		kfree_skb(skb);
1869 		return -EAGAIN;
1870 	}
1871 
1872 	skb_dst_force(skb);
1873 
1874 	spin_lock_bh(&pq->hold_queue.lock);
1875 
1876 	if (!pq->timeout)
1877 		pq->timeout = XFRM_QUEUE_TMO_MIN;
1878 
1879 	sched_next = jiffies + pq->timeout;
1880 
1881 	if (del_timer(&pq->hold_timer)) {
1882 		if (time_before(pq->hold_timer.expires, sched_next))
1883 			sched_next = pq->hold_timer.expires;
1884 		xfrm_pol_put(pol);
1885 	}
1886 
1887 	__skb_queue_tail(&pq->hold_queue, skb);
1888 	if (!mod_timer(&pq->hold_timer, sched_next))
1889 		xfrm_pol_hold(pol);
1890 
1891 	spin_unlock_bh(&pq->hold_queue.lock);
1892 
1893 	return 0;
1894 }
1895 
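/* Build a queueing (DST_XFRM_QUEUE) bundle on top of xflo->dst_orig so
 * packets can be held while larval states resolve.  A bare xfrm_dst is
 * returned when queueing is not requested or larval drop is enabled.
 */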
1896 static struct xfrm_dst *xfrm_create_dummy_bundle(struct net *net,
1897 						 struct xfrm_flo *xflo,
1898 						 const struct flowi *fl,
1899 						 int num_xfrms,
1900 						 u16 family)
1901 {
1902 	int err;
1903 	struct net_device *dev;
1904 	struct dst_entry *dst;
1905 	struct dst_entry *dst1;
1906 	struct xfrm_dst *xdst;
1907 
1908 	xdst = xfrm_alloc_dst(net, family);
1909 	if (IS_ERR(xdst))
1910 		return xdst;
1911 
1912 	if (!(xflo->flags & XFRM_LOOKUP_QUEUE) ||
1913 	    net->xfrm.sysctl_larval_drop ||
1914 	    num_xfrms <= 0)
1915 		return xdst;
1916 
1917 	dst = xflo->dst_orig;
1918 	dst1 = &xdst->u.dst;
1919 	dst_hold(dst);
1920 	xdst->route = dst;
1921 
1922 	dst_copy_metrics(dst1, dst);
1923 
1924 	dst1->obsolete = DST_OBSOLETE_FORCE_CHK;
1925 	dst1->flags |= DST_HOST | DST_XFRM_QUEUE;
1926 	dst1->lastuse = jiffies;
1927 
1928 	dst1->input = dst_discard;
1929 	dst1->output = xdst_queue_output;
1930 
1931 	dst_hold(dst);
1932 	xfrm_dst_set_child(xdst, dst);
1933 	xdst->path = dst;
1934 
1935 	xfrm_init_path((struct xfrm_dst *)dst1, dst, 0);
1936 
1937 	err = -ENODEV;
1938 	dev = dst->dev;
1939 	if (!dev)
1940 		goto free_dst;
1941 
1942 	err = xfrm_fill_dst(xdst, dev, fl);
1943 	if (err)
1944 		goto free_dst;
1945 
1946 out:
1947 	return xdst;
1948 
1949 free_dst:
1950 	dst_release(dst1);
1951 	xdst = ERR_PTR(err);
1952 	goto out;
1953 }
1954 
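/* Resolve the policies for this flow and create either a real bundle
 * or a dummy queueing bundle.  Returns NULL when no policy applies,
 * or an ERR_PTR on error.
 */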
1955 static struct xfrm_dst *xfrm_bundle_lookup(struct net *net,
1956 					   const struct flowi *fl,
1957 					   u16 family, u8 dir,
1958 					   struct xfrm_flo *xflo, u32 if_id)
1959 {
1960 	struct xfrm_policy *pols[XFRM_POLICY_TYPE_MAX];
1961 	int num_pols = 0, num_xfrms = 0, err;
1962 	struct xfrm_dst *xdst;
1963 
1964 	/* Resolve policies to use if we couldn't get them from
1965 	 * previous cache entry */
1966 	num_pols = 1;
1967 	pols[0] = xfrm_policy_lookup(net, fl, family, dir, if_id);
1968 	err = xfrm_expand_policies(fl, family, pols,
1969 					   &num_pols, &num_xfrms);
1970 	if (err < 0)
1971 		goto inc_error;
1972 	if (num_pols == 0)
1973 		return NULL;
1974 	if (num_xfrms <= 0)
1975 		goto make_dummy_bundle;
1976 
1977 	xdst = xfrm_resolve_and_create_bundle(pols, num_pols, fl, family,
1978 					      xflo->dst_orig);
1979 	if (IS_ERR(xdst)) {
1980 		err = PTR_ERR(xdst);
1981 		if (err == -EREMOTE) {
1982 			xfrm_pols_put(pols, num_pols);
1983 			return NULL;
1984 		}
1985 
1986 		if (err != -EAGAIN)
1987 			goto error;
1988 		goto make_dummy_bundle;
1989 	} else if (xdst == NULL) {
1990 		num_xfrms = 0;
1991 		goto make_dummy_bundle;
1992 	}
1993 
1994 	return xdst;
1995 
1996 make_dummy_bundle:
1997 	/* We found policies, but there are no bundles to instantiate:
1998 	 * either because the policy blocks, has no transformations, or
1999 	 * we could not build a template (no xfrm_states). */
2000 	xdst = xfrm_create_dummy_bundle(net, xflo, fl, num_xfrms, family);
2001 	if (IS_ERR(xdst)) {
2002 		xfrm_pols_put(pols, num_pols);
2003 		return ERR_CAST(xdst);
2004 	}
2005 	xdst->num_pols = num_pols;
2006 	xdst->num_xfrms = num_xfrms;
2007 	memcpy(xdst->pols, pols, sizeof(struct xfrm_policy *) * num_pols);
2008 
2009 	return xdst;
2010 
2011 inc_error:
2012 	XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTPOLERROR);
2013 error:
2014 	xfrm_pols_put(pols, num_pols);
2015 	return ERR_PTR(err);
2016 }
2017 
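/* Replace the original route with the address family's blackhole
 * route while larval states are being negotiated; packets routed
 * through it are discarded rather than sent in the clear.
 */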
2018 static struct dst_entry *make_blackhole(struct net *net, u16 family,
2019 					struct dst_entry *dst_orig)
2020 {
2021 	const struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
2022 	struct dst_entry *ret;
2023 
2024 	if (!afinfo) {
2025 		dst_release(dst_orig);
2026 		return ERR_PTR(-EINVAL);
2027 	} else {
2028 		ret = afinfo->blackhole_route(net, dst_orig);
2029 	}
2030 	rcu_read_unlock();
2031 
2032 	return ret;
2033 }
2034 
2035 /* Finds/creates a bundle for a given flow and if_id.
2036  *
2037  * At the moment we eat a raw IP route. Mostly to speed up lookups
2038  * on interfaces with disabled IPsec.
2039  *
2040  * xfrm_lookup() uses an if_id of 0 by default, and is provided for
2041  * compatibility.
2042  */
2043 struct dst_entry *xfrm_lookup_with_ifid(struct net *net,
2044 					struct dst_entry *dst_orig,
2045 					const struct flowi *fl,
2046 					const struct sock *sk,
2047 					int flags, u32 if_id)
2048 {
2049 	struct xfrm_policy *pols[XFRM_POLICY_TYPE_MAX];
2050 	struct xfrm_dst *xdst;
2051 	struct dst_entry *dst, *route;
2052 	u16 family = dst_orig->ops->family;
2053 	u8 dir = XFRM_POLICY_OUT;
2054 	int i, err, num_pols, num_xfrms = 0, drop_pols = 0;
2055 
2056 	dst = NULL;
2057 	xdst = NULL;
2058 	route = NULL;
2059 
2060 	sk = sk_const_to_full_sk(sk);
2061 	if (sk && sk->sk_policy[XFRM_POLICY_OUT]) {
2062 		num_pols = 1;
2063 		pols[0] = xfrm_sk_policy_lookup(sk, XFRM_POLICY_OUT, fl, family,
2064 						if_id);
2065 		err = xfrm_expand_policies(fl, family, pols,
2066 					   &num_pols, &num_xfrms);
2067 		if (err < 0)
2068 			goto dropdst;
2069 
2070 		if (num_pols) {
2071 			if (num_xfrms <= 0) {
2072 				drop_pols = num_pols;
2073 				goto no_transform;
2074 			}
2075 
2076 			xdst = xfrm_resolve_and_create_bundle(
2077 					pols, num_pols, fl,
2078 					family, dst_orig);
2079 
2080 			if (IS_ERR(xdst)) {
2081 				xfrm_pols_put(pols, num_pols);
2082 				err = PTR_ERR(xdst);
2083 				if (err == -EREMOTE)
2084 					goto nopol;
2085 
2086 				goto dropdst;
2087 			} else if (xdst == NULL) {
2088 				num_xfrms = 0;
2089 				drop_pols = num_pols;
2090 				goto no_transform;
2091 			}
2092 
2093 			route = xdst->route;
2094 		}
2095 	}
2096 
2097 	if (xdst == NULL) {
2098 		struct xfrm_flo xflo;
2099 
2100 		xflo.dst_orig = dst_orig;
2101 		xflo.flags = flags;
2102 
2103 		/* To accelerate a bit...  */
2104 		if ((dst_orig->flags & DST_NOXFRM) ||
2105 		    !net->xfrm.policy_count[XFRM_POLICY_OUT])
2106 			goto nopol;
2107 
2108 		xdst = xfrm_bundle_lookup(net, fl, family, dir, &xflo, if_id);
2109 		if (xdst == NULL)
2110 			goto nopol;
2111 		if (IS_ERR(xdst)) {
2112 			err = PTR_ERR(xdst);
2113 			goto dropdst;
2114 		}
2115 
2116 		num_pols = xdst->num_pols;
2117 		num_xfrms = xdst->num_xfrms;
2118 		memcpy(pols, xdst->pols, sizeof(struct xfrm_policy *) * num_pols);
2119 		route = xdst->route;
2120 	}
2121 
2122 	dst = &xdst->u.dst;
2123 	if (route == NULL && num_xfrms > 0) {
2124 		/* The only case when xfrm_bundle_lookup() returns a
2125 		 * bundle with a null route is when the template could
2126 		 * not be resolved. It means policies are there, but a
2127 		 * bundle could not be created, since we don't yet
2128 		 * have the xfrm_states. We need to wait for the KM to
2129 		 * negotiate new SAs or bail out with an error. */
2130 		if (net->xfrm.sysctl_larval_drop) {
2131 			XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTNOSTATES);
2132 			err = -EREMOTE;
2133 			goto error;
2134 		}
2135 
2136 		err = -EAGAIN;
2137 
2138 		XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTNOSTATES);
2139 		goto error;
2140 	}
2141 
2142 no_transform:
2143 	if (num_pols == 0)
2144 		goto nopol;
2145 
2146 	if ((flags & XFRM_LOOKUP_ICMP) &&
2147 	    !(pols[0]->flags & XFRM_POLICY_ICMP)) {
2148 		err = -ENOENT;
2149 		goto error;
2150 	}
2151 
2152 	for (i = 0; i < num_pols; i++)
2153 		pols[i]->curlft.use_time = ktime_get_real_seconds();
2154 
2155 	if (num_xfrms < 0) {
2156 		/* Prohibit the flow */
2157 		XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTPOLBLOCK);
2158 		err = -EPERM;
2159 		goto error;
2160 	} else if (num_xfrms > 0) {
2161 		/* Flow transformed */
2162 		dst_release(dst_orig);
2163 	} else {
2164 		/* Flow passes untransformed */
2165 		dst_release(dst);
2166 		dst = dst_orig;
2167 	}
2168 ok:
2169 	xfrm_pols_put(pols, drop_pols);
2170 	if (dst && dst->xfrm &&
2171 	    dst->xfrm->props.mode == XFRM_MODE_TUNNEL)
2172 		dst->flags |= DST_XFRM_TUNNEL;
2173 	return dst;
2174 
2175 nopol:
2176 	if (!(flags & XFRM_LOOKUP_ICMP)) {
2177 		dst = dst_orig;
2178 		goto ok;
2179 	}
2180 	err = -ENOENT;
2181 error:
2182 	dst_release(dst);
2183 dropdst:
2184 	if (!(flags & XFRM_LOOKUP_KEEP_DST_REF))
2185 		dst_release(dst_orig);
2186 	xfrm_pols_put(pols, drop_pols);
2187 	return ERR_PTR(err);
2188 }
2189 EXPORT_SYMBOL(xfrm_lookup_with_ifid);
2190 
2191 /* Main function: finds/creates a bundle for a given flow.
2192  *
2193  * At the moment we eat a raw IP route. Mostly to speed up lookups
2194  * on interfaces with disabled IPsec.
2195  */
2196 struct dst_entry *xfrm_lookup(struct net *net, struct dst_entry *dst_orig,
2197 			      const struct flowi *fl, const struct sock *sk,
2198 			      int flags)
2199 {
2200 	return xfrm_lookup_with_ifid(net, dst_orig, fl, sk, flags, 0);
2201 }
2202 EXPORT_SYMBOL(xfrm_lookup);
2203 
2204 /* Callers of xfrm_lookup_route() must ensure a call to dst_output().
2205  * Otherwise we may send out blackholed packets.
2206  */
2207 struct dst_entry *xfrm_lookup_route(struct net *net, struct dst_entry *dst_orig,
2208 				    const struct flowi *fl,
2209 				    const struct sock *sk, int flags)
2210 {
2211 	struct dst_entry *dst = xfrm_lookup(net, dst_orig, fl, sk,
2212 					    flags | XFRM_LOOKUP_QUEUE |
2213 					    XFRM_LOOKUP_KEEP_DST_REF);
2214 
2215 	if (IS_ERR(dst) && PTR_ERR(dst) == -EREMOTE)
2216 		return make_blackhole(net, dst_orig->ops->family, dst_orig);
2217 
2218 	if (IS_ERR(dst))
2219 		dst_release(dst_orig);
2220 
2221 	return dst;
2222 }
2223 EXPORT_SYMBOL(xfrm_lookup_route);
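/* A minimal, hypothetical caller sketch (local variable names are
 * illustrative only): look up the route, install it on the skb and
 * always hand the skb to dst_output(), so a blackhole route returned
 * while states are larval can drop the packet instead of letting it
 * leak out untransformed.
 *
 *	dst = xfrm_lookup_route(net, &rt->dst, &fl, sk, 0);
 *	if (IS_ERR(dst))
 *		return PTR_ERR(dst);
 *	skb_dst_set(skb, dst);
 *	return dst_output(net, sk, skb);
 */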
2224 
2225 static inline int
2226 xfrm_secpath_reject(int idx, struct sk_buff *skb, const struct flowi *fl)
2227 {
2228 	struct xfrm_state *x;
2229 
2230 	if (!skb->sp || idx < 0 || idx >= skb->sp->len)
2231 		return 0;
2232 	x = skb->sp->xvec[idx];
2233 	if (!x->type->reject)
2234 		return 0;
2235 	return x->type->reject(x, skb, fl);
2236 }
2237 
2238 /* When skb is transformed back to its "native" form, we have to
2239  * check policy restrictions. At the moment we do this in a maximally
2240  * stupid way. Shame on me. :-) Of course, connected sockets must
2241  * have their policy cached on them.
2242  */
2243 
2244 static inline int
2245 xfrm_state_ok(const struct xfrm_tmpl *tmpl, const struct xfrm_state *x,
2246 	      unsigned short family)
2247 {
2248 	if (xfrm_state_kern(x))
2249 		return tmpl->optional && !xfrm_state_addr_cmp(tmpl, x, tmpl->encap_family);
2250 	return	x->id.proto == tmpl->id.proto &&
2251 		(x->id.spi == tmpl->id.spi || !tmpl->id.spi) &&
2252 		(x->props.reqid == tmpl->reqid || !tmpl->reqid) &&
2253 		x->props.mode == tmpl->mode &&
2254 		(tmpl->allalgs || (tmpl->aalgos & (1<<x->props.aalgo)) ||
2255 		 !(xfrm_id_proto_match(tmpl->id.proto, IPSEC_PROTO_ANY))) &&
2256 		!(x->props.mode != XFRM_MODE_TRANSPORT &&
2257 		  xfrm_state_addr_cmp(tmpl, x, family));
2258 }
2259 
2260 /*
2261  * 0 or more than 0 is returned when validation succeeds (either a bypass
2262  * because of an optional transport-mode template, or the next index of the
2263  * secpath state matched against the template).
2264  * -1 is returned when no matching template is found.
2265  * Otherwise "-2 - errored_index" is returned.
2266  */
2267 static inline int
2268 xfrm_policy_ok(const struct xfrm_tmpl *tmpl, const struct sec_path *sp, int start,
2269 	       unsigned short family)
2270 {
2271 	int idx = start;
2272 
2273 	if (tmpl->optional) {
2274 		if (tmpl->mode == XFRM_MODE_TRANSPORT)
2275 			return start;
2276 	} else
2277 		start = -1;
2278 	for (; idx < sp->len; idx++) {
2279 		if (xfrm_state_ok(tmpl, sp->xvec[idx], family))
2280 			return ++idx;
2281 		if (sp->xvec[idx]->props.mode != XFRM_MODE_TRANSPORT) {
2282 			if (start == -1)
2283 				start = -2-idx;
2284 			break;
2285 		}
2286 	}
2287 	return start;
2288 }
2289 
2290 int __xfrm_decode_session(struct sk_buff *skb, struct flowi *fl,
2291 			  unsigned int family, int reverse)
2292 {
2293 	const struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
2294 	int err;
2295 
2296 	if (unlikely(afinfo == NULL))
2297 		return -EAFNOSUPPORT;
2298 
2299 	afinfo->decode_session(skb, fl, reverse);
2300 
2301 	err = security_xfrm_decode_session(skb, &fl->flowi_secid);
2302 	rcu_read_unlock();
2303 	return err;
2304 }
2305 EXPORT_SYMBOL(__xfrm_decode_session);
2306 
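/* Return 1 (and record the index in *idxp) if any state in the secpath
 * at or after position k uses a non-transport mode; 0 otherwise.
 */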
2307 static inline int secpath_has_nontransport(const struct sec_path *sp, int k, int *idxp)
2308 {
2309 	for (; k < sp->len; k++) {
2310 		if (sp->xvec[k]->props.mode != XFRM_MODE_TRANSPORT) {
2311 			*idxp = k;
2312 			return 1;
2313 		}
2314 	}
2315 
2316 	return 0;
2317 }
2318 
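/* Inbound policy check: decode the flow from the skb, verify the used
 * SAs against their selectors, look up the socket policy and/or the
 * main (and, with sub-policies, the secondary) policy for the flow,
 * and check that the secpath satisfies every template.  Returns 1 if
 * the packet is allowed, 0 if it must be dropped.
 */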
2319 int __xfrm_policy_check(struct sock *sk, int dir, struct sk_buff *skb,
2320 			unsigned short family)
2321 {
2322 	struct net *net = dev_net(skb->dev);
2323 	struct xfrm_policy *pol;
2324 	struct xfrm_policy *pols[XFRM_POLICY_TYPE_MAX];
2325 	int npols = 0;
2326 	int xfrm_nr;
2327 	int pi;
2328 	int reverse;
2329 	struct flowi fl;
2330 	int xerr_idx = -1;
2331 	const struct xfrm_if_cb *ifcb;
2332 	struct xfrm_if *xi;
2333 	u32 if_id = 0;
2334 
2335 	rcu_read_lock();
2336 	ifcb = xfrm_if_get_cb();
2337 
2338 	if (ifcb) {
2339 		xi = ifcb->decode_session(skb);
2340 		if (xi)
2341 			if_id = xi->p.if_id;
2342 	}
2343 	rcu_read_unlock();
2344 
2345 	reverse = dir & ~XFRM_POLICY_MASK;
2346 	dir &= XFRM_POLICY_MASK;
2347 
2348 	if (__xfrm_decode_session(skb, &fl, family, reverse) < 0) {
2349 		XFRM_INC_STATS(net, LINUX_MIB_XFRMINHDRERROR);
2350 		return 0;
2351 	}
2352 
2353 	nf_nat_decode_session(skb, &fl, family);
2354 
2355 	/* First, check used SA against their selectors. */
2356 	if (skb->sp) {
2357 		int i;
2358 
2359 		for (i = skb->sp->len-1; i >= 0; i--) {
2360 			struct xfrm_state *x = skb->sp->xvec[i];
2361 			if (!xfrm_selector_match(&x->sel, &fl, family)) {
2362 				XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATEMISMATCH);
2363 				return 0;
2364 			}
2365 		}
2366 	}
2367 
2368 	pol = NULL;
2369 	sk = sk_to_full_sk(sk);
2370 	if (sk && sk->sk_policy[dir]) {
2371 		pol = xfrm_sk_policy_lookup(sk, dir, &fl, family, if_id);
2372 		if (IS_ERR(pol)) {
2373 			XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLERROR);
2374 			return 0;
2375 		}
2376 	}
2377 
2378 	if (!pol)
2379 		pol = xfrm_policy_lookup(net, &fl, family, dir, if_id);
2380 
2381 	if (IS_ERR(pol)) {
2382 		XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLERROR);
2383 		return 0;
2384 	}
2385 
2386 	if (!pol) {
2387 		if (skb->sp && secpath_has_nontransport(skb->sp, 0, &xerr_idx)) {
2388 			xfrm_secpath_reject(xerr_idx, skb, &fl);
2389 			XFRM_INC_STATS(net, LINUX_MIB_XFRMINNOPOLS);
2390 			return 0;
2391 		}
2392 		return 1;
2393 	}
2394 
2395 	pol->curlft.use_time = ktime_get_real_seconds();
2396 
2397 	pols[0] = pol;
2398 	npols++;
2399 #ifdef CONFIG_XFRM_SUB_POLICY
2400 	if (pols[0]->type != XFRM_POLICY_TYPE_MAIN) {
2401 		pols[1] = xfrm_policy_lookup_bytype(net, XFRM_POLICY_TYPE_MAIN,
2402 						    &fl, family,
2403 						    XFRM_POLICY_IN, if_id);
2404 		if (pols[1]) {
2405 			if (IS_ERR(pols[1])) {
2406 				XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLERROR);
2407 				return 0;
2408 			}
2409 			pols[1]->curlft.use_time = ktime_get_real_seconds();
2410 			npols++;
2411 		}
2412 	}
2413 #endif
2414 
2415 	if (pol->action == XFRM_POLICY_ALLOW) {
2416 		struct sec_path *sp;
2417 		static struct sec_path dummy;
2418 		struct xfrm_tmpl *tp[XFRM_MAX_DEPTH];
2419 		struct xfrm_tmpl *stp[XFRM_MAX_DEPTH];
2420 		struct xfrm_tmpl **tpp = tp;
2421 		int ti = 0;
2422 		int i, k;
2423 
2424 		if ((sp = skb->sp) == NULL)
2425 			sp = &dummy;
2426 
2427 		for (pi = 0; pi < npols; pi++) {
2428 			if (pols[pi] != pol &&
2429 			    pols[pi]->action != XFRM_POLICY_ALLOW) {
2430 				XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLBLOCK);
2431 				goto reject;
2432 			}
2433 			if (ti + pols[pi]->xfrm_nr >= XFRM_MAX_DEPTH) {
2434 				XFRM_INC_STATS(net, LINUX_MIB_XFRMINBUFFERERROR);
2435 				goto reject_error;
2436 			}
2437 			for (i = 0; i < pols[pi]->xfrm_nr; i++)
2438 				tpp[ti++] = &pols[pi]->xfrm_vec[i];
2439 		}
2440 		xfrm_nr = ti;
2441 		if (npols > 1) {
2442 			xfrm_tmpl_sort(stp, tpp, xfrm_nr, family, net);
2443 			tpp = stp;
2444 		}
2445 
2446 		/* For each tunnel xfrm, find the first matching tmpl.
2447 		 * For each tmpl before that, find the corresponding xfrm.
2448 		 * Order is _important_. Later we will implement
2449 		 * some barriers, but at the moment barriers
2450 		 * are implied between every two transformations.
2451 		 */
2452 		for (i = xfrm_nr-1, k = 0; i >= 0; i--) {
2453 			k = xfrm_policy_ok(tpp[i], sp, k, family);
2454 			if (k < 0) {
2455 				if (k < -1)
2456 					/* "-2 - errored_index" returned */
2457 					xerr_idx = -(2+k);
2458 				XFRM_INC_STATS(net, LINUX_MIB_XFRMINTMPLMISMATCH);
2459 				goto reject;
2460 			}
2461 		}
2462 
2463 		if (secpath_has_nontransport(sp, k, &xerr_idx)) {
2464 			XFRM_INC_STATS(net, LINUX_MIB_XFRMINTMPLMISMATCH);
2465 			goto reject;
2466 		}
2467 
2468 		xfrm_pols_put(pols, npols);
2469 		return 1;
2470 	}
2471 	XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLBLOCK);
2472 
2473 reject:
2474 	xfrm_secpath_reject(xerr_idx, skb, &fl);
2475 reject_error:
2476 	xfrm_pols_put(pols, npols);
2477 	return 0;
2478 }
2479 EXPORT_SYMBOL(__xfrm_policy_check);
2480 
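/* Re-route a forwarded packet through xfrm_lookup() so that forwarded
 * traffic is subject to output policies as well.  Returns 1 on success
 * with the new dst installed on the skb, 0 on failure.
 */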
2481 int __xfrm_route_forward(struct sk_buff *skb, unsigned short family)
2482 {
2483 	struct net *net = dev_net(skb->dev);
2484 	struct flowi fl;
2485 	struct dst_entry *dst;
2486 	int res = 1;
2487 
2488 	if (xfrm_decode_session(skb, &fl, family) < 0) {
2489 		XFRM_INC_STATS(net, LINUX_MIB_XFRMFWDHDRERROR);
2490 		return 0;
2491 	}
2492 
2493 	skb_dst_force(skb);
2494 
2495 	dst = xfrm_lookup(net, skb_dst(skb), &fl, NULL, XFRM_LOOKUP_QUEUE);
2496 	if (IS_ERR(dst)) {
2497 		res = 0;
2498 		dst = NULL;
2499 	}
2500 	skb_dst_set(skb, dst);
2501 	return res;
2502 }
2503 EXPORT_SYMBOL(__xfrm_route_forward);
2504 
2505 /* Optimize later using cookies and generation ids. */
2506 
2507 static struct dst_entry *xfrm_dst_check(struct dst_entry *dst, u32 cookie)
2508 {
2509 	/* Code (such as __xfrm4_bundle_create()) sets dst->obsolete
2510 	 * to DST_OBSOLETE_FORCE_CHK to force all XFRM destinations to
2511 	 * get validated by dst_ops->check on every use.  We do this
2512 	 * because when a normal route referenced by an XFRM dst is
2513 	 * obsoleted we do not go looking around for all parent
2514 	 * referencing XFRM dsts so that we can invalidate them.  It
2515 	 * is just too much work.  Instead we make the checks here on
2516 	 * every use.  For example:
2517 	 *
2518 	 *	XFRM dst A --> IPv4 dst X
2519 	 *
2520 	 * X is the "xdst->route" of A (X is also the "dst->path" of A
2521 	 * in this example).  If X is marked obsolete, "A" will not
2522 	 * notice.  That's what we are validating here via the
2523 	 * stale_bundle() check.
2524 	 *
2525 	 * When a dst is removed from the fib tree, DST_OBSOLETE_DEAD will
2526 	 * be marked on it.
2527 	 * This will force stale_bundle() to fail on any xdst bundle with
2528 	 * this dst linked in it.
2529 	 */
2530 	if (dst->obsolete < 0 && !stale_bundle(dst))
2531 		return dst;
2532 
2533 	return NULL;
2534 }
2535 
2536 static int stale_bundle(struct dst_entry *dst)
2537 {
2538 	return !xfrm_bundle_ok((struct xfrm_dst *)dst);
2539 }
2540 
2541 void xfrm_dst_ifdown(struct dst_entry *dst, struct net_device *dev)
2542 {
2543 	while ((dst = xfrm_dst_child(dst)) && dst->xfrm && dst->dev == dev) {
2544 		dst->dev = dev_net(dev)->loopback_dev;
2545 		dev_hold(dst->dev);
2546 		dev_put(dev);
2547 	}
2548 }
2549 EXPORT_SYMBOL(xfrm_dst_ifdown);
2550 
2551 static void xfrm_link_failure(struct sk_buff *skb)
2552 {
2553 	/* Impossible. Such a dst must be popped before it reaches the point of failure. */
2554 }
2555 
2556 static struct dst_entry *xfrm_negative_advice(struct dst_entry *dst)
2557 {
2558 	if (dst) {
2559 		if (dst->obsolete) {
2560 			dst_release(dst);
2561 			dst = NULL;
2562 		}
2563 	}
2564 	return dst;
2565 }
2566 
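/* Walk the bundle entries, caching the child and route MTUs for each
 * xfrm_dst and setting its RTAX_MTU metric to the smaller of the
 * state-adjusted child MTU and the route MTU.
 */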
2567 static void xfrm_init_pmtu(struct xfrm_dst **bundle, int nr)
2568 {
2569 	while (nr--) {
2570 		struct xfrm_dst *xdst = bundle[nr];
2571 		u32 pmtu, route_mtu_cached;
2572 		struct dst_entry *dst;
2573 
2574 		dst = &xdst->u.dst;
2575 		pmtu = dst_mtu(xfrm_dst_child(dst));
2576 		xdst->child_mtu_cached = pmtu;
2577 
2578 		pmtu = xfrm_state_mtu(dst->xfrm, pmtu);
2579 
2580 		route_mtu_cached = dst_mtu(xdst->route);
2581 		xdst->route_mtu_cached = route_mtu_cached;
2582 
2583 		if (pmtu > route_mtu_cached)
2584 			pmtu = route_mtu_cached;
2585 
2586 		dst_metric_set(dst, RTAX_MTU, pmtu);
2587 	}
2588 }
2589 
2590 /* Check that the bundle accepts the flow and that its components are
2591  * still valid.
2592  */
2593 
2594 static int xfrm_bundle_ok(struct xfrm_dst *first)
2595 {
2596 	struct xfrm_dst *bundle[XFRM_MAX_DEPTH];
2597 	struct dst_entry *dst = &first->u.dst;
2598 	struct xfrm_dst *xdst;
2599 	int start_from, nr;
2600 	u32 mtu;
2601 
2602 	if (!dst_check(xfrm_dst_path(dst), ((struct xfrm_dst *)dst)->path_cookie) ||
2603 	    (dst->dev && !netif_running(dst->dev)))
2604 		return 0;
2605 
2606 	if (dst->flags & DST_XFRM_QUEUE)
2607 		return 1;
2608 
2609 	start_from = nr = 0;
2610 	do {
2611 		struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
2612 
2613 		if (dst->xfrm->km.state != XFRM_STATE_VALID)
2614 			return 0;
2615 		if (xdst->xfrm_genid != dst->xfrm->genid)
2616 			return 0;
2617 		if (xdst->num_pols > 0 &&
2618 		    xdst->policy_genid != atomic_read(&xdst->pols[0]->genid))
2619 			return 0;
2620 
2621 		bundle[nr++] = xdst;
2622 
2623 		mtu = dst_mtu(xfrm_dst_child(dst));
2624 		if (xdst->child_mtu_cached != mtu) {
2625 			start_from = nr;
2626 			xdst->child_mtu_cached = mtu;
2627 		}
2628 
2629 		if (!dst_check(xdst->route, xdst->route_cookie))
2630 			return 0;
2631 		mtu = dst_mtu(xdst->route);
2632 		if (xdst->route_mtu_cached != mtu) {
2633 			start_from = nr;
2634 			xdst->route_mtu_cached = mtu;
2635 		}
2636 
2637 		dst = xfrm_dst_child(dst);
2638 	} while (dst->xfrm);
2639 
2640 	if (likely(!start_from))
2641 		return 1;
2642 
2643 	xdst = bundle[start_from - 1];
2644 	mtu = xdst->child_mtu_cached;
2645 	while (start_from--) {
2646 		dst = &xdst->u.dst;
2647 
2648 		mtu = xfrm_state_mtu(dst->xfrm, mtu);
2649 		if (mtu > xdst->route_mtu_cached)
2650 			mtu = xdst->route_mtu_cached;
2651 		dst_metric_set(dst, RTAX_MTU, mtu);
2652 		if (!start_from)
2653 			break;
2654 
2655 		xdst = bundle[start_from - 1];
2656 		xdst->child_mtu_cached = mtu;
2657 	}
2658 
2659 	return 1;
2660 }
2661 
2662 static unsigned int xfrm_default_advmss(const struct dst_entry *dst)
2663 {
2664 	return dst_metric_advmss(xfrm_dst_path(dst));
2665 }
2666 
2667 static unsigned int xfrm_mtu(const struct dst_entry *dst)
2668 {
2669 	unsigned int mtu = dst_metric_raw(dst, RTAX_MTU);
2670 
2671 	return mtu ? : dst_mtu(xfrm_dst_path(dst));
2672 }
2673 
2674 static const void *xfrm_get_dst_nexthop(const struct dst_entry *dst,
2675 					const void *daddr)
2676 {
2677 	while (dst->xfrm) {
2678 		const struct xfrm_state *xfrm = dst->xfrm;
2679 
2680 		dst = xfrm_dst_child(dst);
2681 
2682 		if (xfrm->props.mode == XFRM_MODE_TRANSPORT)
2683 			continue;
2684 		if (xfrm->type->flags & XFRM_TYPE_REMOTE_COADDR)
2685 			daddr = xfrm->coaddr;
2686 		else if (!(xfrm->type->flags & XFRM_TYPE_LOCAL_COADDR))
2687 			daddr = &xfrm->id.daddr;
2688 	}
2689 	return daddr;
2690 }
2691 
2692 static struct neighbour *xfrm_neigh_lookup(const struct dst_entry *dst,
2693 					   struct sk_buff *skb,
2694 					   const void *daddr)
2695 {
2696 	const struct dst_entry *path = xfrm_dst_path(dst);
2697 
2698 	if (!skb)
2699 		daddr = xfrm_get_dst_nexthop(dst, daddr);
2700 	return path->ops->neigh_lookup(path, skb, daddr);
2701 }
2702 
2703 static void xfrm_confirm_neigh(const struct dst_entry *dst, const void *daddr)
2704 {
2705 	const struct dst_entry *path = xfrm_dst_path(dst);
2706 
2707 	daddr = xfrm_get_dst_nexthop(dst, daddr);
2708 	path->ops->confirm_neigh(path, daddr);
2709 }
2710 
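/* Register per-address-family policy ops.  Any dst_ops callbacks the
 * caller left NULL are filled in with the generic xfrm_* defaults
 * before the afinfo pointer is published under RCU.
 */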
2711 int xfrm_policy_register_afinfo(const struct xfrm_policy_afinfo *afinfo, int family)
2712 {
2713 	int err = 0;
2714 
2715 	if (WARN_ON(family >= ARRAY_SIZE(xfrm_policy_afinfo)))
2716 		return -EAFNOSUPPORT;
2717 
2718 	spin_lock(&xfrm_policy_afinfo_lock);
2719 	if (unlikely(xfrm_policy_afinfo[family] != NULL))
2720 		err = -EEXIST;
2721 	else {
2722 		struct dst_ops *dst_ops = afinfo->dst_ops;
2723 		if (likely(dst_ops->kmem_cachep == NULL))
2724 			dst_ops->kmem_cachep = xfrm_dst_cache;
2725 		if (likely(dst_ops->check == NULL))
2726 			dst_ops->check = xfrm_dst_check;
2727 		if (likely(dst_ops->default_advmss == NULL))
2728 			dst_ops->default_advmss = xfrm_default_advmss;
2729 		if (likely(dst_ops->mtu == NULL))
2730 			dst_ops->mtu = xfrm_mtu;
2731 		if (likely(dst_ops->negative_advice == NULL))
2732 			dst_ops->negative_advice = xfrm_negative_advice;
2733 		if (likely(dst_ops->link_failure == NULL))
2734 			dst_ops->link_failure = xfrm_link_failure;
2735 		if (likely(dst_ops->neigh_lookup == NULL))
2736 			dst_ops->neigh_lookup = xfrm_neigh_lookup;
2737 		if (likely(!dst_ops->confirm_neigh))
2738 			dst_ops->confirm_neigh = xfrm_confirm_neigh;
2739 		rcu_assign_pointer(xfrm_policy_afinfo[family], afinfo);
2740 	}
2741 	spin_unlock(&xfrm_policy_afinfo_lock);
2742 
2743 	return err;
2744 }
2745 EXPORT_SYMBOL(xfrm_policy_register_afinfo);
2746 
2747 void xfrm_policy_unregister_afinfo(const struct xfrm_policy_afinfo *afinfo)
2748 {
2749 	struct dst_ops *dst_ops = afinfo->dst_ops;
2750 	int i;
2751 
2752 	for (i = 0; i < ARRAY_SIZE(xfrm_policy_afinfo); i++) {
2753 		if (xfrm_policy_afinfo[i] != afinfo)
2754 			continue;
2755 		RCU_INIT_POINTER(xfrm_policy_afinfo[i], NULL);
2756 		break;
2757 	}
2758 
2759 	synchronize_rcu();
2760 
2761 	dst_ops->kmem_cachep = NULL;
2762 	dst_ops->check = NULL;
2763 	dst_ops->negative_advice = NULL;
2764 	dst_ops->link_failure = NULL;
2765 }
2766 EXPORT_SYMBOL(xfrm_policy_unregister_afinfo);
2767 
2768 void xfrm_if_register_cb(const struct xfrm_if_cb *ifcb)
2769 {
2770 	spin_lock(&xfrm_if_cb_lock);
2771 	rcu_assign_pointer(xfrm_if_cb, ifcb);
2772 	spin_unlock(&xfrm_if_cb_lock);
2773 }
2774 EXPORT_SYMBOL(xfrm_if_register_cb);
2775 
2776 void xfrm_if_unregister_cb(void)
2777 {
2778 	RCU_INIT_POINTER(xfrm_if_cb, NULL);
2779 	synchronize_rcu();
2780 }
2781 EXPORT_SYMBOL(xfrm_if_unregister_cb);
2782 
2783 #ifdef CONFIG_XFRM_STATISTICS
2784 static int __net_init xfrm_statistics_init(struct net *net)
2785 {
2786 	int rv;
2787 	net->mib.xfrm_statistics = alloc_percpu(struct linux_xfrm_mib);
2788 	if (!net->mib.xfrm_statistics)
2789 		return -ENOMEM;
2790 	rv = xfrm_proc_init(net);
2791 	if (rv < 0)
2792 		free_percpu(net->mib.xfrm_statistics);
2793 	return rv;
2794 }
2795 
2796 static void xfrm_statistics_fini(struct net *net)
2797 {
2798 	xfrm_proc_fini(net);
2799 	free_percpu(net->mib.xfrm_statistics);
2800 }
2801 #else
2802 static int __net_init xfrm_statistics_init(struct net *net)
2803 {
2804 	return 0;
2805 }
2806 
2807 static void xfrm_statistics_fini(struct net *net)
2808 {
2809 }
2810 #endif
2811 
2812 static int __net_init xfrm_policy_init(struct net *net)
2813 {
2814 	unsigned int hmask, sz;
2815 	int dir;
2816 
2817 	if (net_eq(net, &init_net))
2818 		xfrm_dst_cache = kmem_cache_create("xfrm_dst_cache",
2819 					   sizeof(struct xfrm_dst),
2820 					   0, SLAB_HWCACHE_ALIGN|SLAB_PANIC,
2821 					   NULL);
2822 
2823 	hmask = 8 - 1;
2824 	sz = (hmask+1) * sizeof(struct hlist_head);
2825 
2826 	net->xfrm.policy_byidx = xfrm_hash_alloc(sz);
2827 	if (!net->xfrm.policy_byidx)
2828 		goto out_byidx;
2829 	net->xfrm.policy_idx_hmask = hmask;
2830 
2831 	for (dir = 0; dir < XFRM_POLICY_MAX; dir++) {
2832 		struct xfrm_policy_hash *htab;
2833 
2834 		net->xfrm.policy_count[dir] = 0;
2835 		net->xfrm.policy_count[XFRM_POLICY_MAX + dir] = 0;
2836 		INIT_HLIST_HEAD(&net->xfrm.policy_inexact[dir]);
2837 
2838 		htab = &net->xfrm.policy_bydst[dir];
2839 		htab->table = xfrm_hash_alloc(sz);
2840 		if (!htab->table)
2841 			goto out_bydst;
2842 		htab->hmask = hmask;
2843 		htab->dbits4 = 32;
2844 		htab->sbits4 = 32;
2845 		htab->dbits6 = 128;
2846 		htab->sbits6 = 128;
2847 	}
2848 	net->xfrm.policy_hthresh.lbits4 = 32;
2849 	net->xfrm.policy_hthresh.rbits4 = 32;
2850 	net->xfrm.policy_hthresh.lbits6 = 128;
2851 	net->xfrm.policy_hthresh.rbits6 = 128;
2852 
2853 	seqlock_init(&net->xfrm.policy_hthresh.lock);
2854 
2855 	INIT_LIST_HEAD(&net->xfrm.policy_all);
2856 	INIT_WORK(&net->xfrm.policy_hash_work, xfrm_hash_resize);
2857 	INIT_WORK(&net->xfrm.policy_hthresh.work, xfrm_hash_rebuild);
2858 	return 0;
2859 
2860 out_bydst:
2861 	for (dir--; dir >= 0; dir--) {
2862 		struct xfrm_policy_hash *htab;
2863 
2864 		htab = &net->xfrm.policy_bydst[dir];
2865 		xfrm_hash_free(htab->table, sz);
2866 	}
2867 	xfrm_hash_free(net->xfrm.policy_byidx, sz);
2868 out_byidx:
2869 	return -ENOMEM;
2870 }
2871 
2872 static void xfrm_policy_fini(struct net *net)
2873 {
2874 	unsigned int sz;
2875 	int dir;
2876 
2877 	flush_work(&net->xfrm.policy_hash_work);
2878 #ifdef CONFIG_XFRM_SUB_POLICY
2879 	xfrm_policy_flush(net, XFRM_POLICY_TYPE_SUB, false);
2880 #endif
2881 	xfrm_policy_flush(net, XFRM_POLICY_TYPE_MAIN, false);
2882 
2883 	WARN_ON(!list_empty(&net->xfrm.policy_all));
2884 
2885 	for (dir = 0; dir < XFRM_POLICY_MAX; dir++) {
2886 		struct xfrm_policy_hash *htab;
2887 
2888 		WARN_ON(!hlist_empty(&net->xfrm.policy_inexact[dir]));
2889 
2890 		htab = &net->xfrm.policy_bydst[dir];
2891 		sz = (htab->hmask + 1) * sizeof(struct hlist_head);
2892 		WARN_ON(!hlist_empty(htab->table));
2893 		xfrm_hash_free(htab->table, sz);
2894 	}
2895 
2896 	sz = (net->xfrm.policy_idx_hmask + 1) * sizeof(struct hlist_head);
2897 	WARN_ON(!hlist_empty(net->xfrm.policy_byidx));
2898 	xfrm_hash_free(net->xfrm.policy_byidx, sz);
2899 }
2900 
2901 static int __net_init xfrm_net_init(struct net *net)
2902 {
2903 	int rv;
2904 
2905 	/* Initialize the per-net locks here */
2906 	spin_lock_init(&net->xfrm.xfrm_state_lock);
2907 	spin_lock_init(&net->xfrm.xfrm_policy_lock);
2908 	mutex_init(&net->xfrm.xfrm_cfg_mutex);
2909 
2910 	rv = xfrm_statistics_init(net);
2911 	if (rv < 0)
2912 		goto out_statistics;
2913 	rv = xfrm_state_init(net);
2914 	if (rv < 0)
2915 		goto out_state;
2916 	rv = xfrm_policy_init(net);
2917 	if (rv < 0)
2918 		goto out_policy;
2919 	rv = xfrm_sysctl_init(net);
2920 	if (rv < 0)
2921 		goto out_sysctl;
2922 
2923 	return 0;
2924 
2925 out_sysctl:
2926 	xfrm_policy_fini(net);
2927 out_policy:
2928 	xfrm_state_fini(net);
2929 out_state:
2930 	xfrm_statistics_fini(net);
2931 out_statistics:
2932 	return rv;
2933 }
2934 
2935 static void __net_exit xfrm_net_exit(struct net *net)
2936 {
2937 	xfrm_sysctl_fini(net);
2938 	xfrm_policy_fini(net);
2939 	xfrm_state_fini(net);
2940 	xfrm_statistics_fini(net);
2941 }
2942 
2943 static struct pernet_operations __net_initdata xfrm_net_ops = {
2944 	.init = xfrm_net_init,
2945 	.exit = xfrm_net_exit,
2946 };
2947 
2948 void __init xfrm_init(void)
2949 {
2950 	register_pernet_subsys(&xfrm_net_ops);
2951 	xfrm_dev_init();
2952 	seqcount_init(&xfrm_policy_hash_generation);
2953 	xfrm_input_init();
2954 
2955 	RCU_INIT_POINTER(xfrm_if_cb, NULL);
2956 	synchronize_rcu();
2957 }
2958 
2959 #ifdef CONFIG_AUDITSYSCALL
2960 static void xfrm_audit_common_policyinfo(struct xfrm_policy *xp,
2961 					 struct audit_buffer *audit_buf)
2962 {
2963 	struct xfrm_sec_ctx *ctx = xp->security;
2964 	struct xfrm_selector *sel = &xp->selector;
2965 
2966 	if (ctx)
2967 		audit_log_format(audit_buf, " sec_alg=%u sec_doi=%u sec_obj=%s",
2968 				 ctx->ctx_alg, ctx->ctx_doi, ctx->ctx_str);
2969 
2970 	switch (sel->family) {
2971 	case AF_INET:
2972 		audit_log_format(audit_buf, " src=%pI4", &sel->saddr.a4);
2973 		if (sel->prefixlen_s != 32)
2974 			audit_log_format(audit_buf, " src_prefixlen=%d",
2975 					 sel->prefixlen_s);
2976 		audit_log_format(audit_buf, " dst=%pI4", &sel->daddr.a4);
2977 		if (sel->prefixlen_d != 32)
2978 			audit_log_format(audit_buf, " dst_prefixlen=%d",
2979 					 sel->prefixlen_d);
2980 		break;
2981 	case AF_INET6:
2982 		audit_log_format(audit_buf, " src=%pI6", sel->saddr.a6);
2983 		if (sel->prefixlen_s != 128)
2984 			audit_log_format(audit_buf, " src_prefixlen=%d",
2985 					 sel->prefixlen_s);
2986 		audit_log_format(audit_buf, " dst=%pI6", sel->daddr.a6);
2987 		if (sel->prefixlen_d != 128)
2988 			audit_log_format(audit_buf, " dst_prefixlen=%d",
2989 					 sel->prefixlen_d);
2990 		break;
2991 	}
2992 }
2993 
2994 void xfrm_audit_policy_add(struct xfrm_policy *xp, int result, bool task_valid)
2995 {
2996 	struct audit_buffer *audit_buf;
2997 
2998 	audit_buf = xfrm_audit_start("SPD-add");
2999 	if (audit_buf == NULL)
3000 		return;
3001 	xfrm_audit_helper_usrinfo(task_valid, audit_buf);
3002 	audit_log_format(audit_buf, " res=%u", result);
3003 	xfrm_audit_common_policyinfo(xp, audit_buf);
3004 	audit_log_end(audit_buf);
3005 }
3006 EXPORT_SYMBOL_GPL(xfrm_audit_policy_add);
3007 
3008 void xfrm_audit_policy_delete(struct xfrm_policy *xp, int result,
3009 			      bool task_valid)
3010 {
3011 	struct audit_buffer *audit_buf;
3012 
3013 	audit_buf = xfrm_audit_start("SPD-delete");
3014 	if (audit_buf == NULL)
3015 		return;
3016 	xfrm_audit_helper_usrinfo(task_valid, audit_buf);
3017 	audit_log_format(audit_buf, " res=%u", result);
3018 	xfrm_audit_common_policyinfo(xp, audit_buf);
3019 	audit_log_end(audit_buf);
3020 }
3021 EXPORT_SYMBOL_GPL(xfrm_audit_policy_delete);
3022 #endif
3023 
3024 #ifdef CONFIG_XFRM_MIGRATE
3025 static bool xfrm_migrate_selector_match(const struct xfrm_selector *sel_cmp,
3026 					const struct xfrm_selector *sel_tgt)
3027 {
3028 	if (sel_cmp->proto == IPSEC_ULPROTO_ANY) {
3029 		if (sel_tgt->family == sel_cmp->family &&
3030 		    xfrm_addr_equal(&sel_tgt->daddr, &sel_cmp->daddr,
3031 				    sel_cmp->family) &&
3032 		    xfrm_addr_equal(&sel_tgt->saddr, &sel_cmp->saddr,
3033 				    sel_cmp->family) &&
3034 		    sel_tgt->prefixlen_d == sel_cmp->prefixlen_d &&
3035 		    sel_tgt->prefixlen_s == sel_cmp->prefixlen_s) {
3036 			return true;
3037 		}
3038 	} else {
3039 		if (memcmp(sel_tgt, sel_cmp, sizeof(*sel_tgt)) == 0) {
3040 			return true;
3041 		}
3042 	}
3043 	return false;
3044 }
3045 
3046 static struct xfrm_policy *xfrm_migrate_policy_find(const struct xfrm_selector *sel,
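/* Find the policy whose selector matches the migrate request,
 * searching the exact (hashed) table first and then the inexact list,
 * keeping the best-priority match; the result is returned with a
 * reference held.
 */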
3047 						    u8 dir, u8 type, struct net *net)
3048 {
3049 	struct xfrm_policy *pol, *ret = NULL;
3050 	struct hlist_head *chain;
3051 	u32 priority = ~0U;
3052 
3053 	spin_lock_bh(&net->xfrm.xfrm_policy_lock);
3054 	chain = policy_hash_direct(net, &sel->daddr, &sel->saddr, sel->family, dir);
3055 	hlist_for_each_entry(pol, chain, bydst) {
3056 		if (xfrm_migrate_selector_match(sel, &pol->selector) &&
3057 		    pol->type == type) {
3058 			ret = pol;
3059 			priority = ret->priority;
3060 			break;
3061 		}
3062 	}
3063 	chain = &net->xfrm.policy_inexact[dir];
3064 	hlist_for_each_entry(pol, chain, bydst) {
3065 		if ((pol->priority >= priority) && ret)
3066 			break;
3067 
3068 		if (xfrm_migrate_selector_match(sel, &pol->selector) &&
3069 		    pol->type == type) {
3070 			ret = pol;
3071 			break;
3072 		}
3073 	}
3074 
3075 	xfrm_pol_hold(ret);
3076 
3077 	spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
3078 
3079 	return ret;
3080 }
3081 
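/* A template matches a migrate entry when mode, protocol and (if set)
 * reqid agree; for tunnel and BEET modes the old source and
 * destination addresses must match as well.
 */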
3082 static int migrate_tmpl_match(const struct xfrm_migrate *m, const struct xfrm_tmpl *t)
3083 {
3084 	int match = 0;
3085 
3086 	if (t->mode == m->mode && t->id.proto == m->proto &&
3087 	    (m->reqid == 0 || t->reqid == m->reqid)) {
3088 		switch (t->mode) {
3089 		case XFRM_MODE_TUNNEL:
3090 		case XFRM_MODE_BEET:
3091 			if (xfrm_addr_equal(&t->id.daddr, &m->old_daddr,
3092 					    m->old_family) &&
3093 			    xfrm_addr_equal(&t->saddr, &m->old_saddr,
3094 					    m->old_family)) {
3095 				match = 1;
3096 			}
3097 			break;
3098 		case XFRM_MODE_TRANSPORT:
3099 			/* In transport mode the template does not store
3100 			   any IP addresses, hence we just compare mode and
3101 			   protocol. */
3102 			match = 1;
3103 			break;
3104 		default:
3105 			break;
3106 		}
3107 	}
3108 	return match;
3109 }
3110 
3111 /* update endpoint address(es) of template(s) */
3112 static int xfrm_policy_migrate(struct xfrm_policy *pol,
3113 			       struct xfrm_migrate *m, int num_migrate)
3114 {
3115 	struct xfrm_migrate *mp;
3116 	int i, j, n = 0;
3117 
3118 	write_lock_bh(&pol->lock);
3119 	if (unlikely(pol->walk.dead)) {
3120 		/* target policy has been deleted */
3121 		write_unlock_bh(&pol->lock);
3122 		return -ENOENT;
3123 	}
3124 
3125 	for (i = 0; i < pol->xfrm_nr; i++) {
3126 		for (j = 0, mp = m; j < num_migrate; j++, mp++) {
3127 			if (!migrate_tmpl_match(mp, &pol->xfrm_vec[i]))
3128 				continue;
3129 			n++;
3130 			if (pol->xfrm_vec[i].mode != XFRM_MODE_TUNNEL &&
3131 			    pol->xfrm_vec[i].mode != XFRM_MODE_BEET)
3132 				continue;
3133 			/* update endpoints */
3134 			memcpy(&pol->xfrm_vec[i].id.daddr, &mp->new_daddr,
3135 			       sizeof(pol->xfrm_vec[i].id.daddr));
3136 			memcpy(&pol->xfrm_vec[i].saddr, &mp->new_saddr,
3137 			       sizeof(pol->xfrm_vec[i].saddr));
3138 			pol->xfrm_vec[i].encap_family = mp->new_family;
3139 			/* flush bundles */
3140 			atomic_inc(&pol->genid);
3141 		}
3142 	}
3143 
3144 	write_unlock_bh(&pol->lock);
3145 
3146 	if (!n)
3147 		return -ENODATA;
3148 
3149 	return 0;
3150 }
3151 
3152 static int xfrm_migrate_check(const struct xfrm_migrate *m, int num_migrate)
3153 {
3154 	int i, j;
3155 
3156 	if (num_migrate < 1 || num_migrate > XFRM_MAX_DEPTH)
3157 		return -EINVAL;
3158 
3159 	for (i = 0; i < num_migrate; i++) {
3160 		if (xfrm_addr_any(&m[i].new_daddr, m[i].new_family) ||
3161 		    xfrm_addr_any(&m[i].new_saddr, m[i].new_family))
3162 			return -EINVAL;
3163 
3164 		/* check if there are any duplicated entries */
3165 		for (j = i + 1; j < num_migrate; j++) {
3166 			if (!memcmp(&m[i].old_daddr, &m[j].old_daddr,
3167 				    sizeof(m[i].old_daddr)) &&
3168 			    !memcmp(&m[i].old_saddr, &m[j].old_saddr,
3169 				    sizeof(m[i].old_saddr)) &&
3170 			    m[i].proto == m[j].proto &&
3171 			    m[i].mode == m[j].mode &&
3172 			    m[i].reqid == m[j].reqid &&
3173 			    m[i].old_family == m[j].old_family)
3174 				return -EINVAL;
3175 		}
3176 	}
3177 
3178 	return 0;
3179 }
3180 
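/* Migrate an SA/policy set to new endpoints in stages: sanity-check
 * the request, find the affected policy, clone and migrate the
 * matching states, rewrite the policy templates, delete the old
 * states, and finally announce the change via km_migrate().
 */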
3181 int xfrm_migrate(const struct xfrm_selector *sel, u8 dir, u8 type,
3182 		 struct xfrm_migrate *m, int num_migrate,
3183 		 struct xfrm_kmaddress *k, struct net *net,
3184 		 struct xfrm_encap_tmpl *encap)
3185 {
3186 	int i, err, nx_cur = 0, nx_new = 0;
3187 	struct xfrm_policy *pol = NULL;
3188 	struct xfrm_state *x, *xc;
3189 	struct xfrm_state *x_cur[XFRM_MAX_DEPTH];
3190 	struct xfrm_state *x_new[XFRM_MAX_DEPTH];
3191 	struct xfrm_migrate *mp;
3192 
3193 	/* Stage 0 - sanity checks */
3194 	if ((err = xfrm_migrate_check(m, num_migrate)) < 0)
3195 		goto out;
3196 
3197 	if (dir >= XFRM_POLICY_MAX) {
3198 		err = -EINVAL;
3199 		goto out;
3200 	}
3201 
3202 	/* Stage 1 - find policy */
3203 	if ((pol = xfrm_migrate_policy_find(sel, dir, type, net)) == NULL) {
3204 		err = -ENOENT;
3205 		goto out;
3206 	}
3207 
3208 	/* Stage 2 - find and update state(s) */
3209 	for (i = 0, mp = m; i < num_migrate; i++, mp++) {
3210 		if ((x = xfrm_migrate_state_find(mp, net))) {
3211 			x_cur[nx_cur] = x;
3212 			nx_cur++;
3213 			xc = xfrm_state_migrate(x, mp, encap);
3214 			if (xc) {
3215 				x_new[nx_new] = xc;
3216 				nx_new++;
3217 			} else {
3218 				err = -ENODATA;
3219 				goto restore_state;
3220 			}
3221 		}
3222 	}
3223 
3224 	/* Stage 3 - update policy */
3225 	if ((err = xfrm_policy_migrate(pol, m, num_migrate)) < 0)
3226 		goto restore_state;
3227 
3228 	/* Stage 4 - delete old state(s) */
3229 	if (nx_cur) {
3230 		xfrm_states_put(x_cur, nx_cur);
3231 		xfrm_states_delete(x_cur, nx_cur);
3232 	}
3233 
3234 	/* Stage 5 - announce */
3235 	km_migrate(sel, dir, type, m, num_migrate, k, encap);
3236 
3237 	xfrm_pol_put(pol);
3238 
3239 	return 0;
3240 out:
3241 	return err;
3242 
3243 restore_state:
3244 	if (pol)
3245 		xfrm_pol_put(pol);
3246 	if (nx_cur)
3247 		xfrm_states_put(x_cur, nx_cur);
3248 	if (nx_new)
3249 		xfrm_states_delete(x_new, nx_new);
3250 
3251 	return err;
3252 }
3253 EXPORT_SYMBOL(xfrm_migrate);
3254 #endif
3255