// SPDX-License-Identifier: GPL-2.0-only
/*
 * xfrm_policy.c
 *
 * Changes:
 *	Mitsuru KANDA @USAGI
 *	Kazunori MIYAZAWA @USAGI
 *	Kunihiro Ishiguro <kunihiro@ipinfusion.com>
 *		IPv6 support
 *	Kazunori MIYAZAWA @USAGI
 *	YOSHIFUJI Hideaki
 *		Split up af-specific portion
 *	Derek Atkins <derek@ihtfp.com>		Add the post_input processor
 *
 */

#include <linux/err.h>
#include <linux/slab.h>
#include <linux/kmod.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <linux/notifier.h>
#include <linux/netdevice.h>
#include <linux/netfilter.h>
#include <linux/module.h>
#include <linux/cache.h>
#include <linux/cpu.h>
#include <linux/audit.h>
#include <linux/rhashtable.h>
#include <linux/if_tunnel.h>
#include <net/dst.h>
#include <net/flow.h>
#include <net/inet_ecn.h>
#include <net/xfrm.h>
#include <net/ip.h>
#include <net/gre.h>
#if IS_ENABLED(CONFIG_IPV6_MIP6)
#include <net/mip6.h>
#endif
#ifdef CONFIG_XFRM_STATISTICS
#include <net/snmp.h>
#endif
#ifdef CONFIG_XFRM_ESPINTCP
#include <net/espintcp.h>
#endif

#include "xfrm_hash.h"

#define XFRM_QUEUE_TMO_MIN ((unsigned)(HZ/10))
#define XFRM_QUEUE_TMO_MAX ((unsigned)(60*HZ))
#define XFRM_MAX_QUEUE_LEN	100

struct xfrm_flo {
	struct dst_entry *dst_orig;
	u8 flags;
};

/* prefixes smaller than this are stored in lists, not trees. */
#define INEXACT_PREFIXLEN_IPV4	16
#define INEXACT_PREFIXLEN_IPV6	48

struct xfrm_pol_inexact_node {
	struct rb_node node;
	union {
		xfrm_address_t addr;
		struct rcu_head rcu;
	};
	u8 prefixlen;

	struct rb_root root;

	/* the policies matching this node; may be an empty list */
	struct hlist_head hhead;
};

/* xfrm inexact policy search tree:
 * xfrm_pol_inexact_bin = hash(dir, type, family, if_id);
 * |
 * +---- root_d: sorted by daddr:prefix
 * |     |
 * |     xfrm_pol_inexact_node
 * |     |
 * |     +- root: sorted by saddr/prefix
 * |     |  |
 * |     |  xfrm_pol_inexact_node
 * |     |  |
 * |     |  + root: unused
 * |     |  |
 * |     |  + hhead: saddr:daddr policies
 * |     |
 * |     +- coarse policies and all any:daddr policies
 * |
 * +---- root_s: sorted by saddr:prefix
 * |     |
 * |     xfrm_pol_inexact_node
 * |     |
 * |     + root: unused
 * |     |
 * |     + hhead: saddr:any policies
 * |
 * +---- coarse policies and all any:any policies
 *
 * Lookups return four candidate lists:
 * 1. any:any list from top-level xfrm_pol_inexact_bin
 * 2. any:daddr list from daddr tree
 * 3. saddr:daddr list from 2nd level daddr tree
 * 4. saddr:any list from saddr tree
 *
 * This result set then needs to be searched for the policy with
 * the lowest priority. If two candidates have the same priority, the
 * youngest one wins.
 */
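/* Illustrative walk-through (annotation, not in the original source): for
 * a flow with saddr 10.1.2.3 and daddr 192.0.2.1, and policies installed
 * for 10.0.0.0/8:192.0.2.0/24, 10.0.0.0/8:any, any:192.0.2.0/24 and
 * any:any, a lookup fills the candidate set roughly as
 *
 *	cand.res[XFRM_POL_CAND_ANY]   = &bin->hhead
 *	cand.res[XFRM_POL_CAND_DADDR] = hhead of the 192.0.2.0/24 node
 *	cand.res[XFRM_POL_CAND_BOTH]  = hhead of its 10.0.0.0/8 subtree node
 *	cand.res[XFRM_POL_CAND_SADDR] = hhead of the root_s 10.0.0.0/8 node
 *
 * and the caller scans all four lists for the best match.
 */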

struct xfrm_pol_inexact_key {
	possible_net_t net;
	u32 if_id;
	u16 family;
	u8 dir, type;
};

struct xfrm_pol_inexact_bin {
	struct xfrm_pol_inexact_key k;
	struct rhash_head head;
	/* list containing '*:*' policies */
	struct hlist_head hhead;

	seqcount_spinlock_t count;
	/* tree sorted by daddr/prefix */
	struct rb_root root_d;

	/* tree sorted by saddr/prefix */
	struct rb_root root_s;

	/* slow path below */
	struct list_head inexact_bins;
	struct rcu_head rcu;
};

enum xfrm_pol_inexact_candidate_type {
	XFRM_POL_CAND_BOTH,
	XFRM_POL_CAND_SADDR,
	XFRM_POL_CAND_DADDR,
	XFRM_POL_CAND_ANY,

	XFRM_POL_CAND_MAX,
};

struct xfrm_pol_inexact_candidates {
	struct hlist_head *res[XFRM_POL_CAND_MAX];
};

static DEFINE_SPINLOCK(xfrm_if_cb_lock);
static struct xfrm_if_cb const __rcu *xfrm_if_cb __read_mostly;

static DEFINE_SPINLOCK(xfrm_policy_afinfo_lock);
static struct xfrm_policy_afinfo const __rcu *xfrm_policy_afinfo[AF_INET6 + 1]
						__read_mostly;

static struct kmem_cache *xfrm_dst_cache __ro_after_init;

static struct rhashtable xfrm_policy_inexact_table;
static const struct rhashtable_params xfrm_pol_inexact_params;

static void xfrm_init_pmtu(struct xfrm_dst **bundle, int nr);
static int stale_bundle(struct dst_entry *dst);
static int xfrm_bundle_ok(struct xfrm_dst *xdst);
static void xfrm_policy_queue_process(struct timer_list *t);

static void __xfrm_policy_link(struct xfrm_policy *pol, int dir);
static struct xfrm_policy *__xfrm_policy_unlink(struct xfrm_policy *pol,
						int dir);

static struct xfrm_pol_inexact_bin *
xfrm_policy_inexact_lookup(struct net *net, u8 type, u16 family, u8 dir,
			   u32 if_id);

static struct xfrm_pol_inexact_bin *
xfrm_policy_inexact_lookup_rcu(struct net *net,
			       u8 type, u16 family, u8 dir, u32 if_id);
static struct xfrm_policy *
xfrm_policy_insert_list(struct hlist_head *chain, struct xfrm_policy *policy,
			bool excl);
static void xfrm_policy_insert_inexact_list(struct hlist_head *chain,
					    struct xfrm_policy *policy);

static bool
xfrm_policy_find_inexact_candidates(struct xfrm_pol_inexact_candidates *cand,
				    struct xfrm_pol_inexact_bin *b,
				    const xfrm_address_t *saddr,
				    const xfrm_address_t *daddr);

static inline bool xfrm_pol_hold_rcu(struct xfrm_policy *policy)
{
	return refcount_inc_not_zero(&policy->refcnt);
}

static inline bool
__xfrm4_selector_match(const struct xfrm_selector *sel, const struct flowi *fl)
{
	const struct flowi4 *fl4 = &fl->u.ip4;

	return addr4_match(fl4->daddr, sel->daddr.a4, sel->prefixlen_d) &&
		addr4_match(fl4->saddr, sel->saddr.a4, sel->prefixlen_s) &&
		!((xfrm_flowi_dport(fl, &fl4->uli) ^ sel->dport) & sel->dport_mask) &&
		!((xfrm_flowi_sport(fl, &fl4->uli) ^ sel->sport) & sel->sport_mask) &&
		(fl4->flowi4_proto == sel->proto || !sel->proto) &&
		(fl4->flowi4_oif == sel->ifindex || !sel->ifindex);
}

static inline bool
__xfrm6_selector_match(const struct xfrm_selector *sel, const struct flowi *fl)
{
	const struct flowi6 *fl6 = &fl->u.ip6;

	return addr_match(&fl6->daddr, &sel->daddr, sel->prefixlen_d) &&
		addr_match(&fl6->saddr, &sel->saddr, sel->prefixlen_s) &&
		!((xfrm_flowi_dport(fl, &fl6->uli) ^ sel->dport) & sel->dport_mask) &&
		!((xfrm_flowi_sport(fl, &fl6->uli) ^ sel->sport) & sel->sport_mask) &&
		(fl6->flowi6_proto == sel->proto || !sel->proto) &&
		(fl6->flowi6_oif == sel->ifindex || !sel->ifindex);
}

bool xfrm_selector_match(const struct xfrm_selector *sel, const struct flowi *fl,
			 unsigned short family)
{
	switch (family) {
	case AF_INET:
		return __xfrm4_selector_match(sel, fl);
	case AF_INET6:
		return __xfrm6_selector_match(sel, fl);
	}
	return false;
}
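/* Usage sketch (illustrative only): a caller typically decodes the flow
 * for the address family at hand and then tests each policy, e.g.
 *
 *	if (xfrm_selector_match(&pol->selector, fl, AF_INET))
 *		... pol's selector covers this IPv4 flow ...
 *
 * Families other than AF_INET/AF_INET6 never match.
 */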

static const struct xfrm_policy_afinfo *xfrm_policy_get_afinfo(unsigned short family)
{
	const struct xfrm_policy_afinfo *afinfo;

	if (unlikely(family >= ARRAY_SIZE(xfrm_policy_afinfo)))
		return NULL;
	rcu_read_lock();
	afinfo = rcu_dereference(xfrm_policy_afinfo[family]);
	if (unlikely(!afinfo))
		rcu_read_unlock();
	return afinfo;
}

/* Called with rcu_read_lock(). */
static const struct xfrm_if_cb *xfrm_if_get_cb(void)
{
	return rcu_dereference(xfrm_if_cb);
}

struct dst_entry *__xfrm_dst_lookup(struct net *net, int tos, int oif,
				    const xfrm_address_t *saddr,
				    const xfrm_address_t *daddr,
				    int family, u32 mark)
{
	const struct xfrm_policy_afinfo *afinfo;
	struct dst_entry *dst;

	afinfo = xfrm_policy_get_afinfo(family);
	if (unlikely(afinfo == NULL))
		return ERR_PTR(-EAFNOSUPPORT);

	dst = afinfo->dst_lookup(net, tos, oif, saddr, daddr, mark);

	rcu_read_unlock();

	return dst;
}
EXPORT_SYMBOL(__xfrm_dst_lookup);

static inline struct dst_entry *xfrm_dst_lookup(struct xfrm_state *x,
						int tos, int oif,
						xfrm_address_t *prev_saddr,
						xfrm_address_t *prev_daddr,
						int family, u32 mark)
{
	struct net *net = xs_net(x);
	xfrm_address_t *saddr = &x->props.saddr;
	xfrm_address_t *daddr = &x->id.daddr;
	struct dst_entry *dst;

	if (x->type->flags & XFRM_TYPE_LOCAL_COADDR) {
		saddr = x->coaddr;
		daddr = prev_daddr;
	}
	if (x->type->flags & XFRM_TYPE_REMOTE_COADDR) {
		saddr = prev_saddr;
		daddr = x->coaddr;
	}

	dst = __xfrm_dst_lookup(net, tos, oif, saddr, daddr, family, mark);

	if (!IS_ERR(dst)) {
		if (prev_saddr != saddr)
			memcpy(prev_saddr, saddr, sizeof(*prev_saddr));
		if (prev_daddr != daddr)
			memcpy(prev_daddr, daddr, sizeof(*prev_daddr));
	}

	return dst;
}

static inline unsigned long make_jiffies(long secs)
{
	if (secs >= (MAX_SCHEDULE_TIMEOUT-1)/HZ)
		return MAX_SCHEDULE_TIMEOUT-1;
	else
		return secs*HZ;
}
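/* Worked example (illustrative): with HZ == 1000, make_jiffies(2) yields
 * 2000 jiffies; any secs value big enough that secs * HZ would reach
 * MAX_SCHEDULE_TIMEOUT - 1 is clamped, so arming a timer with the result
 * cannot overflow.
 */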

static void xfrm_policy_timer(struct timer_list *t)
{
	struct xfrm_policy *xp = from_timer(xp, t, timer);
	time64_t now = ktime_get_real_seconds();
	time64_t next = TIME64_MAX;
	int warn = 0;
	int dir;

	read_lock(&xp->lock);

	if (unlikely(xp->walk.dead))
		goto out;

	dir = xfrm_policy_id2dir(xp->index);

	if (xp->lft.hard_add_expires_seconds) {
		time64_t tmo = xp->lft.hard_add_expires_seconds +
			xp->curlft.add_time - now;
		if (tmo <= 0)
			goto expired;
		if (tmo < next)
			next = tmo;
	}
	if (xp->lft.hard_use_expires_seconds) {
		time64_t tmo = xp->lft.hard_use_expires_seconds +
			(READ_ONCE(xp->curlft.use_time) ? : xp->curlft.add_time) - now;
		if (tmo <= 0)
			goto expired;
		if (tmo < next)
			next = tmo;
	}
	if (xp->lft.soft_add_expires_seconds) {
		time64_t tmo = xp->lft.soft_add_expires_seconds +
			xp->curlft.add_time - now;
		if (tmo <= 0) {
			warn = 1;
			tmo = XFRM_KM_TIMEOUT;
		}
		if (tmo < next)
			next = tmo;
	}
	if (xp->lft.soft_use_expires_seconds) {
		time64_t tmo = xp->lft.soft_use_expires_seconds +
			(READ_ONCE(xp->curlft.use_time) ? : xp->curlft.add_time) - now;
		if (tmo <= 0) {
			warn = 1;
			tmo = XFRM_KM_TIMEOUT;
		}
		if (tmo < next)
			next = tmo;
	}

	if (warn)
		km_policy_expired(xp, dir, 0, 0);
	if (next != TIME64_MAX &&
	    !mod_timer(&xp->timer, jiffies + make_jiffies(next)))
		xfrm_pol_hold(xp);

out:
	read_unlock(&xp->lock);
	xfrm_pol_put(xp);
	return;

expired:
	read_unlock(&xp->lock);
	if (!xfrm_policy_delete(xp, dir))
		km_policy_expired(xp, dir, 1, 0);
	xfrm_pol_put(xp);
}
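/* Lifetime example (illustrative): a policy added at t = 1000s with
 * soft_add_expires_seconds = 60 and hard_add_expires_seconds = 120
 * triggers km_policy_expired(xp, dir, 0, 0) (a soft warning) at
 * t = 1060 and is deleted with a hard expiry notification at t = 1120;
 * after each run the timer re-arms itself with the nearest remaining
 * deadline via make_jiffies().
 */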

/* Allocate xfrm_policy. Not used here; it is supposed to be used by pfkeyv2
 * SPD calls.
 */

struct xfrm_policy *xfrm_policy_alloc(struct net *net, gfp_t gfp)
{
	struct xfrm_policy *policy;

	policy = kzalloc(sizeof(struct xfrm_policy), gfp);

	if (policy) {
		write_pnet(&policy->xp_net, net);
		INIT_LIST_HEAD(&policy->walk.all);
		INIT_HLIST_NODE(&policy->bydst_inexact_list);
		INIT_HLIST_NODE(&policy->bydst);
		INIT_HLIST_NODE(&policy->byidx);
		rwlock_init(&policy->lock);
		refcount_set(&policy->refcnt, 1);
		skb_queue_head_init(&policy->polq.hold_queue);
		timer_setup(&policy->timer, xfrm_policy_timer, 0);
		timer_setup(&policy->polq.hold_timer,
			    xfrm_policy_queue_process, 0);
	}
	return policy;
}
EXPORT_SYMBOL(xfrm_policy_alloc);

static void xfrm_policy_destroy_rcu(struct rcu_head *head)
{
	struct xfrm_policy *policy = container_of(head, struct xfrm_policy, rcu);

	security_xfrm_policy_free(policy->security);
	kfree(policy);
}

/* Destroy xfrm_policy: descendant resources must be released by this point. */

void xfrm_policy_destroy(struct xfrm_policy *policy)
{
	BUG_ON(!policy->walk.dead);

	if (del_timer(&policy->timer) || del_timer(&policy->polq.hold_timer))
		BUG();

	xfrm_dev_policy_free(policy);
	call_rcu(&policy->rcu, xfrm_policy_destroy_rcu);
}
EXPORT_SYMBOL(xfrm_policy_destroy);

/* Rule must be locked. Release descendant resources, announce
 * entry dead. The rule must already be unlinked from the lists.
 */

static void xfrm_policy_kill(struct xfrm_policy *policy)
{
	write_lock_bh(&policy->lock);
	policy->walk.dead = 1;
	write_unlock_bh(&policy->lock);

	atomic_inc(&policy->genid);

	if (del_timer(&policy->polq.hold_timer))
		xfrm_pol_put(policy);
	skb_queue_purge(&policy->polq.hold_queue);

	if (del_timer(&policy->timer))
		xfrm_pol_put(policy);

	xfrm_pol_put(policy);
}

static unsigned int xfrm_policy_hashmax __read_mostly = 1 * 1024 * 1024;

static inline unsigned int idx_hash(struct net *net, u32 index)
{
	return __idx_hash(index, net->xfrm.policy_idx_hmask);
}

/* calculate policy hash thresholds */
static void __get_hash_thresh(struct net *net,
			      unsigned short family, int dir,
			      u8 *dbits, u8 *sbits)
{
	switch (family) {
	case AF_INET:
		*dbits = net->xfrm.policy_bydst[dir].dbits4;
		*sbits = net->xfrm.policy_bydst[dir].sbits4;
		break;

	case AF_INET6:
		*dbits = net->xfrm.policy_bydst[dir].dbits6;
		*sbits = net->xfrm.policy_bydst[dir].sbits6;
		break;

	default:
		*dbits = 0;
		*sbits = 0;
	}
}

static struct hlist_head *policy_hash_bysel(struct net *net,
					    const struct xfrm_selector *sel,
					    unsigned short family, int dir)
{
	unsigned int hmask = net->xfrm.policy_bydst[dir].hmask;
	unsigned int hash;
	u8 dbits;
	u8 sbits;

	__get_hash_thresh(net, family, dir, &dbits, &sbits);
	hash = __sel_hash(sel, family, hmask, dbits, sbits);

	if (hash == hmask + 1)
		return NULL;

	return rcu_dereference_check(net->xfrm.policy_bydst[dir].table,
		     lockdep_is_held(&net->xfrm.xfrm_policy_lock)) + hash;
}

static struct hlist_head *policy_hash_direct(struct net *net,
					     const xfrm_address_t *daddr,
					     const xfrm_address_t *saddr,
					     unsigned short family, int dir)
{
	unsigned int hmask = net->xfrm.policy_bydst[dir].hmask;
	unsigned int hash;
	u8 dbits;
	u8 sbits;

	__get_hash_thresh(net, family, dir, &dbits, &sbits);
	hash = __addr_hash(daddr, saddr, family, hmask, dbits, sbits);

	return rcu_dereference_check(net->xfrm.policy_bydst[dir].table,
		     lockdep_is_held(&net->xfrm.xfrm_policy_lock)) + hash;
}

static void xfrm_dst_hash_transfer(struct net *net,
				   struct hlist_head *list,
				   struct hlist_head *ndsttable,
				   unsigned int nhashmask,
				   int dir)
{
	struct hlist_node *tmp, *entry0 = NULL;
	struct xfrm_policy *pol;
	unsigned int h0 = 0;
	u8 dbits;
	u8 sbits;

redo:
	hlist_for_each_entry_safe(pol, tmp, list, bydst) {
		unsigned int h;

		__get_hash_thresh(net, pol->family, dir, &dbits, &sbits);
		h = __addr_hash(&pol->selector.daddr, &pol->selector.saddr,
				pol->family, nhashmask, dbits, sbits);
		if (!entry0 || pol->xdo.type == XFRM_DEV_OFFLOAD_PACKET) {
			hlist_del_rcu(&pol->bydst);
			hlist_add_head_rcu(&pol->bydst, ndsttable + h);
			h0 = h;
		} else {
			if (h != h0)
				continue;
			hlist_del_rcu(&pol->bydst);
			hlist_add_behind_rcu(&pol->bydst, entry0);
		}
		entry0 = &pol->bydst;
	}
	if (!hlist_empty(list)) {
		entry0 = NULL;
		goto redo;
	}
}

static void xfrm_idx_hash_transfer(struct hlist_head *list,
				   struct hlist_head *nidxtable,
				   unsigned int nhashmask)
{
	struct hlist_node *tmp;
	struct xfrm_policy *pol;

	hlist_for_each_entry_safe(pol, tmp, list, byidx) {
		unsigned int h;

		h = __idx_hash(pol->index, nhashmask);
		hlist_add_head(&pol->byidx, nidxtable+h);
	}
}

static unsigned long xfrm_new_hash_mask(unsigned int old_hmask)
{
	return ((old_hmask + 1) << 1) - 1;
}
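/* Example (illustrative): growing always doubles the table, so an old
 * hmask of 15 (16 buckets) becomes ((15 + 1) << 1) - 1 = 31 (32 buckets).
 */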

static void xfrm_bydst_resize(struct net *net, int dir)
{
	unsigned int hmask = net->xfrm.policy_bydst[dir].hmask;
	unsigned int nhashmask = xfrm_new_hash_mask(hmask);
	unsigned int nsize = (nhashmask + 1) * sizeof(struct hlist_head);
	struct hlist_head *ndst = xfrm_hash_alloc(nsize);
	struct hlist_head *odst;
	int i;

	if (!ndst)
		return;

	spin_lock_bh(&net->xfrm.xfrm_policy_lock);
	write_seqcount_begin(&net->xfrm.xfrm_policy_hash_generation);

	odst = rcu_dereference_protected(net->xfrm.policy_bydst[dir].table,
				lockdep_is_held(&net->xfrm.xfrm_policy_lock));

	for (i = hmask; i >= 0; i--)
		xfrm_dst_hash_transfer(net, odst + i, ndst, nhashmask, dir);

	rcu_assign_pointer(net->xfrm.policy_bydst[dir].table, ndst);
	net->xfrm.policy_bydst[dir].hmask = nhashmask;

	write_seqcount_end(&net->xfrm.xfrm_policy_hash_generation);
	spin_unlock_bh(&net->xfrm.xfrm_policy_lock);

	synchronize_rcu();

	xfrm_hash_free(odst, (hmask + 1) * sizeof(struct hlist_head));
}

static void xfrm_byidx_resize(struct net *net)
{
	unsigned int hmask = net->xfrm.policy_idx_hmask;
	unsigned int nhashmask = xfrm_new_hash_mask(hmask);
	unsigned int nsize = (nhashmask + 1) * sizeof(struct hlist_head);
	struct hlist_head *oidx = net->xfrm.policy_byidx;
	struct hlist_head *nidx = xfrm_hash_alloc(nsize);
	int i;

	if (!nidx)
		return;

	spin_lock_bh(&net->xfrm.xfrm_policy_lock);

	for (i = hmask; i >= 0; i--)
		xfrm_idx_hash_transfer(oidx + i, nidx, nhashmask);

	net->xfrm.policy_byidx = nidx;
	net->xfrm.policy_idx_hmask = nhashmask;

	spin_unlock_bh(&net->xfrm.xfrm_policy_lock);

	xfrm_hash_free(oidx, (hmask + 1) * sizeof(struct hlist_head));
}

static inline int xfrm_bydst_should_resize(struct net *net, int dir, int *total)
{
	unsigned int cnt = net->xfrm.policy_count[dir];
	unsigned int hmask = net->xfrm.policy_bydst[dir].hmask;

	if (total)
		*total += cnt;

	if ((hmask + 1) < xfrm_policy_hashmax &&
	    cnt > hmask)
		return 1;

	return 0;
}

static inline int xfrm_byidx_should_resize(struct net *net, int total)
{
	unsigned int hmask = net->xfrm.policy_idx_hmask;

	if ((hmask + 1) < xfrm_policy_hashmax &&
	    total > hmask)
		return 1;

	return 0;
}
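/* Example (illustrative): with a 16-bucket table (hmask == 15) and the
 * default xfrm_policy_hashmax of 1M, a resize is requested as soon as
 * more than 15 policies live in that direction, i.e. once the average
 * load exceeds about one entry per bucket.
 */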

void xfrm_spd_getinfo(struct net *net, struct xfrmk_spdinfo *si)
{
	si->incnt = net->xfrm.policy_count[XFRM_POLICY_IN];
	si->outcnt = net->xfrm.policy_count[XFRM_POLICY_OUT];
	si->fwdcnt = net->xfrm.policy_count[XFRM_POLICY_FWD];
	si->inscnt = net->xfrm.policy_count[XFRM_POLICY_IN+XFRM_POLICY_MAX];
	si->outscnt = net->xfrm.policy_count[XFRM_POLICY_OUT+XFRM_POLICY_MAX];
	si->fwdscnt = net->xfrm.policy_count[XFRM_POLICY_FWD+XFRM_POLICY_MAX];
	si->spdhcnt = net->xfrm.policy_idx_hmask;
	si->spdhmcnt = xfrm_policy_hashmax;
}
EXPORT_SYMBOL(xfrm_spd_getinfo);

static DEFINE_MUTEX(hash_resize_mutex);
static void xfrm_hash_resize(struct work_struct *work)
{
	struct net *net = container_of(work, struct net, xfrm.policy_hash_work);
	int dir, total;

	mutex_lock(&hash_resize_mutex);

	total = 0;
	for (dir = 0; dir < XFRM_POLICY_MAX; dir++) {
		if (xfrm_bydst_should_resize(net, dir, &total))
			xfrm_bydst_resize(net, dir);
	}
	if (xfrm_byidx_should_resize(net, total))
		xfrm_byidx_resize(net);

	mutex_unlock(&hash_resize_mutex);
}

/* Make sure *pol can be inserted into fastbin.
 * Useful to check that later insert requests will be successful
 * (provided xfrm_policy_lock is held throughout).
 */
static struct xfrm_pol_inexact_bin *
xfrm_policy_inexact_alloc_bin(const struct xfrm_policy *pol, u8 dir)
{
	struct xfrm_pol_inexact_bin *bin, *prev;
	struct xfrm_pol_inexact_key k = {
		.family = pol->family,
		.type = pol->type,
		.dir = dir,
		.if_id = pol->if_id,
	};
	struct net *net = xp_net(pol);

	lockdep_assert_held(&net->xfrm.xfrm_policy_lock);

	write_pnet(&k.net, net);
	bin = rhashtable_lookup_fast(&xfrm_policy_inexact_table, &k,
				     xfrm_pol_inexact_params);
	if (bin)
		return bin;

	bin = kzalloc(sizeof(*bin), GFP_ATOMIC);
	if (!bin)
		return NULL;

	bin->k = k;
	INIT_HLIST_HEAD(&bin->hhead);
	bin->root_d = RB_ROOT;
	bin->root_s = RB_ROOT;
	seqcount_spinlock_init(&bin->count, &net->xfrm.xfrm_policy_lock);

	prev = rhashtable_lookup_get_insert_key(&xfrm_policy_inexact_table,
						&bin->k, &bin->head,
						xfrm_pol_inexact_params);
	if (!prev) {
		list_add(&bin->inexact_bins, &net->xfrm.inexact_bins);
		return bin;
	}

	kfree(bin);

	return IS_ERR(prev) ? NULL : prev;
}

static bool xfrm_pol_inexact_addr_use_any_list(const xfrm_address_t *addr,
					       int family, u8 prefixlen)
{
	if (xfrm_addr_any(addr, family))
		return true;

	if (family == AF_INET6 && prefixlen < INEXACT_PREFIXLEN_IPV6)
		return true;

	if (family == AF_INET && prefixlen < INEXACT_PREFIXLEN_IPV4)
		return true;

	return false;
}
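/* Example (illustrative): an IPv4 selector for 10.0.0.0/8 (prefixlen 8 <
 * INEXACT_PREFIXLEN_IPV4) stays on a list, 10.1.0.0/16 is specific enough
 * for the rb-tree, and 0.0.0.0/0 always stays on a list.
 */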

static bool
xfrm_policy_inexact_insert_use_any_list(const struct xfrm_policy *policy)
{
	const xfrm_address_t *addr;
	bool saddr_any, daddr_any;
	u8 prefixlen;

	addr = &policy->selector.saddr;
	prefixlen = policy->selector.prefixlen_s;

	saddr_any = xfrm_pol_inexact_addr_use_any_list(addr,
						       policy->family,
						       prefixlen);
	addr = &policy->selector.daddr;
	prefixlen = policy->selector.prefixlen_d;
	daddr_any = xfrm_pol_inexact_addr_use_any_list(addr,
						       policy->family,
						       prefixlen);
	return saddr_any && daddr_any;
}

static void xfrm_pol_inexact_node_init(struct xfrm_pol_inexact_node *node,
				       const xfrm_address_t *addr, u8 prefixlen)
{
	node->addr = *addr;
	node->prefixlen = prefixlen;
}

static struct xfrm_pol_inexact_node *
xfrm_pol_inexact_node_alloc(const xfrm_address_t *addr, u8 prefixlen)
{
	struct xfrm_pol_inexact_node *node;

	node = kzalloc(sizeof(*node), GFP_ATOMIC);
	if (node)
		xfrm_pol_inexact_node_init(node, addr, prefixlen);

	return node;
}

static int xfrm_policy_addr_delta(const xfrm_address_t *a,
				  const xfrm_address_t *b,
				  u8 prefixlen, u16 family)
{
	u32 ma, mb, mask;
	unsigned int pdw, pbi;
	int delta = 0;

	switch (family) {
	case AF_INET:
		if (prefixlen == 0)
			return 0;
		mask = ~0U << (32 - prefixlen);
		ma = ntohl(a->a4) & mask;
		mb = ntohl(b->a4) & mask;
		if (ma < mb)
			delta = -1;
		else if (ma > mb)
			delta = 1;
		break;
	case AF_INET6:
		pdw = prefixlen >> 5;
		pbi = prefixlen & 0x1f;

		if (pdw) {
			delta = memcmp(a->a6, b->a6, pdw << 2);
			if (delta)
				return delta;
		}
		if (pbi) {
			mask = ~0U << (32 - pbi);
			ma = ntohl(a->a6[pdw]) & mask;
			mb = ntohl(b->a6[pdw]) & mask;
			if (ma < mb)
				delta = -1;
			else if (ma > mb)
				delta = 1;
		}
		break;
	default:
		break;
	}

	return delta;
}
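/* Worked example (illustrative): for AF_INET with prefixlen 24 the mask
 * is 0xffffff00, so 192.0.2.1 and 192.0.2.200 compare equal (delta == 0),
 * while 192.0.2.1 sorts before 192.0.3.1 (delta < 0). For IPv6, whole
 * 32-bit words are memcmp'd first and the remaining prefix bits are
 * compared the same way.
 */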

static void xfrm_policy_inexact_list_reinsert(struct net *net,
					      struct xfrm_pol_inexact_node *n,
					      u16 family)
{
	unsigned int matched_s, matched_d;
	struct xfrm_policy *policy, *p;

	matched_s = 0;
	matched_d = 0;

	list_for_each_entry_reverse(policy, &net->xfrm.policy_all, walk.all) {
		struct hlist_node *newpos = NULL;
		bool matches_s, matches_d;

		if (policy->walk.dead || !policy->bydst_reinsert)
			continue;

		WARN_ON_ONCE(policy->family != family);

		policy->bydst_reinsert = false;
		hlist_for_each_entry(p, &n->hhead, bydst) {
			if (policy->priority > p->priority)
				newpos = &p->bydst;
			else if (policy->priority == p->priority &&
				 policy->pos > p->pos)
				newpos = &p->bydst;
			else
				break;
		}

		if (newpos && policy->xdo.type != XFRM_DEV_OFFLOAD_PACKET)
			hlist_add_behind_rcu(&policy->bydst, newpos);
		else
			hlist_add_head_rcu(&policy->bydst, &n->hhead);

		/* paranoia checks follow.
		 * Check that the reinserted policy matches at least
		 * saddr or daddr for current node prefix.
		 *
		 * Matching both is fine, matching saddr in one policy
		 * (but not daddr) and then matching only daddr in another
		 * is a bug.
		 */
		matches_s = xfrm_policy_addr_delta(&policy->selector.saddr,
						   &n->addr,
						   n->prefixlen,
						   family) == 0;
		matches_d = xfrm_policy_addr_delta(&policy->selector.daddr,
						   &n->addr,
						   n->prefixlen,
						   family) == 0;
		if (matches_s && matches_d)
			continue;

		WARN_ON_ONCE(!matches_s && !matches_d);
		if (matches_s)
			matched_s++;
		if (matches_d)
			matched_d++;
		WARN_ON_ONCE(matched_s && matched_d);
	}
}

static void xfrm_policy_inexact_node_reinsert(struct net *net,
					      struct xfrm_pol_inexact_node *n,
					      struct rb_root *new,
					      u16 family)
{
	struct xfrm_pol_inexact_node *node;
	struct rb_node **p, *parent;

	/* we should not have another subtree here */
	WARN_ON_ONCE(!RB_EMPTY_ROOT(&n->root));
restart:
	parent = NULL;
	p = &new->rb_node;
	while (*p) {
		u8 prefixlen;
		int delta;

		parent = *p;
		node = rb_entry(*p, struct xfrm_pol_inexact_node, node);

		prefixlen = min(node->prefixlen, n->prefixlen);

		delta = xfrm_policy_addr_delta(&n->addr, &node->addr,
					       prefixlen, family);
		if (delta < 0) {
			p = &parent->rb_left;
		} else if (delta > 0) {
			p = &parent->rb_right;
		} else {
			bool same_prefixlen = node->prefixlen == n->prefixlen;
			struct xfrm_policy *tmp;

			hlist_for_each_entry(tmp, &n->hhead, bydst) {
				tmp->bydst_reinsert = true;
				hlist_del_rcu(&tmp->bydst);
			}

			node->prefixlen = prefixlen;

			xfrm_policy_inexact_list_reinsert(net, node, family);

			if (same_prefixlen) {
				kfree_rcu(n, rcu);
				return;
			}

			rb_erase(*p, new);
			kfree_rcu(n, rcu);
			n = node;
			goto restart;
		}
	}

	rb_link_node_rcu(&n->node, parent, p);
	rb_insert_color(&n->node, new);
}

/* merge nodes v and n */
static void xfrm_policy_inexact_node_merge(struct net *net,
					   struct xfrm_pol_inexact_node *v,
					   struct xfrm_pol_inexact_node *n,
					   u16 family)
{
	struct xfrm_pol_inexact_node *node;
	struct xfrm_policy *tmp;
	struct rb_node *rnode;

	/* To-be-merged node v has a subtree.
	 *
	 * Dismantle it and insert its nodes to n->root.
	 */
	while ((rnode = rb_first(&v->root)) != NULL) {
		node = rb_entry(rnode, struct xfrm_pol_inexact_node, node);
		rb_erase(&node->node, &v->root);
		xfrm_policy_inexact_node_reinsert(net, node, &n->root,
						  family);
	}

	hlist_for_each_entry(tmp, &v->hhead, bydst) {
		tmp->bydst_reinsert = true;
		hlist_del_rcu(&tmp->bydst);
	}

	xfrm_policy_inexact_list_reinsert(net, n, family);
}

static struct xfrm_pol_inexact_node *
xfrm_policy_inexact_insert_node(struct net *net,
				struct rb_root *root,
				xfrm_address_t *addr,
				u16 family, u8 prefixlen, u8 dir)
{
	struct xfrm_pol_inexact_node *cached = NULL;
	struct rb_node **p, *parent = NULL;
	struct xfrm_pol_inexact_node *node;

	p = &root->rb_node;
	while (*p) {
		int delta;

		parent = *p;
		node = rb_entry(*p, struct xfrm_pol_inexact_node, node);

		delta = xfrm_policy_addr_delta(addr, &node->addr,
					       node->prefixlen,
					       family);
		if (delta == 0 && prefixlen >= node->prefixlen) {
			WARN_ON_ONCE(cached); /* ipsec policies got lost */
			return node;
		}

		if (delta < 0)
			p = &parent->rb_left;
		else
			p = &parent->rb_right;

		if (prefixlen < node->prefixlen) {
			delta = xfrm_policy_addr_delta(addr, &node->addr,
						       prefixlen,
						       family);
			if (delta)
				continue;

			/* This node is a subnet of the new prefix. It needs
			 * to be removed and re-inserted with the smaller
			 * prefix and all nodes that are now also covered
			 * by the reduced prefixlen.
			 */
			rb_erase(&node->node, root);

			if (!cached) {
				xfrm_pol_inexact_node_init(node, addr,
							   prefixlen);
				cached = node;
			} else {
				/* This node also falls within the new
				 * prefixlen. Merge the to-be-reinserted
				 * node and this one.
				 */
				xfrm_policy_inexact_node_merge(net, node,
							       cached, family);
				kfree_rcu(node, rcu);
			}

			/* restart */
			p = &root->rb_node;
			parent = NULL;
		}
	}

	node = cached;
	if (!node) {
		node = xfrm_pol_inexact_node_alloc(addr, prefixlen);
		if (!node)
			return NULL;
	}

	rb_link_node_rcu(&node->node, parent, p);
	rb_insert_color(&node->node, root);

	return node;
}
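/* Example (illustrative): if the tree already holds a node for
 * 10.1.0.0/16 and a policy for 10.0.0.0/8 is inserted, the /16 node
 * falls inside the new, shorter prefix: it is unlinked from the tree,
 * re-initialized as the 10.0.0.0/8 node, and any other nodes now covered
 * by /8 are folded into it via xfrm_policy_inexact_node_merge().
 */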

static void xfrm_policy_inexact_gc_tree(struct rb_root *r, bool rm)
{
	struct xfrm_pol_inexact_node *node;
	struct rb_node *rn = rb_first(r);

	while (rn) {
		node = rb_entry(rn, struct xfrm_pol_inexact_node, node);

		xfrm_policy_inexact_gc_tree(&node->root, rm);
		rn = rb_next(rn);

		if (!hlist_empty(&node->hhead) || !RB_EMPTY_ROOT(&node->root)) {
			WARN_ON_ONCE(rm);
			continue;
		}

		rb_erase(&node->node, r);
		kfree_rcu(node, rcu);
	}
}

static void __xfrm_policy_inexact_prune_bin(struct xfrm_pol_inexact_bin *b, bool net_exit)
{
	write_seqcount_begin(&b->count);
	xfrm_policy_inexact_gc_tree(&b->root_d, net_exit);
	xfrm_policy_inexact_gc_tree(&b->root_s, net_exit);
	write_seqcount_end(&b->count);

	if (!RB_EMPTY_ROOT(&b->root_d) || !RB_EMPTY_ROOT(&b->root_s) ||
	    !hlist_empty(&b->hhead)) {
		WARN_ON_ONCE(net_exit);
		return;
	}

	if (rhashtable_remove_fast(&xfrm_policy_inexact_table, &b->head,
				   xfrm_pol_inexact_params) == 0) {
		list_del(&b->inexact_bins);
		kfree_rcu(b, rcu);
	}
}

static void xfrm_policy_inexact_prune_bin(struct xfrm_pol_inexact_bin *b)
{
	struct net *net = read_pnet(&b->k.net);

	spin_lock_bh(&net->xfrm.xfrm_policy_lock);
	__xfrm_policy_inexact_prune_bin(b, false);
	spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
}

static void __xfrm_policy_inexact_flush(struct net *net)
{
	struct xfrm_pol_inexact_bin *bin, *t;

	lockdep_assert_held(&net->xfrm.xfrm_policy_lock);

	list_for_each_entry_safe(bin, t, &net->xfrm.inexact_bins, inexact_bins)
		__xfrm_policy_inexact_prune_bin(bin, false);
}

static struct hlist_head *
xfrm_policy_inexact_alloc_chain(struct xfrm_pol_inexact_bin *bin,
				struct xfrm_policy *policy, u8 dir)
{
	struct xfrm_pol_inexact_node *n;
	struct net *net;

	net = xp_net(policy);
	lockdep_assert_held(&net->xfrm.xfrm_policy_lock);

	if (xfrm_policy_inexact_insert_use_any_list(policy))
		return &bin->hhead;

	if (xfrm_pol_inexact_addr_use_any_list(&policy->selector.daddr,
					       policy->family,
					       policy->selector.prefixlen_d)) {
		write_seqcount_begin(&bin->count);
		n = xfrm_policy_inexact_insert_node(net,
						    &bin->root_s,
						    &policy->selector.saddr,
						    policy->family,
						    policy->selector.prefixlen_s,
						    dir);
		write_seqcount_end(&bin->count);
		if (!n)
			return NULL;

		return &n->hhead;
	}

	/* daddr is fixed */
	write_seqcount_begin(&bin->count);
	n = xfrm_policy_inexact_insert_node(net,
					    &bin->root_d,
					    &policy->selector.daddr,
					    policy->family,
					    policy->selector.prefixlen_d, dir);
	write_seqcount_end(&bin->count);
	if (!n)
		return NULL;

	/* saddr is wildcard */
	if (xfrm_pol_inexact_addr_use_any_list(&policy->selector.saddr,
					       policy->family,
					       policy->selector.prefixlen_s))
		return &n->hhead;

	write_seqcount_begin(&bin->count);
	n = xfrm_policy_inexact_insert_node(net,
					    &n->root,
					    &policy->selector.saddr,
					    policy->family,
					    policy->selector.prefixlen_s, dir);
	write_seqcount_end(&bin->count);
	if (!n)
		return NULL;

	return &n->hhead;
}
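/* Chain selection summary (illustrative annotation):
 *
 *	saddr	daddr	chain used
 *	any	any	bin->hhead
 *	set	any	node in bin->root_s
 *	any	set	node in bin->root_d
 *	set	set	saddr subtree of the bin->root_d node
 *
 * where "any" also covers prefixes shorter than INEXACT_PREFIXLEN_*.
 */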

static struct xfrm_policy *
xfrm_policy_inexact_insert(struct xfrm_policy *policy, u8 dir, int excl)
{
	struct xfrm_pol_inexact_bin *bin;
	struct xfrm_policy *delpol;
	struct hlist_head *chain;
	struct net *net;

	bin = xfrm_policy_inexact_alloc_bin(policy, dir);
	if (!bin)
		return ERR_PTR(-ENOMEM);

	net = xp_net(policy);
	lockdep_assert_held(&net->xfrm.xfrm_policy_lock);

	chain = xfrm_policy_inexact_alloc_chain(bin, policy, dir);
	if (!chain) {
		__xfrm_policy_inexact_prune_bin(bin, false);
		return ERR_PTR(-ENOMEM);
	}

	delpol = xfrm_policy_insert_list(chain, policy, excl);
	if (delpol && excl) {
		__xfrm_policy_inexact_prune_bin(bin, false);
		return ERR_PTR(-EEXIST);
	}

	chain = &net->xfrm.policy_inexact[dir];
	xfrm_policy_insert_inexact_list(chain, policy);

	if (delpol)
		__xfrm_policy_inexact_prune_bin(bin, false);

	return delpol;
}

static void xfrm_hash_rebuild(struct work_struct *work)
{
	struct net *net = container_of(work, struct net,
				       xfrm.policy_hthresh.work);
	unsigned int hmask;
	struct xfrm_policy *pol;
	struct xfrm_policy *policy;
	struct hlist_head *chain;
	struct hlist_head *odst;
	struct hlist_node *newpos;
	int i;
	int dir;
	unsigned seq;
	u8 lbits4, rbits4, lbits6, rbits6;

	mutex_lock(&hash_resize_mutex);

	/* read selector prefixlen thresholds */
	do {
		seq = read_seqbegin(&net->xfrm.policy_hthresh.lock);

		lbits4 = net->xfrm.policy_hthresh.lbits4;
		rbits4 = net->xfrm.policy_hthresh.rbits4;
		lbits6 = net->xfrm.policy_hthresh.lbits6;
		rbits6 = net->xfrm.policy_hthresh.rbits6;
	} while (read_seqretry(&net->xfrm.policy_hthresh.lock, seq));

	spin_lock_bh(&net->xfrm.xfrm_policy_lock);
	write_seqcount_begin(&net->xfrm.xfrm_policy_hash_generation);

	/* make sure that we can insert the indirect policies again before
	 * we start with destructive action.
	 */
	list_for_each_entry(policy, &net->xfrm.policy_all, walk.all) {
		struct xfrm_pol_inexact_bin *bin;
		u8 dbits, sbits;

		if (policy->walk.dead)
			continue;

		dir = xfrm_policy_id2dir(policy->index);
		if (dir >= XFRM_POLICY_MAX)
			continue;

		if ((dir & XFRM_POLICY_MASK) == XFRM_POLICY_OUT) {
			if (policy->family == AF_INET) {
				dbits = rbits4;
				sbits = lbits4;
			} else {
				dbits = rbits6;
				sbits = lbits6;
			}
		} else {
			if (policy->family == AF_INET) {
				dbits = lbits4;
				sbits = rbits4;
			} else {
				dbits = lbits6;
				sbits = rbits6;
			}
		}

		if (policy->selector.prefixlen_d < dbits ||
		    policy->selector.prefixlen_s < sbits)
			continue;

		bin = xfrm_policy_inexact_alloc_bin(policy, dir);
		if (!bin)
			goto out_unlock;

		if (!xfrm_policy_inexact_alloc_chain(bin, policy, dir))
			goto out_unlock;
	}

	/* reset the bydst and inexact table in all directions */
	for (dir = 0; dir < XFRM_POLICY_MAX; dir++) {
		struct hlist_node *n;

		hlist_for_each_entry_safe(policy, n,
					  &net->xfrm.policy_inexact[dir],
					  bydst_inexact_list) {
			hlist_del_rcu(&policy->bydst);
			hlist_del_init(&policy->bydst_inexact_list);
		}

		hmask = net->xfrm.policy_bydst[dir].hmask;
		odst = net->xfrm.policy_bydst[dir].table;
		for (i = hmask; i >= 0; i--) {
			hlist_for_each_entry_safe(policy, n, odst + i, bydst)
				hlist_del_rcu(&policy->bydst);
		}
		if ((dir & XFRM_POLICY_MASK) == XFRM_POLICY_OUT) {
			/* dir out => dst = remote, src = local */
			net->xfrm.policy_bydst[dir].dbits4 = rbits4;
			net->xfrm.policy_bydst[dir].sbits4 = lbits4;
			net->xfrm.policy_bydst[dir].dbits6 = rbits6;
			net->xfrm.policy_bydst[dir].sbits6 = lbits6;
		} else {
			/* dir in/fwd => dst = local, src = remote */
			net->xfrm.policy_bydst[dir].dbits4 = lbits4;
			net->xfrm.policy_bydst[dir].sbits4 = rbits4;
			net->xfrm.policy_bydst[dir].dbits6 = lbits6;
			net->xfrm.policy_bydst[dir].sbits6 = rbits6;
		}
	}

	/* re-insert all policies by order of creation */
	list_for_each_entry_reverse(policy, &net->xfrm.policy_all, walk.all) {
		if (policy->walk.dead)
			continue;
		dir = xfrm_policy_id2dir(policy->index);
		if (dir >= XFRM_POLICY_MAX) {
			/* skip socket policies */
			continue;
		}
		newpos = NULL;
		chain = policy_hash_bysel(net, &policy->selector,
					  policy->family, dir);

		if (!chain) {
			void *p = xfrm_policy_inexact_insert(policy, dir, 0);

			WARN_ONCE(IS_ERR(p), "reinsert: %ld\n", PTR_ERR(p));
			continue;
		}

		hlist_for_each_entry(pol, chain, bydst) {
			if (policy->priority >= pol->priority)
				newpos = &pol->bydst;
			else
				break;
		}
		if (newpos && policy->xdo.type != XFRM_DEV_OFFLOAD_PACKET)
			hlist_add_behind_rcu(&policy->bydst, newpos);
		else
			hlist_add_head_rcu(&policy->bydst, chain);
	}

out_unlock:
	__xfrm_policy_inexact_flush(net);
	write_seqcount_end(&net->xfrm.xfrm_policy_hash_generation);
	spin_unlock_bh(&net->xfrm.xfrm_policy_lock);

	mutex_unlock(&hash_resize_mutex);
}

void xfrm_policy_hash_rebuild(struct net *net)
{
	schedule_work(&net->xfrm.policy_hthresh.work);
}
EXPORT_SYMBOL(xfrm_policy_hash_rebuild);

/* Generate new index... KAME seems to generate them ordered by cost
 * of an absolute unpredictability of ordering of rules. This will not pass. */
static u32 xfrm_gen_index(struct net *net, int dir, u32 index)
{
	for (;;) {
		struct hlist_head *list;
		struct xfrm_policy *p;
		u32 idx;
		int found;

		if (!index) {
			idx = (net->xfrm.idx_generator | dir);
			net->xfrm.idx_generator += 8;
		} else {
			idx = index;
			index = 0;
		}

		if (idx == 0)
			idx = 8;
		list = net->xfrm.policy_byidx + idx_hash(net, idx);
		found = 0;
		hlist_for_each_entry(p, list, byidx) {
			if (p->index == idx) {
				found = 1;
				break;
			}
		}
		if (!found)
			return idx;
	}
}
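/* Example (illustrative): indices carry the direction in their low three
 * bits; with idx_generator == 16, a new XFRM_POLICY_OUT (dir 1) policy is
 * offered index 17 and the generator advances to 24. The loop retries
 * until an index not already present in policy_byidx is found.
 */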

static inline int selector_cmp(struct xfrm_selector *s1, struct xfrm_selector *s2)
{
	u32 *p1 = (u32 *) s1;
	u32 *p2 = (u32 *) s2;
	int len = sizeof(struct xfrm_selector) / sizeof(u32);
	int i;

	for (i = 0; i < len; i++) {
		if (p1[i] != p2[i])
			return 1;
	}

	return 0;
}

static void xfrm_policy_requeue(struct xfrm_policy *old,
				struct xfrm_policy *new)
{
	struct xfrm_policy_queue *pq = &old->polq;
	struct sk_buff_head list;

	if (skb_queue_empty(&pq->hold_queue))
		return;

	__skb_queue_head_init(&list);

	spin_lock_bh(&pq->hold_queue.lock);
	skb_queue_splice_init(&pq->hold_queue, &list);
	if (del_timer(&pq->hold_timer))
		xfrm_pol_put(old);
	spin_unlock_bh(&pq->hold_queue.lock);

	pq = &new->polq;

	spin_lock_bh(&pq->hold_queue.lock);
	skb_queue_splice(&list, &pq->hold_queue);
	pq->timeout = XFRM_QUEUE_TMO_MIN;
	if (!mod_timer(&pq->hold_timer, jiffies))
		xfrm_pol_hold(new);
	spin_unlock_bh(&pq->hold_queue.lock);
}

static inline bool xfrm_policy_mark_match(const struct xfrm_mark *mark,
					  struct xfrm_policy *pol)
{
	return mark->v == pol->mark.v && mark->m == pol->mark.m;
}

static u32 xfrm_pol_bin_key(const void *data, u32 len, u32 seed)
{
	const struct xfrm_pol_inexact_key *k = data;
	u32 a = k->type << 24 | k->dir << 16 | k->family;

	return jhash_3words(a, k->if_id, net_hash_mix(read_pnet(&k->net)),
			    seed);
}
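/* Example (illustrative): type, dir and family are packed into a single
 * u32 (type << 24 | dir << 16 | family) and jhashed together with if_id
 * and the namespace hash, so bins that differ only in netns or if_id are
 * already distinguished at hash time.
 */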

static u32 xfrm_pol_bin_obj(const void *data, u32 len, u32 seed)
{
	const struct xfrm_pol_inexact_bin *b = data;

	return xfrm_pol_bin_key(&b->k, 0, seed);
}

static int xfrm_pol_bin_cmp(struct rhashtable_compare_arg *arg,
			    const void *ptr)
{
	const struct xfrm_pol_inexact_key *key = arg->key;
	const struct xfrm_pol_inexact_bin *b = ptr;
	int ret;

	if (!net_eq(read_pnet(&b->k.net), read_pnet(&key->net)))
		return -1;

	ret = b->k.dir ^ key->dir;
	if (ret)
		return ret;

	ret = b->k.type ^ key->type;
	if (ret)
		return ret;

	ret = b->k.family ^ key->family;
	if (ret)
		return ret;

	return b->k.if_id ^ key->if_id;
}

static const struct rhashtable_params xfrm_pol_inexact_params = {
	.head_offset		= offsetof(struct xfrm_pol_inexact_bin, head),
	.hashfn			= xfrm_pol_bin_key,
	.obj_hashfn		= xfrm_pol_bin_obj,
	.obj_cmpfn		= xfrm_pol_bin_cmp,
	.automatic_shrinking	= true,
};

static void xfrm_policy_insert_inexact_list(struct hlist_head *chain,
					    struct xfrm_policy *policy)
{
	struct xfrm_policy *pol, *delpol = NULL;
	struct hlist_node *newpos = NULL;
	int i = 0;

	hlist_for_each_entry(pol, chain, bydst_inexact_list) {
		if (pol->type == policy->type &&
		    pol->if_id == policy->if_id &&
		    !selector_cmp(&pol->selector, &policy->selector) &&
		    xfrm_policy_mark_match(&policy->mark, pol) &&
		    xfrm_sec_ctx_match(pol->security, policy->security) &&
		    !WARN_ON(delpol)) {
			delpol = pol;
			if (policy->priority > pol->priority)
				continue;
		} else if (policy->priority >= pol->priority) {
			newpos = &pol->bydst_inexact_list;
			continue;
		}
		if (delpol)
			break;
	}

	if (newpos && policy->xdo.type != XFRM_DEV_OFFLOAD_PACKET)
		hlist_add_behind_rcu(&policy->bydst_inexact_list, newpos);
	else
		hlist_add_head_rcu(&policy->bydst_inexact_list, chain);

	hlist_for_each_entry(pol, chain, bydst_inexact_list) {
		pol->pos = i;
		i++;
	}
}
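/* Note (annotation): the renumbering loop above makes pol->pos reflect
 * each policy's position in the inexact chain; lookups (see
 * xfrm_policy_bysel_ctx()) use it as the tie-breaker between candidates
 * from different lists, where the lower pos wins.
 */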

static struct xfrm_policy *xfrm_policy_insert_list(struct hlist_head *chain,
						   struct xfrm_policy *policy,
						   bool excl)
{
	struct xfrm_policy *pol, *newpos = NULL, *delpol = NULL;

	hlist_for_each_entry(pol, chain, bydst) {
		if (pol->type == policy->type &&
		    pol->if_id == policy->if_id &&
		    !selector_cmp(&pol->selector, &policy->selector) &&
		    xfrm_policy_mark_match(&policy->mark, pol) &&
		    xfrm_sec_ctx_match(pol->security, policy->security) &&
		    !WARN_ON(delpol)) {
			if (excl)
				return ERR_PTR(-EEXIST);
			delpol = pol;
			if (policy->priority > pol->priority)
				continue;
		} else if (policy->priority >= pol->priority) {
			newpos = pol;
			continue;
		}
		if (delpol)
			break;
	}

	if (newpos && policy->xdo.type != XFRM_DEV_OFFLOAD_PACKET)
		hlist_add_behind_rcu(&policy->bydst, &newpos->bydst);
	else
		/* Packet offload policies are inserted at the head
		 * to speed up lookups.
		 */
		hlist_add_head_rcu(&policy->bydst, chain);

	return delpol;
}

int xfrm_policy_insert(int dir, struct xfrm_policy *policy, int excl)
{
	struct net *net = xp_net(policy);
	struct xfrm_policy *delpol;
	struct hlist_head *chain;

	spin_lock_bh(&net->xfrm.xfrm_policy_lock);
	chain = policy_hash_bysel(net, &policy->selector, policy->family, dir);
	if (chain)
		delpol = xfrm_policy_insert_list(chain, policy, excl);
	else
		delpol = xfrm_policy_inexact_insert(policy, dir, excl);

	if (IS_ERR(delpol)) {
		spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
		return PTR_ERR(delpol);
	}

	__xfrm_policy_link(policy, dir);

	/* After previous checking, family can either be AF_INET or AF_INET6 */
	if (policy->family == AF_INET)
		rt_genid_bump_ipv4(net);
	else
		rt_genid_bump_ipv6(net);

	if (delpol) {
		xfrm_policy_requeue(delpol, policy);
		__xfrm_policy_unlink(delpol, dir);
	}
	policy->index = delpol ? delpol->index : xfrm_gen_index(net, dir, policy->index);
	hlist_add_head(&policy->byidx, net->xfrm.policy_byidx+idx_hash(net, policy->index));
	policy->curlft.add_time = ktime_get_real_seconds();
	policy->curlft.use_time = 0;
	if (!mod_timer(&policy->timer, jiffies + HZ))
		xfrm_pol_hold(policy);
	spin_unlock_bh(&net->xfrm.xfrm_policy_lock);

	if (delpol)
		xfrm_policy_kill(delpol);
	else if (xfrm_bydst_should_resize(net, dir, NULL))
		schedule_work(&net->xfrm.policy_hash_work);

	return 0;
}
EXPORT_SYMBOL(xfrm_policy_insert);

static struct xfrm_policy *
__xfrm_policy_bysel_ctx(struct hlist_head *chain, const struct xfrm_mark *mark,
			u32 if_id, u8 type, int dir, struct xfrm_selector *sel,
			struct xfrm_sec_ctx *ctx)
{
	struct xfrm_policy *pol;

	if (!chain)
		return NULL;

	hlist_for_each_entry(pol, chain, bydst) {
		if (pol->type == type &&
		    pol->if_id == if_id &&
		    xfrm_policy_mark_match(mark, pol) &&
		    !selector_cmp(sel, &pol->selector) &&
		    xfrm_sec_ctx_match(ctx, pol->security))
			return pol;
	}

	return NULL;
}

struct xfrm_policy *
xfrm_policy_bysel_ctx(struct net *net, const struct xfrm_mark *mark, u32 if_id,
		      u8 type, int dir, struct xfrm_selector *sel,
		      struct xfrm_sec_ctx *ctx, int delete, int *err)
{
	struct xfrm_pol_inexact_bin *bin = NULL;
	struct xfrm_policy *pol, *ret = NULL;
	struct hlist_head *chain;

	*err = 0;
	spin_lock_bh(&net->xfrm.xfrm_policy_lock);
	chain = policy_hash_bysel(net, sel, sel->family, dir);
	if (!chain) {
		struct xfrm_pol_inexact_candidates cand;
		int i;

		bin = xfrm_policy_inexact_lookup(net, type,
						 sel->family, dir, if_id);
		if (!bin) {
			spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
			return NULL;
		}

		if (!xfrm_policy_find_inexact_candidates(&cand, bin,
							 &sel->saddr,
							 &sel->daddr)) {
			spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
			return NULL;
		}

		pol = NULL;
		for (i = 0; i < ARRAY_SIZE(cand.res); i++) {
			struct xfrm_policy *tmp;

			tmp = __xfrm_policy_bysel_ctx(cand.res[i], mark,
						      if_id, type, dir,
						      sel, ctx);
			if (!tmp)
				continue;

			if (!pol || tmp->pos < pol->pos)
				pol = tmp;
		}
	} else {
		pol = __xfrm_policy_bysel_ctx(chain, mark, if_id, type, dir,
					      sel, ctx);
	}

	if (pol) {
		xfrm_pol_hold(pol);
		if (delete) {
			*err = security_xfrm_policy_delete(pol->security);
			if (*err) {
				spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
				return pol;
			}
			__xfrm_policy_unlink(pol, dir);
		}
		ret = pol;
	}
	spin_unlock_bh(&net->xfrm.xfrm_policy_lock);

	if (ret && delete)
		xfrm_policy_kill(ret);
	if (bin && delete)
		xfrm_policy_inexact_prune_bin(bin);
	return ret;
}
EXPORT_SYMBOL(xfrm_policy_bysel_ctx);

struct xfrm_policy *
xfrm_policy_byid(struct net *net, const struct xfrm_mark *mark, u32 if_id,
		 u8 type, int dir, u32 id, int delete, int *err)
{
	struct xfrm_policy *pol, *ret;
	struct hlist_head *chain;

	*err = -ENOENT;
	if (xfrm_policy_id2dir(id) != dir)
		return NULL;

	*err = 0;
	spin_lock_bh(&net->xfrm.xfrm_policy_lock);
	chain = net->xfrm.policy_byidx + idx_hash(net, id);
	ret = NULL;
	hlist_for_each_entry(pol, chain, byidx) {
		if (pol->type == type && pol->index == id &&
		    pol->if_id == if_id && xfrm_policy_mark_match(mark, pol)) {
			xfrm_pol_hold(pol);
			if (delete) {
				*err = security_xfrm_policy_delete(
						pol->security);
				if (*err) {
					spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
					return pol;
				}
				__xfrm_policy_unlink(pol, dir);
			}
			ret = pol;
			break;
		}
	}
	spin_unlock_bh(&net->xfrm.xfrm_policy_lock);

	if (ret && delete)
		xfrm_policy_kill(ret);
	return ret;
}
EXPORT_SYMBOL(xfrm_policy_byid);

#ifdef CONFIG_SECURITY_NETWORK_XFRM
static inline int
xfrm_policy_flush_secctx_check(struct net *net, u8 type, bool task_valid)
{
	struct xfrm_policy *pol;
	int err = 0;

	list_for_each_entry(pol, &net->xfrm.policy_all, walk.all) {
		if (pol->walk.dead ||
		    xfrm_policy_id2dir(pol->index) >= XFRM_POLICY_MAX ||
		    pol->type != type)
			continue;

		err = security_xfrm_policy_delete(pol->security);
		if (err) {
			xfrm_audit_policy_delete(pol, 0, task_valid);
			return err;
		}
	}
	return err;
}

static inline int xfrm_dev_policy_flush_secctx_check(struct net *net,
						     struct net_device *dev,
						     bool task_valid)
{
	struct xfrm_policy *pol;
	int err = 0;

	list_for_each_entry(pol, &net->xfrm.policy_all, walk.all) {
		if (pol->walk.dead ||
		    xfrm_policy_id2dir(pol->index) >= XFRM_POLICY_MAX ||
		    pol->xdo.dev != dev)
			continue;

		err = security_xfrm_policy_delete(pol->security);
		if (err) {
			xfrm_audit_policy_delete(pol, 0, task_valid);
			return err;
		}
	}
	return err;
}
#else
static inline int
xfrm_policy_flush_secctx_check(struct net *net, u8 type, bool task_valid)
{
	return 0;
}

static inline int xfrm_dev_policy_flush_secctx_check(struct net *net,
						     struct net_device *dev,
						     bool task_valid)
{
	return 0;
}
#endif

int xfrm_policy_flush(struct net *net, u8 type, bool task_valid)
{
	int dir, err = 0, cnt = 0;
	struct xfrm_policy *pol;

	spin_lock_bh(&net->xfrm.xfrm_policy_lock);

	err = xfrm_policy_flush_secctx_check(net, type, task_valid);
	if (err)
		goto out;

again:
	list_for_each_entry(pol, &net->xfrm.policy_all, walk.all) {
		if (pol->walk.dead)
			continue;

		dir = xfrm_policy_id2dir(pol->index);
		if (dir >= XFRM_POLICY_MAX ||
		    pol->type != type)
			continue;

		__xfrm_policy_unlink(pol, dir);
		spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
		xfrm_dev_policy_delete(pol);
		cnt++;
		xfrm_audit_policy_delete(pol, 1, task_valid);
		xfrm_policy_kill(pol);
		spin_lock_bh(&net->xfrm.xfrm_policy_lock);
		goto again;
	}
	if (cnt)
		__xfrm_policy_inexact_flush(net);
	else
		err = -ESRCH;
out:
	spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
	return err;
}
EXPORT_SYMBOL(xfrm_policy_flush);

int xfrm_dev_policy_flush(struct net *net, struct net_device *dev,
			  bool task_valid)
{
	int dir, err = 0, cnt = 0;
	struct xfrm_policy *pol;

	spin_lock_bh(&net->xfrm.xfrm_policy_lock);

	err = xfrm_dev_policy_flush_secctx_check(net, dev, task_valid);
	if (err)
		goto out;

again:
	list_for_each_entry(pol, &net->xfrm.policy_all, walk.all) {
		if (pol->walk.dead)
			continue;

		dir = xfrm_policy_id2dir(pol->index);
		if (dir >= XFRM_POLICY_MAX ||
		    pol->xdo.dev != dev)
			continue;

		__xfrm_policy_unlink(pol, dir);
		spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
		xfrm_dev_policy_delete(pol);
		cnt++;
		xfrm_audit_policy_delete(pol, 1, task_valid);
		xfrm_policy_kill(pol);
		spin_lock_bh(&net->xfrm.xfrm_policy_lock);
		goto again;
	}
	if (cnt)
		__xfrm_policy_inexact_flush(net);
	else
		err = -ESRCH;
out:
	spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
	return err;
}
EXPORT_SYMBOL(xfrm_dev_policy_flush);

int xfrm_policy_walk(struct net *net, struct xfrm_policy_walk *walk,
		     int (*func)(struct xfrm_policy *, int, int, void*),
		     void *data)
{
	struct xfrm_policy *pol;
	struct xfrm_policy_walk_entry *x;
	int error = 0;

	if (walk->type >= XFRM_POLICY_TYPE_MAX &&
	    walk->type != XFRM_POLICY_TYPE_ANY)
		return -EINVAL;

	if (list_empty(&walk->walk.all) && walk->seq != 0)
		return 0;

	spin_lock_bh(&net->xfrm.xfrm_policy_lock);
	if (list_empty(&walk->walk.all))
		x = list_first_entry(&net->xfrm.policy_all, struct xfrm_policy_walk_entry, all);
	else
		x = list_first_entry(&walk->walk.all,
				     struct xfrm_policy_walk_entry, all);

	list_for_each_entry_from(x, &net->xfrm.policy_all, all) {
		if (x->dead)
			continue;
		pol = container_of(x, struct xfrm_policy, walk);
		if (walk->type != XFRM_POLICY_TYPE_ANY &&
		    walk->type != pol->type)
			continue;
		error = func(pol, xfrm_policy_id2dir(pol->index),
			     walk->seq, data);
		if (error) {
			list_move_tail(&walk->walk.all, &x->all);
			goto out;
		}
		walk->seq++;
	}
	if (walk->seq == 0) {
		error = -ENOENT;
		goto out;
	}
	list_del_init(&walk->walk.all);
out:
	spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
	return error;
}
EXPORT_SYMBOL(xfrm_policy_walk);
1942
1943 void xfrm_policy_walk_init(struct xfrm_policy_walk *walk, u8 type)
1944 {
1945 INIT_LIST_HEAD(&walk->walk.all);
1946 walk->walk.dead = 1;
1947 walk->type = type;
1948 walk->seq = 0;
1949 }
1950 EXPORT_SYMBOL(xfrm_policy_walk_init);
1951
1952 void xfrm_policy_walk_done(struct xfrm_policy_walk *walk, struct net *net)
1953 {
1954 if (list_empty(&walk->walk.all))
1955 return;
1956
1957 spin_lock_bh(&net->xfrm.xfrm_policy_lock);
1958 list_del(&walk->walk.all);
1959 spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
1960 }
1961 EXPORT_SYMBOL(xfrm_policy_walk_done);
1962
1963 /*
1964 * Find the policy to apply to this flow.
1965 *
1966 * Returns 0 if a matching policy is found, else a negative errno.
1967 */
1968 static int xfrm_policy_match(const struct xfrm_policy *pol,
1969 const struct flowi *fl,
1970 u8 type, u16 family, u32 if_id)
1971 {
1972 const struct xfrm_selector *sel = &pol->selector;
1973 int ret = -ESRCH;
1974 bool match;
1975
1976 if (pol->family != family ||
1977 pol->if_id != if_id ||
1978 (fl->flowi_mark & pol->mark.m) != pol->mark.v ||
1979 pol->type != type)
1980 return ret;
1981
1982 match = xfrm_selector_match(sel, fl, family);
1983 if (match)
1984 ret = security_xfrm_policy_lookup(pol->security, fl->flowi_secid);
1985 return ret;
1986 }
1987
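/* Lockless lookup of the tree node whose prefix covers @addr. The
 * caller's seqcount detects concurrent tree changes, in which case the
 * search is simply restarted.
 */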
1988 static struct xfrm_pol_inexact_node *
1989 xfrm_policy_lookup_inexact_addr(const struct rb_root *r,
1990 seqcount_spinlock_t *count,
1991 const xfrm_address_t *addr, u16 family)
1992 {
1993 const struct rb_node *parent;
1994 int seq;
1995
1996 again:
1997 seq = read_seqcount_begin(count);
1998
1999 parent = rcu_dereference_raw(r->rb_node);
2000 while (parent) {
2001 struct xfrm_pol_inexact_node *node;
2002 int delta;
2003
2004 node = rb_entry(parent, struct xfrm_pol_inexact_node, node);
2005
2006 delta = xfrm_policy_addr_delta(addr, &node->addr,
2007 node->prefixlen, family);
2008 if (delta < 0) {
2009 parent = rcu_dereference_raw(parent->rb_left);
2010 continue;
2011 } else if (delta > 0) {
2012 parent = rcu_dereference_raw(parent->rb_right);
2013 continue;
2014 }
2015
2016 return node;
2017 }
2018
2019 if (read_seqcount_retry(count, seq))
2020 goto again;
2021
2022 return NULL;
2023 }
2024
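/* Fill @cand with the candidate policy lists for this saddr/daddr pair:
 * the bin-wide list plus, where present, the lists hanging off the
 * daddr, saddr and saddr+daddr tree nodes.
 */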
2025 static bool
2026 xfrm_policy_find_inexact_candidates(struct xfrm_pol_inexact_candidates *cand,
2027 struct xfrm_pol_inexact_bin *b,
2028 const xfrm_address_t *saddr,
2029 const xfrm_address_t *daddr)
2030 {
2031 struct xfrm_pol_inexact_node *n;
2032 u16 family;
2033
2034 if (!b)
2035 return false;
2036
2037 family = b->k.family;
2038 memset(cand, 0, sizeof(*cand));
2039 cand->res[XFRM_POL_CAND_ANY] = &b->hhead;
2040
2041 n = xfrm_policy_lookup_inexact_addr(&b->root_d, &b->count, daddr,
2042 family);
2043 if (n) {
2044 cand->res[XFRM_POL_CAND_DADDR] = &n->hhead;
2045 n = xfrm_policy_lookup_inexact_addr(&n->root, &b->count, saddr,
2046 family);
2047 if (n)
2048 cand->res[XFRM_POL_CAND_BOTH] = &n->hhead;
2049 }
2050
2051 n = xfrm_policy_lookup_inexact_addr(&b->root_s, &b->count, saddr,
2052 family);
2053 if (n)
2054 cand->res[XFRM_POL_CAND_SADDR] = &n->hhead;
2055
2056 return true;
2057 }
2058
2059 static struct xfrm_pol_inexact_bin *
2060 xfrm_policy_inexact_lookup_rcu(struct net *net, u8 type, u16 family,
2061 u8 dir, u32 if_id)
2062 {
2063 struct xfrm_pol_inexact_key k = {
2064 .family = family,
2065 .type = type,
2066 .dir = dir,
2067 .if_id = if_id,
2068 };
2069
2070 write_pnet(&k.net, net);
2071
2072 return rhashtable_lookup(&xfrm_policy_inexact_table, &k,
2073 xfrm_pol_inexact_params);
2074 }
2075
2076 static struct xfrm_pol_inexact_bin *
2077 xfrm_policy_inexact_lookup(struct net *net, u8 type, u16 family,
2078 u8 dir, u32 if_id)
2079 {
2080 struct xfrm_pol_inexact_bin *bin;
2081
2082 lockdep_assert_held(&net->xfrm.xfrm_policy_lock);
2083
2084 rcu_read_lock();
2085 bin = xfrm_policy_inexact_lookup_rcu(net, type, family, dir, if_id);
2086 rcu_read_unlock();
2087
2088 return bin;
2089 }
2090
2091 static struct xfrm_policy *
2092 __xfrm_policy_eval_candidates(struct hlist_head *chain,
2093 struct xfrm_policy *prefer,
2094 const struct flowi *fl,
2095 u8 type, u16 family, u32 if_id)
2096 {
2097 u32 priority = prefer ? prefer->priority : ~0u;
2098 struct xfrm_policy *pol;
2099
2100 if (!chain)
2101 return NULL;
2102
2103 hlist_for_each_entry_rcu(pol, chain, bydst) {
2104 int err;
2105
2106 if (pol->priority > priority)
2107 break;
2108
2109 err = xfrm_policy_match(pol, fl, type, family, if_id);
2110 if (err) {
2111 if (err != -ESRCH)
2112 return ERR_PTR(err);
2113
2114 continue;
2115 }
2116
2117 if (prefer) {
2118 /* matches. Is it older than *prefer? */
2119 if (pol->priority == priority &&
2120 prefer->pos < pol->pos)
2121 return prefer;
2122 }
2123
2124 return pol;
2125 }
2126
2127 return NULL;
2128 }
2129
2130 static struct xfrm_policy *
2131 xfrm_policy_eval_candidates(struct xfrm_pol_inexact_candidates *cand,
2132 struct xfrm_policy *prefer,
2133 const struct flowi *fl,
2134 u8 type, u16 family, u32 if_id)
2135 {
2136 struct xfrm_policy *tmp;
2137 int i;
2138
2139 for (i = 0; i < ARRAY_SIZE(cand->res); i++) {
2140 tmp = __xfrm_policy_eval_candidates(cand->res[i],
2141 prefer,
2142 fl, type, family, if_id);
2143 if (!tmp)
2144 continue;
2145
2146 if (IS_ERR(tmp))
2147 return tmp;
2148 prefer = tmp;
2149 }
2150
2151 return prefer;
2152 }
2153
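/* Core policy lookup for a flow: search the exact (hashed) policies
 * first, then let the inexact candidates supersede that result when one
 * of them matches with a better priority (or same priority but older).
 * The whole lookup is retried if the hash tables were resized meanwhile.
 */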
2154 static struct xfrm_policy *xfrm_policy_lookup_bytype(struct net *net, u8 type,
2155 const struct flowi *fl,
2156 u16 family, u8 dir,
2157 u32 if_id)
2158 {
2159 struct xfrm_pol_inexact_candidates cand;
2160 const xfrm_address_t *daddr, *saddr;
2161 struct xfrm_pol_inexact_bin *bin;
2162 struct xfrm_policy *pol, *ret;
2163 struct hlist_head *chain;
2164 unsigned int sequence;
2165 int err;
2166
2167 daddr = xfrm_flowi_daddr(fl, family);
2168 saddr = xfrm_flowi_saddr(fl, family);
2169 if (unlikely(!daddr || !saddr))
2170 return NULL;
2171
2172 rcu_read_lock();
2173 retry:
2174 do {
2175 sequence = read_seqcount_begin(&net->xfrm.xfrm_policy_hash_generation);
2176 chain = policy_hash_direct(net, daddr, saddr, family, dir);
2177 } while (read_seqcount_retry(&net->xfrm.xfrm_policy_hash_generation, sequence));
2178
2179 ret = NULL;
2180 hlist_for_each_entry_rcu(pol, chain, bydst) {
2181 err = xfrm_policy_match(pol, fl, type, family, if_id);
2182 if (err) {
2183 if (err == -ESRCH)
2184 continue;
2185 else {
2186 ret = ERR_PTR(err);
2187 goto fail;
2188 }
2189 } else {
2190 ret = pol;
2191 break;
2192 }
2193 }
2194 if (ret && ret->xdo.type == XFRM_DEV_OFFLOAD_PACKET)
2195 goto skip_inexact;
2196
2197 bin = xfrm_policy_inexact_lookup_rcu(net, type, family, dir, if_id);
2198 if (!bin || !xfrm_policy_find_inexact_candidates(&cand, bin, saddr,
2199 daddr))
2200 goto skip_inexact;
2201
2202 pol = xfrm_policy_eval_candidates(&cand, ret, fl, type,
2203 family, if_id);
2204 if (pol) {
2205 ret = pol;
2206 if (IS_ERR(pol))
2207 goto fail;
2208 }
2209
2210 skip_inexact:
2211 if (read_seqcount_retry(&net->xfrm.xfrm_policy_hash_generation, sequence))
2212 goto retry;
2213
2214 if (ret && !xfrm_pol_hold_rcu(ret))
2215 goto retry;
2216 fail:
2217 rcu_read_unlock();
2218
2219 return ret;
2220 }
2221
2222 static struct xfrm_policy *xfrm_policy_lookup(struct net *net,
2223 const struct flowi *fl,
2224 u16 family, u8 dir, u32 if_id)
2225 {
2226 #ifdef CONFIG_XFRM_SUB_POLICY
2227 struct xfrm_policy *pol;
2228
2229 pol = xfrm_policy_lookup_bytype(net, XFRM_POLICY_TYPE_SUB, fl, family,
2230 dir, if_id);
2231 if (pol != NULL)
2232 return pol;
2233 #endif
2234 return xfrm_policy_lookup_bytype(net, XFRM_POLICY_TYPE_MAIN, fl, family,
2235 dir, if_id);
2236 }
2237
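/* Match a per-socket policy against the flow. Returns NULL when the
 * socket has no applicable policy and an ERR_PTR on security failures.
 */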
2238 static struct xfrm_policy *xfrm_sk_policy_lookup(const struct sock *sk, int dir,
2239 const struct flowi *fl,
2240 u16 family, u32 if_id)
2241 {
2242 struct xfrm_policy *pol;
2243
2244 rcu_read_lock();
2245 again:
2246 pol = rcu_dereference(sk->sk_policy[dir]);
2247 if (pol != NULL) {
2248 bool match;
2249 int err = 0;
2250
2251 if (pol->family != family) {
2252 pol = NULL;
2253 goto out;
2254 }
2255
2256 match = xfrm_selector_match(&pol->selector, fl, family);
2257 if (match) {
2258 if ((READ_ONCE(sk->sk_mark) & pol->mark.m) != pol->mark.v ||
2259 pol->if_id != if_id) {
2260 pol = NULL;
2261 goto out;
2262 }
2263 err = security_xfrm_policy_lookup(pol->security,
2264 fl->flowi_secid);
2265 if (!err) {
2266 if (!xfrm_pol_hold_rcu(pol))
2267 goto again;
2268 } else if (err == -ESRCH) {
2269 pol = NULL;
2270 } else {
2271 pol = ERR_PTR(err);
2272 }
2273 } else
2274 pol = NULL;
2275 }
2276 out:
2277 rcu_read_unlock();
2278 return pol;
2279 }
2280
2281 static void __xfrm_policy_link(struct xfrm_policy *pol, int dir)
2282 {
2283 struct net *net = xp_net(pol);
2284
2285 list_add(&pol->walk.all, &net->xfrm.policy_all);
2286 net->xfrm.policy_count[dir]++;
2287 xfrm_pol_hold(pol);
2288 }
2289
2290 static struct xfrm_policy *__xfrm_policy_unlink(struct xfrm_policy *pol,
2291 int dir)
2292 {
2293 struct net *net = xp_net(pol);
2294
2295 if (list_empty(&pol->walk.all))
2296 return NULL;
2297
2298 /* Socket policies are not hashed. */
2299 if (!hlist_unhashed(&pol->bydst)) {
2300 hlist_del_rcu(&pol->bydst);
2301 hlist_del_init(&pol->bydst_inexact_list);
2302 hlist_del(&pol->byidx);
2303 }
2304
2305 list_del_init(&pol->walk.all);
2306 net->xfrm.policy_count[dir]--;
2307
2308 return pol;
2309 }
2310
2311 static void xfrm_sk_policy_link(struct xfrm_policy *pol, int dir)
2312 {
2313 __xfrm_policy_link(pol, XFRM_POLICY_MAX + dir);
2314 }
2315
2316 static void xfrm_sk_policy_unlink(struct xfrm_policy *pol, int dir)
2317 {
2318 __xfrm_policy_unlink(pol, XFRM_POLICY_MAX + dir);
2319 }
2320
2321 int xfrm_policy_delete(struct xfrm_policy *pol, int dir)
2322 {
2323 struct net *net = xp_net(pol);
2324
2325 spin_lock_bh(&net->xfrm.xfrm_policy_lock);
2326 pol = __xfrm_policy_unlink(pol, dir);
2327 spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
2328 if (pol) {
2329 xfrm_dev_policy_delete(pol);
2330 xfrm_policy_kill(pol);
2331 return 0;
2332 }
2333 return -ENOENT;
2334 }
2335 EXPORT_SYMBOL(xfrm_policy_delete);
2336
2337 int xfrm_sk_policy_insert(struct sock *sk, int dir, struct xfrm_policy *pol)
2338 {
2339 struct net *net = sock_net(sk);
2340 struct xfrm_policy *old_pol;
2341
2342 #ifdef CONFIG_XFRM_SUB_POLICY
2343 if (pol && pol->type != XFRM_POLICY_TYPE_MAIN)
2344 return -EINVAL;
2345 #endif
2346
2347 spin_lock_bh(&net->xfrm.xfrm_policy_lock);
2348 old_pol = rcu_dereference_protected(sk->sk_policy[dir],
2349 lockdep_is_held(&net->xfrm.xfrm_policy_lock));
2350 if (pol) {
2351 pol->curlft.add_time = ktime_get_real_seconds();
2352 pol->index = xfrm_gen_index(net, XFRM_POLICY_MAX+dir, 0);
2353 xfrm_sk_policy_link(pol, dir);
2354 }
2355 rcu_assign_pointer(sk->sk_policy[dir], pol);
2356 if (old_pol) {
2357 if (pol)
2358 xfrm_policy_requeue(old_pol, pol);
2359
2360 /* Unlinking always succeeds. This is the only function
2361 * allowed to delete or replace a socket policy.
2362 */
2363 xfrm_sk_policy_unlink(old_pol, dir);
2364 }
2365 spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
2366
2367 if (old_pol) {
2368 xfrm_policy_kill(old_pol);
2369 }
2370 return 0;
2371 }
2372
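/* Duplicate a socket policy for a newly cloned socket and link the copy
 * in the same direction. Returns NULL on allocation or security errors.
 */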
2373 static struct xfrm_policy *clone_policy(const struct xfrm_policy *old, int dir)
2374 {
2375 struct xfrm_policy *newp = xfrm_policy_alloc(xp_net(old), GFP_ATOMIC);
2376 struct net *net = xp_net(old);
2377
2378 if (newp) {
2379 newp->selector = old->selector;
2380 if (security_xfrm_policy_clone(old->security,
2381 &newp->security)) {
2382 kfree(newp);
2383 return NULL; /* ENOMEM */
2384 }
2385 newp->lft = old->lft;
2386 newp->curlft = old->curlft;
2387 newp->mark = old->mark;
2388 newp->if_id = old->if_id;
2389 newp->action = old->action;
2390 newp->flags = old->flags;
2391 newp->xfrm_nr = old->xfrm_nr;
2392 newp->index = old->index;
2393 newp->type = old->type;
2394 newp->family = old->family;
2395 memcpy(newp->xfrm_vec, old->xfrm_vec,
2396 newp->xfrm_nr*sizeof(struct xfrm_tmpl));
2397 spin_lock_bh(&net->xfrm.xfrm_policy_lock);
2398 xfrm_sk_policy_link(newp, dir);
2399 spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
2400 xfrm_pol_put(newp);
2401 }
2402 return newp;
2403 }
2404
2405 int __xfrm_sk_clone_policy(struct sock *sk, const struct sock *osk)
2406 {
2407 const struct xfrm_policy *p;
2408 struct xfrm_policy *np;
2409 int i, ret = 0;
2410
2411 rcu_read_lock();
2412 for (i = 0; i < 2; i++) {
2413 p = rcu_dereference(osk->sk_policy[i]);
2414 if (p) {
2415 np = clone_policy(p, i);
2416 if (unlikely(!np)) {
2417 ret = -ENOMEM;
2418 break;
2419 }
2420 rcu_assign_pointer(sk->sk_policy[i], np);
2421 }
2422 }
2423 rcu_read_unlock();
2424 return ret;
2425 }
2426
2427 static int
2428 xfrm_get_saddr(struct net *net, int oif, xfrm_address_t *local,
2429 xfrm_address_t *remote, unsigned short family, u32 mark)
2430 {
2431 int err;
2432 const struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
2433
2434 if (unlikely(afinfo == NULL))
2435 return -EINVAL;
2436 err = afinfo->get_saddr(net, oif, local, remote, mark);
2437 rcu_read_unlock();
2438 return err;
2439 }
2440
2441 /* Resolve list of templates for the flow, given policy. */
2442
2443 static int
2444 xfrm_tmpl_resolve_one(struct xfrm_policy *policy, const struct flowi *fl,
2445 struct xfrm_state **xfrm, unsigned short family)
2446 {
2447 struct net *net = xp_net(policy);
2448 int nx;
2449 int i, error;
2450 xfrm_address_t *daddr = xfrm_flowi_daddr(fl, family);
2451 xfrm_address_t *saddr = xfrm_flowi_saddr(fl, family);
2452 xfrm_address_t tmp;
2453
2454 for (nx = 0, i = 0; i < policy->xfrm_nr; i++) {
2455 struct xfrm_state *x;
2456 xfrm_address_t *remote = daddr;
2457 xfrm_address_t *local = saddr;
2458 struct xfrm_tmpl *tmpl = &policy->xfrm_vec[i];
2459
2460 if (tmpl->mode == XFRM_MODE_TUNNEL ||
2461 tmpl->mode == XFRM_MODE_BEET) {
2462 remote = &tmpl->id.daddr;
2463 local = &tmpl->saddr;
2464 if (xfrm_addr_any(local, tmpl->encap_family)) {
2465 error = xfrm_get_saddr(net, fl->flowi_oif,
2466 &tmp, remote,
2467 tmpl->encap_family, 0);
2468 if (error)
2469 goto fail;
2470 local = &tmp;
2471 }
2472 }
2473
2474 x = xfrm_state_find(remote, local, fl, tmpl, policy, &error,
2475 family, policy->if_id);
2476
2477 if (x && x->km.state == XFRM_STATE_VALID) {
2478 xfrm[nx++] = x;
2479 daddr = remote;
2480 saddr = local;
2481 continue;
2482 }
2483 if (x) {
2484 error = (x->km.state == XFRM_STATE_ERROR ?
2485 -EINVAL : -EAGAIN);
2486 xfrm_state_put(x);
2487 } else if (error == -ESRCH) {
2488 error = -EAGAIN;
2489 }
2490
2491 if (!tmpl->optional)
2492 goto fail;
2493 }
2494 return nx;
2495
2496 fail:
2497 for (nx--; nx >= 0; nx--)
2498 xfrm_state_put(xfrm[nx]);
2499 return error;
2500 }
2501
2502 static int
2503 xfrm_tmpl_resolve(struct xfrm_policy **pols, int npols, const struct flowi *fl,
2504 struct xfrm_state **xfrm, unsigned short family)
2505 {
2506 struct xfrm_state *tp[XFRM_MAX_DEPTH];
2507 struct xfrm_state **tpp = (npols > 1) ? tp : xfrm;
2508 int cnx = 0;
2509 int error;
2510 int ret;
2511 int i;
2512
2513 for (i = 0; i < npols; i++) {
2514 if (cnx + pols[i]->xfrm_nr >= XFRM_MAX_DEPTH) {
2515 error = -ENOBUFS;
2516 goto fail;
2517 }
2518
2519 ret = xfrm_tmpl_resolve_one(pols[i], fl, &tpp[cnx], family);
2520 if (ret < 0) {
2521 error = ret;
2522 goto fail;
2523 } else
2524 cnx += ret;
2525 }
2526
2527 /* found states are sorted for outbound processing */
2528 if (npols > 1)
2529 xfrm_state_sort(xfrm, tpp, cnx, family);
2530
2531 return cnx;
2532
2533 fail:
2534 for (cnx--; cnx >= 0; cnx--)
2535 xfrm_state_put(tpp[cnx]);
2536 return error;
2537
2538 }
2539
2540 static int xfrm_get_tos(const struct flowi *fl, int family)
2541 {
2542 if (family == AF_INET)
2543 return IPTOS_RT_MASK & fl->u.ip4.flowi4_tos;
2544
2545 return 0;
2546 }
2547
2548 static inline struct xfrm_dst *xfrm_alloc_dst(struct net *net, int family)
2549 {
2550 const struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
2551 struct dst_ops *dst_ops;
2552 struct xfrm_dst *xdst;
2553
2554 if (!afinfo)
2555 return ERR_PTR(-EINVAL);
2556
2557 switch (family) {
2558 case AF_INET:
2559 dst_ops = &net->xfrm.xfrm4_dst_ops;
2560 break;
2561 #if IS_ENABLED(CONFIG_IPV6)
2562 case AF_INET6:
2563 dst_ops = &net->xfrm.xfrm6_dst_ops;
2564 break;
2565 #endif
2566 default:
2567 BUG();
2568 }
2569 xdst = dst_alloc(dst_ops, NULL, 1, DST_OBSOLETE_NONE, 0);
2570
2571 if (likely(xdst)) {
2572 memset_after(xdst, 0, u.dst);
2573 } else
2574 xdst = ERR_PTR(-ENOBUFS);
2575
2576 rcu_read_unlock();
2577
2578 return xdst;
2579 }
2580
2581 static void xfrm_init_path(struct xfrm_dst *path, struct dst_entry *dst,
2582 int nfheader_len)
2583 {
2584 if (dst->ops->family == AF_INET6) {
2585 struct rt6_info *rt = (struct rt6_info *)dst;
2586 path->path_cookie = rt6_get_cookie(rt);
2587 path->u.rt6.rt6i_nfheader_len = nfheader_len;
2588 }
2589 }
2590
2591 static inline int xfrm_fill_dst(struct xfrm_dst *xdst, struct net_device *dev,
2592 const struct flowi *fl)
2593 {
2594 const struct xfrm_policy_afinfo *afinfo =
2595 xfrm_policy_get_afinfo(xdst->u.dst.ops->family);
2596 int err;
2597
2598 if (!afinfo)
2599 return -EINVAL;
2600
2601 err = afinfo->fill_dst(xdst, dev, fl);
2602
2603 rcu_read_unlock();
2604
2605 return err;
2606 }
2607
2608
2609 /* Allocate chain of dst_entry's, attach known xfrm's, calculate
2610 * all the metrics... Shortly, bundle a bundle.
2611 */
2612
2613 static struct dst_entry *xfrm_bundle_create(struct xfrm_policy *policy,
2614 struct xfrm_state **xfrm,
2615 struct xfrm_dst **bundle,
2616 int nx,
2617 const struct flowi *fl,
2618 struct dst_entry *dst)
2619 {
2620 const struct xfrm_state_afinfo *afinfo;
2621 const struct xfrm_mode *inner_mode;
2622 struct net *net = xp_net(policy);
2623 unsigned long now = jiffies;
2624 struct net_device *dev;
2625 struct xfrm_dst *xdst_prev = NULL;
2626 struct xfrm_dst *xdst0 = NULL;
2627 int i = 0;
2628 int err;
2629 int header_len = 0;
2630 int nfheader_len = 0;
2631 int trailer_len = 0;
2632 int tos;
2633 int family = policy->selector.family;
2634 xfrm_address_t saddr, daddr;
2635
2636 xfrm_flowi_addr_get(fl, &saddr, &daddr, family);
2637
2638 tos = xfrm_get_tos(fl, family);
2639
2640 dst_hold(dst);
2641
2642 for (; i < nx; i++) {
2643 struct xfrm_dst *xdst = xfrm_alloc_dst(net, family);
2644 struct dst_entry *dst1 = &xdst->u.dst;
2645
2646 err = PTR_ERR(xdst);
2647 if (IS_ERR(xdst)) {
2648 dst_release(dst);
2649 goto put_states;
2650 }
2651
2652 bundle[i] = xdst;
2653 if (!xdst_prev)
2654 xdst0 = xdst;
2655 else
2656 /* Ref count is taken during xfrm_alloc_dst()
2657 * No need to do dst_clone() on dst1
2658 */
2659 xfrm_dst_set_child(xdst_prev, &xdst->u.dst);
2660
2661 if (xfrm[i]->sel.family == AF_UNSPEC) {
2662 inner_mode = xfrm_ip2inner_mode(xfrm[i],
2663 xfrm_af2proto(family));
2664 if (!inner_mode) {
2665 err = -EAFNOSUPPORT;
2666 dst_release(dst);
2667 goto put_states;
2668 }
2669 } else
2670 inner_mode = &xfrm[i]->inner_mode;
2671
2672 xdst->route = dst;
2673 dst_copy_metrics(dst1, dst);
2674
2675 if (xfrm[i]->props.mode != XFRM_MODE_TRANSPORT) {
2676 __u32 mark = 0;
2677 int oif;
2678
2679 if (xfrm[i]->props.smark.v || xfrm[i]->props.smark.m)
2680 mark = xfrm_smark_get(fl->flowi_mark, xfrm[i]);
2681
2682 if (xfrm[i]->xso.type != XFRM_DEV_OFFLOAD_PACKET)
2683 family = xfrm[i]->props.family;
2684
2685 oif = fl->flowi_oif ? : fl->flowi_l3mdev;
2686 dst = xfrm_dst_lookup(xfrm[i], tos, oif,
2687 &saddr, &daddr, family, mark);
2688 err = PTR_ERR(dst);
2689 if (IS_ERR(dst))
2690 goto put_states;
2691 } else
2692 dst_hold(dst);
2693
2694 dst1->xfrm = xfrm[i];
2695 xdst->xfrm_genid = xfrm[i]->genid;
2696
2697 dst1->obsolete = DST_OBSOLETE_FORCE_CHK;
2698 dst1->lastuse = now;
2699
2700 dst1->input = dst_discard;
2701
2702 rcu_read_lock();
2703 afinfo = xfrm_state_afinfo_get_rcu(inner_mode->family);
2704 if (likely(afinfo))
2705 dst1->output = afinfo->output;
2706 else
2707 dst1->output = dst_discard_out;
2708 rcu_read_unlock();
2709
2710 xdst_prev = xdst;
2711
2712 header_len += xfrm[i]->props.header_len;
2713 if (xfrm[i]->type->flags & XFRM_TYPE_NON_FRAGMENT)
2714 nfheader_len += xfrm[i]->props.header_len;
2715 trailer_len += xfrm[i]->props.trailer_len;
2716 }
2717
2718 xfrm_dst_set_child(xdst_prev, dst);
2719 xdst0->path = dst;
2720
2721 err = -ENODEV;
2722 dev = dst->dev;
2723 if (!dev)
2724 goto free_dst;
2725
2726 xfrm_init_path(xdst0, dst, nfheader_len);
2727 xfrm_init_pmtu(bundle, nx);
2728
2729 for (xdst_prev = xdst0; xdst_prev != (struct xfrm_dst *)dst;
2730 xdst_prev = (struct xfrm_dst *) xfrm_dst_child(&xdst_prev->u.dst)) {
2731 err = xfrm_fill_dst(xdst_prev, dev, fl);
2732 if (err)
2733 goto free_dst;
2734
2735 xdst_prev->u.dst.header_len = header_len;
2736 xdst_prev->u.dst.trailer_len = trailer_len;
2737 header_len -= xdst_prev->u.dst.xfrm->props.header_len;
2738 trailer_len -= xdst_prev->u.dst.xfrm->props.trailer_len;
2739 }
2740
2741 return &xdst0->u.dst;
2742
2743 put_states:
2744 for (; i < nx; i++)
2745 xfrm_state_put(xfrm[i]);
2746 free_dst:
2747 if (xdst0)
2748 dst_release_immediate(&xdst0->u.dst);
2749
2750 return ERR_PTR(err);
2751 }
2752
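/* Given the policy in pols[0], optionally add the matching main policy
 * (sub-policy compilation only) and sum up the template count. A value
 * of -1 in *num_xfrms signals that one of the policies blocks the flow.
 */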
2753 static int xfrm_expand_policies(const struct flowi *fl, u16 family,
2754 struct xfrm_policy **pols,
2755 int *num_pols, int *num_xfrms)
2756 {
2757 int i;
2758
2759 if (*num_pols == 0 || !pols[0]) {
2760 *num_pols = 0;
2761 *num_xfrms = 0;
2762 return 0;
2763 }
2764 if (IS_ERR(pols[0])) {
2765 *num_pols = 0;
2766 return PTR_ERR(pols[0]);
2767 }
2768
2769 *num_xfrms = pols[0]->xfrm_nr;
2770
2771 #ifdef CONFIG_XFRM_SUB_POLICY
2772 if (pols[0]->action == XFRM_POLICY_ALLOW &&
2773 pols[0]->type != XFRM_POLICY_TYPE_MAIN) {
2774 pols[1] = xfrm_policy_lookup_bytype(xp_net(pols[0]),
2775 XFRM_POLICY_TYPE_MAIN,
2776 fl, family,
2777 XFRM_POLICY_OUT,
2778 pols[0]->if_id);
2779 if (pols[1]) {
2780 if (IS_ERR(pols[1])) {
2781 xfrm_pols_put(pols, *num_pols);
2782 *num_pols = 0;
2783 return PTR_ERR(pols[1]);
2784 }
2785 (*num_pols)++;
2786 (*num_xfrms) += pols[1]->xfrm_nr;
2787 }
2788 }
2789 #endif
2790 for (i = 0; i < *num_pols; i++) {
2791 if (pols[i]->action != XFRM_POLICY_ALLOW) {
2792 *num_xfrms = -1;
2793 break;
2794 }
2795 }
2796
2797 return 0;
2798
2799 }
2800
2801 static struct xfrm_dst *
2802 xfrm_resolve_and_create_bundle(struct xfrm_policy **pols, int num_pols,
2803 const struct flowi *fl, u16 family,
2804 struct dst_entry *dst_orig)
2805 {
2806 struct net *net = xp_net(pols[0]);
2807 struct xfrm_state *xfrm[XFRM_MAX_DEPTH];
2808 struct xfrm_dst *bundle[XFRM_MAX_DEPTH];
2809 struct xfrm_dst *xdst;
2810 struct dst_entry *dst;
2811 int err;
2812
2813 /* Try to instantiate a bundle */
2814 err = xfrm_tmpl_resolve(pols, num_pols, fl, xfrm, family);
2815 if (err <= 0) {
2816 if (err == 0)
2817 return NULL;
2818
2819 if (err != -EAGAIN)
2820 XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTPOLERROR);
2821 return ERR_PTR(err);
2822 }
2823
2824 dst = xfrm_bundle_create(pols[0], xfrm, bundle, err, fl, dst_orig);
2825 if (IS_ERR(dst)) {
2826 XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTBUNDLEGENERROR);
2827 return ERR_CAST(dst);
2828 }
2829
2830 xdst = (struct xfrm_dst *)dst;
2831 xdst->num_xfrms = err;
2832 xdst->num_pols = num_pols;
2833 memcpy(xdst->pols, pols, sizeof(struct xfrm_policy *) * num_pols);
2834 xdst->policy_genid = atomic_read(&pols[0]->genid);
2835
2836 return xdst;
2837 }
2838
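/* Hold-timer callback for a queueing bundle: retry the lookup, and while
 * the needed states are still unresolved re-arm the timer with
 * exponential backoff; once resolved, push the queued packets out.
 */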
2839 static void xfrm_policy_queue_process(struct timer_list *t)
2840 {
2841 struct sk_buff *skb;
2842 struct sock *sk;
2843 struct dst_entry *dst;
2844 struct xfrm_policy *pol = from_timer(pol, t, polq.hold_timer);
2845 struct net *net = xp_net(pol);
2846 struct xfrm_policy_queue *pq = &pol->polq;
2847 struct flowi fl;
2848 struct sk_buff_head list;
2849 __u32 skb_mark;
2850
2851 spin_lock(&pq->hold_queue.lock);
2852 skb = skb_peek(&pq->hold_queue);
2853 if (!skb) {
2854 spin_unlock(&pq->hold_queue.lock);
2855 goto out;
2856 }
2857 dst = skb_dst(skb);
2858 sk = skb->sk;
2859
2860 /* Fixup the mark to support VTI. */
2861 skb_mark = skb->mark;
2862 skb->mark = pol->mark.v;
2863 xfrm_decode_session(skb, &fl, dst->ops->family);
2864 skb->mark = skb_mark;
2865 spin_unlock(&pq->hold_queue.lock);
2866
2867 dst_hold(xfrm_dst_path(dst));
2868 dst = xfrm_lookup(net, xfrm_dst_path(dst), &fl, sk, XFRM_LOOKUP_QUEUE);
2869 if (IS_ERR(dst))
2870 goto purge_queue;
2871
2872 if (dst->flags & DST_XFRM_QUEUE) {
2873 dst_release(dst);
2874
2875 if (pq->timeout >= XFRM_QUEUE_TMO_MAX)
2876 goto purge_queue;
2877
2878 pq->timeout = pq->timeout << 1;
2879 if (!mod_timer(&pq->hold_timer, jiffies + pq->timeout))
2880 xfrm_pol_hold(pol);
2881 goto out;
2882 }
2883
2884 dst_release(dst);
2885
2886 __skb_queue_head_init(&list);
2887
2888 spin_lock(&pq->hold_queue.lock);
2889 pq->timeout = 0;
2890 skb_queue_splice_init(&pq->hold_queue, &list);
2891 spin_unlock(&pq->hold_queue.lock);
2892
2893 while (!skb_queue_empty(&list)) {
2894 skb = __skb_dequeue(&list);
2895
2896 /* Fixup the mark to support VTI. */
2897 skb_mark = skb->mark;
2898 skb->mark = pol->mark.v;
2899 xfrm_decode_session(skb, &fl, skb_dst(skb)->ops->family);
2900 skb->mark = skb_mark;
2901
2902 dst_hold(xfrm_dst_path(skb_dst(skb)));
2903 dst = xfrm_lookup(net, xfrm_dst_path(skb_dst(skb)), &fl, skb->sk, 0);
2904 if (IS_ERR(dst)) {
2905 kfree_skb(skb);
2906 continue;
2907 }
2908
2909 nf_reset_ct(skb);
2910 skb_dst_drop(skb);
2911 skb_dst_set(skb, dst);
2912
2913 dst_output(net, skb->sk, skb);
2914 }
2915
2916 out:
2917 xfrm_pol_put(pol);
2918 return;
2919
2920 purge_queue:
2921 pq->timeout = 0;
2922 skb_queue_purge(&pq->hold_queue);
2923 xfrm_pol_put(pol);
2924 }
2925
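/* Output handler of the dummy bundle: park the packet on the policy's
 * hold queue (bounded by XFRM_MAX_QUEUE_LEN) until its states resolve.
 */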
2926 static int xdst_queue_output(struct net *net, struct sock *sk, struct sk_buff *skb)
2927 {
2928 unsigned long sched_next;
2929 struct dst_entry *dst = skb_dst(skb);
2930 struct xfrm_dst *xdst = (struct xfrm_dst *) dst;
2931 struct xfrm_policy *pol = xdst->pols[0];
2932 struct xfrm_policy_queue *pq = &pol->polq;
2933
2934 if (unlikely(skb_fclone_busy(sk, skb))) {
2935 kfree_skb(skb);
2936 return 0;
2937 }
2938
2939 if (pq->hold_queue.qlen > XFRM_MAX_QUEUE_LEN) {
2940 kfree_skb(skb);
2941 return -EAGAIN;
2942 }
2943
2944 skb_dst_force(skb);
2945
2946 spin_lock_bh(&pq->hold_queue.lock);
2947
2948 if (!pq->timeout)
2949 pq->timeout = XFRM_QUEUE_TMO_MIN;
2950
2951 sched_next = jiffies + pq->timeout;
2952
2953 if (del_timer(&pq->hold_timer)) {
2954 if (time_before(pq->hold_timer.expires, sched_next))
2955 sched_next = pq->hold_timer.expires;
2956 xfrm_pol_put(pol);
2957 }
2958
2959 __skb_queue_tail(&pq->hold_queue, skb);
2960 if (!mod_timer(&pq->hold_timer, sched_next))
2961 xfrm_pol_hold(pol);
2962
2963 spin_unlock_bh(&pq->hold_queue.lock);
2964
2965 return 0;
2966 }
2967
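/* Build a dummy bundle whose output queues packets until the required
 * states appear. If queueing was not requested, larval drop is enabled,
 * or no transforms are needed, a bare xfrm_dst is returned instead.
 */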
2968 static struct xfrm_dst *xfrm_create_dummy_bundle(struct net *net,
2969 struct xfrm_flo *xflo,
2970 const struct flowi *fl,
2971 int num_xfrms,
2972 u16 family)
2973 {
2974 int err;
2975 struct net_device *dev;
2976 struct dst_entry *dst;
2977 struct dst_entry *dst1;
2978 struct xfrm_dst *xdst;
2979
2980 xdst = xfrm_alloc_dst(net, family);
2981 if (IS_ERR(xdst))
2982 return xdst;
2983
2984 if (!(xflo->flags & XFRM_LOOKUP_QUEUE) ||
2985 net->xfrm.sysctl_larval_drop ||
2986 num_xfrms <= 0)
2987 return xdst;
2988
2989 dst = xflo->dst_orig;
2990 dst1 = &xdst->u.dst;
2991 dst_hold(dst);
2992 xdst->route = dst;
2993
2994 dst_copy_metrics(dst1, dst);
2995
2996 dst1->obsolete = DST_OBSOLETE_FORCE_CHK;
2997 dst1->flags |= DST_XFRM_QUEUE;
2998 dst1->lastuse = jiffies;
2999
3000 dst1->input = dst_discard;
3001 dst1->output = xdst_queue_output;
3002
3003 dst_hold(dst);
3004 xfrm_dst_set_child(xdst, dst);
3005 xdst->path = dst;
3006
3007 xfrm_init_path((struct xfrm_dst *)dst1, dst, 0);
3008
3009 err = -ENODEV;
3010 dev = dst->dev;
3011 if (!dev)
3012 goto free_dst;
3013
3014 err = xfrm_fill_dst(xdst, dev, fl);
3015 if (err)
3016 goto free_dst;
3017
3018 out:
3019 return xdst;
3020
3021 free_dst:
3022 dst_release(dst1);
3023 xdst = ERR_PTR(err);
3024 goto out;
3025 }
3026
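/* Look up the policies for @fl and instantiate a bundle from them,
 * falling back to a dummy (queueing) bundle when the states are not
 * available yet.
 */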
3027 static struct xfrm_dst *xfrm_bundle_lookup(struct net *net,
3028 const struct flowi *fl,
3029 u16 family, u8 dir,
3030 struct xfrm_flo *xflo, u32 if_id)
3031 {
3032 struct xfrm_policy *pols[XFRM_POLICY_TYPE_MAX];
3033 int num_pols = 0, num_xfrms = 0, err;
3034 struct xfrm_dst *xdst;
3035
3036 /* Resolve the policies to use if we couldn't get them from
3037 * a previous cache entry. */
3038 num_pols = 1;
3039 pols[0] = xfrm_policy_lookup(net, fl, family, dir, if_id);
3040 err = xfrm_expand_policies(fl, family, pols,
3041 &num_pols, &num_xfrms);
3042 if (err < 0)
3043 goto inc_error;
3044 if (num_pols == 0)
3045 return NULL;
3046 if (num_xfrms <= 0)
3047 goto make_dummy_bundle;
3048
3049 xdst = xfrm_resolve_and_create_bundle(pols, num_pols, fl, family,
3050 xflo->dst_orig);
3051 if (IS_ERR(xdst)) {
3052 err = PTR_ERR(xdst);
3053 if (err == -EREMOTE) {
3054 xfrm_pols_put(pols, num_pols);
3055 return NULL;
3056 }
3057
3058 if (err != -EAGAIN)
3059 goto error;
3060 goto make_dummy_bundle;
3061 } else if (xdst == NULL) {
3062 num_xfrms = 0;
3063 goto make_dummy_bundle;
3064 }
3065
3066 return xdst;
3067
3068 make_dummy_bundle:
3069 /* We found policies, but there are no bundles to instantiate:
3070 * either the policy blocks, it has no transformations, or
3071 * we could not build a template (no xfrm_states). */
3072 xdst = xfrm_create_dummy_bundle(net, xflo, fl, num_xfrms, family);
3073 if (IS_ERR(xdst)) {
3074 xfrm_pols_put(pols, num_pols);
3075 return ERR_CAST(xdst);
3076 }
3077 xdst->num_pols = num_pols;
3078 xdst->num_xfrms = num_xfrms;
3079 memcpy(xdst->pols, pols, sizeof(struct xfrm_policy *) * num_pols);
3080
3081 return xdst;
3082
3083 inc_error:
3084 XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTPOLERROR);
3085 error:
3086 xfrm_pols_put(pols, num_pols);
3087 return ERR_PTR(err);
3088 }
3089
3090 static struct dst_entry *make_blackhole(struct net *net, u16 family,
3091 struct dst_entry *dst_orig)
3092 {
3093 const struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
3094 struct dst_entry *ret;
3095
3096 if (!afinfo) {
3097 dst_release(dst_orig);
3098 return ERR_PTR(-EINVAL);
3099 } else {
3100 ret = afinfo->blackhole_route(net, dst_orig);
3101 }
3102 rcu_read_unlock();
3103
3104 return ret;
3105 }
3106
3107 /* Finds/creates a bundle for given flow and if_id
3108 *
3109 * At the moment we eat a raw IP route. Mostly to speed up lookups
3110 * on interfaces with disabled IPsec.
3111 *
3112 * xfrm_lookup uses an if_id of 0 by default, and is provided for
3113 * compatibility
3114 */
3115 struct dst_entry *xfrm_lookup_with_ifid(struct net *net,
3116 struct dst_entry *dst_orig,
3117 const struct flowi *fl,
3118 const struct sock *sk,
3119 int flags, u32 if_id)
3120 {
3121 struct xfrm_policy *pols[XFRM_POLICY_TYPE_MAX];
3122 struct xfrm_dst *xdst;
3123 struct dst_entry *dst, *route;
3124 u16 family = dst_orig->ops->family;
3125 u8 dir = XFRM_POLICY_OUT;
3126 int i, err, num_pols, num_xfrms = 0, drop_pols = 0;
3127
3128 dst = NULL;
3129 xdst = NULL;
3130 route = NULL;
3131
3132 sk = sk_const_to_full_sk(sk);
3133 if (sk && sk->sk_policy[XFRM_POLICY_OUT]) {
3134 num_pols = 1;
3135 pols[0] = xfrm_sk_policy_lookup(sk, XFRM_POLICY_OUT, fl, family,
3136 if_id);
3137 err = xfrm_expand_policies(fl, family, pols,
3138 &num_pols, &num_xfrms);
3139 if (err < 0)
3140 goto dropdst;
3141
3142 if (num_pols) {
3143 if (num_xfrms <= 0) {
3144 drop_pols = num_pols;
3145 goto no_transform;
3146 }
3147
3148 xdst = xfrm_resolve_and_create_bundle(
3149 pols, num_pols, fl,
3150 family, dst_orig);
3151
3152 if (IS_ERR(xdst)) {
3153 xfrm_pols_put(pols, num_pols);
3154 err = PTR_ERR(xdst);
3155 if (err == -EREMOTE)
3156 goto nopol;
3157
3158 goto dropdst;
3159 } else if (xdst == NULL) {
3160 num_xfrms = 0;
3161 drop_pols = num_pols;
3162 goto no_transform;
3163 }
3164
3165 route = xdst->route;
3166 }
3167 }
3168
3169 if (xdst == NULL) {
3170 struct xfrm_flo xflo;
3171
3172 xflo.dst_orig = dst_orig;
3173 xflo.flags = flags;
3174
3175 /* To accelerate a bit... */
3176 if (!if_id && ((dst_orig->flags & DST_NOXFRM) ||
3177 !net->xfrm.policy_count[XFRM_POLICY_OUT]))
3178 goto nopol;
3179
3180 xdst = xfrm_bundle_lookup(net, fl, family, dir, &xflo, if_id);
3181 if (xdst == NULL)
3182 goto nopol;
3183 if (IS_ERR(xdst)) {
3184 err = PTR_ERR(xdst);
3185 goto dropdst;
3186 }
3187
3188 num_pols = xdst->num_pols;
3189 num_xfrms = xdst->num_xfrms;
3190 memcpy(pols, xdst->pols, sizeof(struct xfrm_policy *) * num_pols);
3191 route = xdst->route;
3192 }
3193
3194 dst = &xdst->u.dst;
3195 if (route == NULL && num_xfrms > 0) {
3196 /* The only case when xfrm_bundle_lookup() returns a
3197 * bundle with a null route is when the template could
3198 * not be resolved. It means policies are there, but the
3199 * bundle could not be created, since we don't yet
3200 * have the xfrm_states. We need to wait for the KM to
3201 * negotiate new SAs or bail out with an error. */
3202 if (net->xfrm.sysctl_larval_drop) {
3203 XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTNOSTATES);
3204 err = -EREMOTE;
3205 goto error;
3206 }
3207
3208 err = -EAGAIN;
3209
3210 XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTNOSTATES);
3211 goto error;
3212 }
3213
3214 no_transform:
3215 if (num_pols == 0)
3216 goto nopol;
3217
3218 if ((flags & XFRM_LOOKUP_ICMP) &&
3219 !(pols[0]->flags & XFRM_POLICY_ICMP)) {
3220 err = -ENOENT;
3221 goto error;
3222 }
3223
3224 for (i = 0; i < num_pols; i++)
3225 WRITE_ONCE(pols[i]->curlft.use_time, ktime_get_real_seconds());
3226
3227 if (num_xfrms < 0) {
3228 /* Prohibit the flow */
3229 XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTPOLBLOCK);
3230 err = -EPERM;
3231 goto error;
3232 } else if (num_xfrms > 0) {
3233 /* Flow transformed */
3234 dst_release(dst_orig);
3235 } else {
3236 /* Flow passes untransformed */
3237 dst_release(dst);
3238 dst = dst_orig;
3239 }
3240 ok:
3241 xfrm_pols_put(pols, drop_pols);
3242 if (dst && dst->xfrm &&
3243 dst->xfrm->props.mode == XFRM_MODE_TUNNEL)
3244 dst->flags |= DST_XFRM_TUNNEL;
3245 return dst;
3246
3247 nopol:
3248 if ((!dst_orig->dev || !(dst_orig->dev->flags & IFF_LOOPBACK)) &&
3249 net->xfrm.policy_default[dir] == XFRM_USERPOLICY_BLOCK) {
3250 err = -EPERM;
3251 goto error;
3252 }
3253 if (!(flags & XFRM_LOOKUP_ICMP)) {
3254 dst = dst_orig;
3255 goto ok;
3256 }
3257 err = -ENOENT;
3258 error:
3259 dst_release(dst);
3260 dropdst:
3261 if (!(flags & XFRM_LOOKUP_KEEP_DST_REF))
3262 dst_release(dst_orig);
3263 xfrm_pols_put(pols, drop_pols);
3264 return ERR_PTR(err);
3265 }
3266 EXPORT_SYMBOL(xfrm_lookup_with_ifid);
3267
3268 /* Main function: finds/creates a bundle for given flow.
3269 *
3270 * At the moment we eat a raw IP route. Mostly to speed up lookups
3271 * on interfaces with disabled IPsec.
3272 */
3273 struct dst_entry *xfrm_lookup(struct net *net, struct dst_entry *dst_orig,
3274 const struct flowi *fl, const struct sock *sk,
3275 int flags)
3276 {
3277 return xfrm_lookup_with_ifid(net, dst_orig, fl, sk, flags, 0);
3278 }
3279 EXPORT_SYMBOL(xfrm_lookup);
3280
3281 /* Callers of xfrm_lookup_route() must ensure a call to dst_output().
3282 * Otherwise we may send out blackholed packets.
3283 */
3284 struct dst_entry *xfrm_lookup_route(struct net *net, struct dst_entry *dst_orig,
3285 const struct flowi *fl,
3286 const struct sock *sk, int flags)
3287 {
3288 struct dst_entry *dst = xfrm_lookup(net, dst_orig, fl, sk,
3289 flags | XFRM_LOOKUP_QUEUE |
3290 XFRM_LOOKUP_KEEP_DST_REF);
3291
3292 if (PTR_ERR(dst) == -EREMOTE)
3293 return make_blackhole(net, dst_orig->ops->family, dst_orig);
3294
3295 if (IS_ERR(dst))
3296 dst_release(dst_orig);
3297
3298 return dst;
3299 }
3300 EXPORT_SYMBOL(xfrm_lookup_route);
3301
3302 static inline int
3303 xfrm_secpath_reject(int idx, struct sk_buff *skb, const struct flowi *fl)
3304 {
3305 struct sec_path *sp = skb_sec_path(skb);
3306 struct xfrm_state *x;
3307
3308 if (!sp || idx < 0 || idx >= sp->len)
3309 return 0;
3310 x = sp->xvec[idx];
3311 if (!x->type->reject)
3312 return 0;
3313 return x->type->reject(x, skb, fl);
3314 }
3315
3316 /* When the skb is transformed back to its "native" form, we have to
3317 * check policy restrictions. At the moment we do this in a maximally
3318 * stupid way. Shame on me. :-) Of course, connected sockets must
3319 * have their policy cached on them.
3320 */
3321
3322 static inline int
3323 xfrm_state_ok(const struct xfrm_tmpl *tmpl, const struct xfrm_state *x,
3324 unsigned short family, u32 if_id)
3325 {
3326 if (xfrm_state_kern(x))
3327 return tmpl->optional && !xfrm_state_addr_cmp(tmpl, x, tmpl->encap_family);
3328 return x->id.proto == tmpl->id.proto &&
3329 (x->id.spi == tmpl->id.spi || !tmpl->id.spi) &&
3330 (x->props.reqid == tmpl->reqid || !tmpl->reqid) &&
3331 x->props.mode == tmpl->mode &&
3332 (tmpl->allalgs || (tmpl->aalgos & (1<<x->props.aalgo)) ||
3333 !(xfrm_id_proto_match(tmpl->id.proto, IPSEC_PROTO_ANY))) &&
3334 !(x->props.mode != XFRM_MODE_TRANSPORT &&
3335 xfrm_state_addr_cmp(tmpl, x, family)) &&
3336 (if_id == 0 || if_id == x->if_id);
3337 }
3338
3339 /*
3340 * 0 or more than 0 is returned when validation succeeds (either a bypass
3341 * because of an optional transport-mode template, or the next index of the
3342 * secpath state matched against the template).
3343 * -1 is returned when no matching template is found.
3344 * Otherwise "-2 - errored_index" is returned.
3345 */
3346 static inline int
3347 xfrm_policy_ok(const struct xfrm_tmpl *tmpl, const struct sec_path *sp, int start,
3348 unsigned short family, u32 if_id)
3349 {
3350 int idx = start;
3351
3352 if (tmpl->optional) {
3353 if (tmpl->mode == XFRM_MODE_TRANSPORT)
3354 return start;
3355 } else
3356 start = -1;
3357 for (; idx < sp->len; idx++) {
3358 if (xfrm_state_ok(tmpl, sp->xvec[idx], family, if_id))
3359 return ++idx;
3360 if (sp->xvec[idx]->props.mode != XFRM_MODE_TRANSPORT) {
3361 if (idx < sp->verified_cnt) {
3362 /* Secpath entry previously verified, consider optional and
3363 * continue searching
3364 */
3365 continue;
3366 }
3367
3368 if (start == -1)
3369 start = -2-idx;
3370 break;
3371 }
3372 }
3373 return start;
3374 }
3375
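/* Extract a flowi4 from the packet headers; with @reverse set, source
 * and destination (addresses, ports, ICMP fields) are swapped so the
 * flow describes the return direction.
 */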
3376 static void
3377 decode_session4(struct sk_buff *skb, struct flowi *fl, bool reverse)
3378 {
3379 const struct iphdr *iph = ip_hdr(skb);
3380 int ihl = iph->ihl;
3381 u8 *xprth = skb_network_header(skb) + ihl * 4;
3382 struct flowi4 *fl4 = &fl->u.ip4;
3383 int oif = 0;
3384
3385 if (skb_dst(skb) && skb_dst(skb)->dev)
3386 oif = skb_dst(skb)->dev->ifindex;
3387
3388 memset(fl4, 0, sizeof(struct flowi4));
3389 fl4->flowi4_mark = skb->mark;
3390 fl4->flowi4_oif = reverse ? skb->skb_iif : oif;
3391
3392 fl4->flowi4_proto = iph->protocol;
3393 fl4->daddr = reverse ? iph->saddr : iph->daddr;
3394 fl4->saddr = reverse ? iph->daddr : iph->saddr;
3395 fl4->flowi4_tos = iph->tos & ~INET_ECN_MASK;
3396
3397 if (!ip_is_fragment(iph)) {
3398 switch (iph->protocol) {
3399 case IPPROTO_UDP:
3400 case IPPROTO_UDPLITE:
3401 case IPPROTO_TCP:
3402 case IPPROTO_SCTP:
3403 case IPPROTO_DCCP:
3404 if (xprth + 4 < skb->data ||
3405 pskb_may_pull(skb, xprth + 4 - skb->data)) {
3406 __be16 *ports;
3407
3408 xprth = skb_network_header(skb) + ihl * 4;
3409 ports = (__be16 *)xprth;
3410
3411 fl4->fl4_sport = ports[!!reverse];
3412 fl4->fl4_dport = ports[!reverse];
3413 }
3414 break;
3415 case IPPROTO_ICMP:
3416 if (xprth + 2 < skb->data ||
3417 pskb_may_pull(skb, xprth + 2 - skb->data)) {
3418 u8 *icmp;
3419
3420 xprth = skb_network_header(skb) + ihl * 4;
3421 icmp = xprth;
3422
3423 fl4->fl4_icmp_type = icmp[0];
3424 fl4->fl4_icmp_code = icmp[1];
3425 }
3426 break;
3427 case IPPROTO_GRE:
3428 if (xprth + 12 < skb->data ||
3429 pskb_may_pull(skb, xprth + 12 - skb->data)) {
3430 __be16 *greflags;
3431 __be32 *gre_hdr;
3432
3433 xprth = skb_network_header(skb) + ihl * 4;
3434 greflags = (__be16 *)xprth;
3435 gre_hdr = (__be32 *)xprth;
3436
3437 if (greflags[0] & GRE_KEY) {
3438 if (greflags[0] & GRE_CSUM)
3439 gre_hdr++;
3440 fl4->fl4_gre_key = gre_hdr[1];
3441 }
3442 }
3443 break;
3444 default:
3445 break;
3446 }
3447 }
3448 }
3449
3450 #if IS_ENABLED(CONFIG_IPV6)
3451 static void
3452 decode_session6(struct sk_buff *skb, struct flowi *fl, bool reverse)
3453 {
3454 struct flowi6 *fl6 = &fl->u.ip6;
3455 int onlyproto = 0;
3456 const struct ipv6hdr *hdr = ipv6_hdr(skb);
3457 u32 offset = sizeof(*hdr);
3458 struct ipv6_opt_hdr *exthdr;
3459 const unsigned char *nh = skb_network_header(skb);
3460 u16 nhoff = IP6CB(skb)->nhoff;
3461 int oif = 0;
3462 u8 nexthdr;
3463
3464 if (!nhoff)
3465 nhoff = offsetof(struct ipv6hdr, nexthdr);
3466
3467 nexthdr = nh[nhoff];
3468
3469 if (skb_dst(skb) && skb_dst(skb)->dev)
3470 oif = skb_dst(skb)->dev->ifindex;
3471
3472 memset(fl6, 0, sizeof(struct flowi6));
3473 fl6->flowi6_mark = skb->mark;
3474 fl6->flowi6_oif = reverse ? skb->skb_iif : oif;
3475
3476 fl6->daddr = reverse ? hdr->saddr : hdr->daddr;
3477 fl6->saddr = reverse ? hdr->daddr : hdr->saddr;
3478
3479 while (nh + offset + sizeof(*exthdr) < skb->data ||
3480 pskb_may_pull(skb, nh + offset + sizeof(*exthdr) - skb->data)) {
3481 nh = skb_network_header(skb);
3482 exthdr = (struct ipv6_opt_hdr *)(nh + offset);
3483
3484 switch (nexthdr) {
3485 case NEXTHDR_FRAGMENT:
3486 onlyproto = 1;
3487 fallthrough;
3488 case NEXTHDR_ROUTING:
3489 case NEXTHDR_HOP:
3490 case NEXTHDR_DEST:
3491 offset += ipv6_optlen(exthdr);
3492 nexthdr = exthdr->nexthdr;
3493 break;
3494 case IPPROTO_UDP:
3495 case IPPROTO_UDPLITE:
3496 case IPPROTO_TCP:
3497 case IPPROTO_SCTP:
3498 case IPPROTO_DCCP:
3499 if (!onlyproto && (nh + offset + 4 < skb->data ||
3500 pskb_may_pull(skb, nh + offset + 4 - skb->data))) {
3501 __be16 *ports;
3502
3503 nh = skb_network_header(skb);
3504 ports = (__be16 *)(nh + offset);
3505 fl6->fl6_sport = ports[!!reverse];
3506 fl6->fl6_dport = ports[!reverse];
3507 }
3508 fl6->flowi6_proto = nexthdr;
3509 return;
3510 case IPPROTO_ICMPV6:
3511 if (!onlyproto && (nh + offset + 2 < skb->data ||
3512 pskb_may_pull(skb, nh + offset + 2 - skb->data))) {
3513 u8 *icmp;
3514
3515 nh = skb_network_header(skb);
3516 icmp = (u8 *)(nh + offset);
3517 fl6->fl6_icmp_type = icmp[0];
3518 fl6->fl6_icmp_code = icmp[1];
3519 }
3520 fl6->flowi6_proto = nexthdr;
3521 return;
3522 case IPPROTO_GRE:
3523 if (!onlyproto &&
3524 (nh + offset + 12 < skb->data ||
3525 pskb_may_pull(skb, nh + offset + 12 - skb->data))) {
3526 struct gre_base_hdr *gre_hdr;
3527 __be32 *gre_key;
3528
3529 nh = skb_network_header(skb);
3530 gre_hdr = (struct gre_base_hdr *)(nh + offset);
3531 gre_key = (__be32 *)(gre_hdr + 1);
3532
3533 if (gre_hdr->flags & GRE_KEY) {
3534 if (gre_hdr->flags & GRE_CSUM)
3535 gre_key++;
3536 fl6->fl6_gre_key = *gre_key;
3537 }
3538 }
3539 fl6->flowi6_proto = nexthdr;
3540 return;
3541
3542 #if IS_ENABLED(CONFIG_IPV6_MIP6)
3543 case IPPROTO_MH:
3544 offset += ipv6_optlen(exthdr);
3545 if (!onlyproto && (nh + offset + 3 < skb->data ||
3546 pskb_may_pull(skb, nh + offset + 3 - skb->data))) {
3547 struct ip6_mh *mh;
3548
3549 nh = skb_network_header(skb);
3550 mh = (struct ip6_mh *)(nh + offset);
3551 fl6->fl6_mh_type = mh->ip6mh_type;
3552 }
3553 fl6->flowi6_proto = nexthdr;
3554 return;
3555 #endif
3556 default:
3557 fl6->flowi6_proto = nexthdr;
3558 return;
3559 }
3560 }
3561 }
3562 #endif
3563
3564 int __xfrm_decode_session(struct sk_buff *skb, struct flowi *fl,
3565 unsigned int family, int reverse)
3566 {
3567 switch (family) {
3568 case AF_INET:
3569 decode_session4(skb, fl, reverse);
3570 break;
3571 #if IS_ENABLED(CONFIG_IPV6)
3572 case AF_INET6:
3573 decode_session6(skb, fl, reverse);
3574 break;
3575 #endif
3576 default:
3577 return -EAFNOSUPPORT;
3578 }
3579
3580 return security_xfrm_decode_session(skb, &fl->flowi_secid);
3581 }
3582 EXPORT_SYMBOL(__xfrm_decode_session);
3583
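/* Return 1 (reporting the index via @idxp) if any secpath entry from
 * @k onward used a non-transport-mode transformation.
 */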
3584 static inline int secpath_has_nontransport(const struct sec_path *sp, int k, int *idxp)
3585 {
3586 for (; k < sp->len; k++) {
3587 if (sp->xvec[k]->props.mode != XFRM_MODE_TRANSPORT) {
3588 *idxp = k;
3589 return 1;
3590 }
3591 }
3592
3593 return 0;
3594 }
3595
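/* Inbound policy check: decode the flow from the skb, find the socket
 * and/or global policies and verify that the packet's secpath satisfies
 * every template of the matched policies. Returns 1 to accept the
 * packet, 0 to drop it.
 */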
3596 int __xfrm_policy_check(struct sock *sk, int dir, struct sk_buff *skb,
3597 unsigned short family)
3598 {
3599 struct net *net = dev_net(skb->dev);
3600 struct xfrm_policy *pol;
3601 struct xfrm_policy *pols[XFRM_POLICY_TYPE_MAX];
3602 int npols = 0;
3603 int xfrm_nr;
3604 int pi;
3605 int reverse;
3606 struct flowi fl;
3607 int xerr_idx = -1;
3608 const struct xfrm_if_cb *ifcb;
3609 struct sec_path *sp;
3610 u32 if_id = 0;
3611
3612 rcu_read_lock();
3613 ifcb = xfrm_if_get_cb();
3614
3615 if (ifcb) {
3616 struct xfrm_if_decode_session_result r;
3617
3618 if (ifcb->decode_session(skb, family, &r)) {
3619 if_id = r.if_id;
3620 net = r.net;
3621 }
3622 }
3623 rcu_read_unlock();
3624
3625 reverse = dir & ~XFRM_POLICY_MASK;
3626 dir &= XFRM_POLICY_MASK;
3627
3628 if (__xfrm_decode_session(skb, &fl, family, reverse) < 0) {
3629 XFRM_INC_STATS(net, LINUX_MIB_XFRMINHDRERROR);
3630 return 0;
3631 }
3632
3633 nf_nat_decode_session(skb, &fl, family);
3634
3635 /* First, check used SA against their selectors. */
3636 sp = skb_sec_path(skb);
3637 if (sp) {
3638 int i;
3639
3640 for (i = sp->len - 1; i >= 0; i--) {
3641 struct xfrm_state *x = sp->xvec[i];
3642 if (!xfrm_selector_match(&x->sel, &fl, family)) {
3643 XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATEMISMATCH);
3644 return 0;
3645 }
3646 }
3647 }
3648
3649 pol = NULL;
3650 sk = sk_to_full_sk(sk);
3651 if (sk && sk->sk_policy[dir]) {
3652 pol = xfrm_sk_policy_lookup(sk, dir, &fl, family, if_id);
3653 if (IS_ERR(pol)) {
3654 XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLERROR);
3655 return 0;
3656 }
3657 }
3658
3659 if (!pol)
3660 pol = xfrm_policy_lookup(net, &fl, family, dir, if_id);
3661
3662 if (IS_ERR(pol)) {
3663 XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLERROR);
3664 return 0;
3665 }
3666
3667 if (!pol) {
3668 if (net->xfrm.policy_default[dir] == XFRM_USERPOLICY_BLOCK) {
3669 XFRM_INC_STATS(net, LINUX_MIB_XFRMINNOPOLS);
3670 return 0;
3671 }
3672
3673 if (sp && secpath_has_nontransport(sp, 0, &xerr_idx)) {
3674 xfrm_secpath_reject(xerr_idx, skb, &fl);
3675 XFRM_INC_STATS(net, LINUX_MIB_XFRMINNOPOLS);
3676 return 0;
3677 }
3678 return 1;
3679 }
3680
3681 /* This lockless write can happen from different cpus. */
3682 WRITE_ONCE(pol->curlft.use_time, ktime_get_real_seconds());
3683
3684 pols[0] = pol;
3685 npols++;
3686 #ifdef CONFIG_XFRM_SUB_POLICY
3687 if (pols[0]->type != XFRM_POLICY_TYPE_MAIN) {
3688 pols[1] = xfrm_policy_lookup_bytype(net, XFRM_POLICY_TYPE_MAIN,
3689 &fl, family,
3690 XFRM_POLICY_IN, if_id);
3691 if (pols[1]) {
3692 if (IS_ERR(pols[1])) {
3693 XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLERROR);
3694 xfrm_pol_put(pols[0]);
3695 return 0;
3696 }
3697 /* This write can happen from different cpus. */
3698 WRITE_ONCE(pols[1]->curlft.use_time,
3699 ktime_get_real_seconds());
3700 npols++;
3701 }
3702 }
3703 #endif
3704
3705 if (pol->action == XFRM_POLICY_ALLOW) {
3706 static struct sec_path dummy;
3707 struct xfrm_tmpl *tp[XFRM_MAX_DEPTH];
3708 struct xfrm_tmpl *stp[XFRM_MAX_DEPTH];
3709 struct xfrm_tmpl **tpp = tp;
3710 int ti = 0;
3711 int i, k;
3712
3713 sp = skb_sec_path(skb);
3714 if (!sp)
3715 sp = &dummy;
3716
3717 for (pi = 0; pi < npols; pi++) {
3718 if (pols[pi] != pol &&
3719 pols[pi]->action != XFRM_POLICY_ALLOW) {
3720 XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLBLOCK);
3721 goto reject;
3722 }
3723 if (ti + pols[pi]->xfrm_nr >= XFRM_MAX_DEPTH) {
3724 XFRM_INC_STATS(net, LINUX_MIB_XFRMINBUFFERERROR);
3725 goto reject_error;
3726 }
3727 for (i = 0; i < pols[pi]->xfrm_nr; i++)
3728 tpp[ti++] = &pols[pi]->xfrm_vec[i];
3729 }
3730 xfrm_nr = ti;
3731
3732 if (npols > 1) {
3733 xfrm_tmpl_sort(stp, tpp, xfrm_nr, family);
3734 tpp = stp;
3735 }
3736
3737 /* For each tunnel xfrm, find the first matching tmpl.
3738 * For each tmpl before that, find corresponding xfrm.
3739 * Order is _important_. Later we will implement
3740 * some barriers, but at the moment barriers
3741 * are implied between each two transformations.
3742 * Upon success, marks secpath entries as having been
3743 * verified to allow them to be skipped in future policy
3744 * checks (e.g. nested tunnels).
3745 */
3746 for (i = xfrm_nr-1, k = 0; i >= 0; i--) {
3747 k = xfrm_policy_ok(tpp[i], sp, k, family, if_id);
3748 if (k < 0) {
3749 if (k < -1)
3750 /* "-2 - errored_index" returned */
3751 xerr_idx = -(2+k);
3752 XFRM_INC_STATS(net, LINUX_MIB_XFRMINTMPLMISMATCH);
3753 goto reject;
3754 }
3755 }
3756
3757 if (secpath_has_nontransport(sp, k, &xerr_idx)) {
3758 XFRM_INC_STATS(net, LINUX_MIB_XFRMINTMPLMISMATCH);
3759 goto reject;
3760 }
3761
3762 xfrm_pols_put(pols, npols);
3763 sp->verified_cnt = k;
3764
3765 return 1;
3766 }
3767 XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLBLOCK);
3768
3769 reject:
3770 xfrm_secpath_reject(xerr_idx, skb, &fl);
3771 reject_error:
3772 xfrm_pols_put(pols, npols);
3773 return 0;
3774 }
3775 EXPORT_SYMBOL(__xfrm_policy_check);
3776
3777 int __xfrm_route_forward(struct sk_buff *skb, unsigned short family)
3778 {
3779 struct net *net = dev_net(skb->dev);
3780 struct flowi fl;
3781 struct dst_entry *dst;
3782 int res = 1;
3783
3784 if (xfrm_decode_session(skb, &fl, family) < 0) {
3785 XFRM_INC_STATS(net, LINUX_MIB_XFRMFWDHDRERROR);
3786 return 0;
3787 }
3788
3789 skb_dst_force(skb);
3790 if (!skb_dst(skb)) {
3791 XFRM_INC_STATS(net, LINUX_MIB_XFRMFWDHDRERROR);
3792 return 0;
3793 }
3794
3795 dst = xfrm_lookup(net, skb_dst(skb), &fl, NULL, XFRM_LOOKUP_QUEUE);
3796 if (IS_ERR(dst)) {
3797 res = 0;
3798 dst = NULL;
3799 }
3800 skb_dst_set(skb, dst);
3801 return res;
3802 }
3803 EXPORT_SYMBOL(__xfrm_route_forward);
3804
3805 /* Optimize later using cookies and generation ids. */
3806
3807 static struct dst_entry *xfrm_dst_check(struct dst_entry *dst, u32 cookie)
3808 {
3809 /* Code (such as __xfrm4_bundle_create()) sets dst->obsolete
3810 * to DST_OBSOLETE_FORCE_CHK to force all XFRM destinations to
3811 * get validated by dst_ops->check on every use. We do this
3812 * because when a normal route referenced by an XFRM dst is
3813 * obsoleted we do not go looking around for all parent
3814 * referencing XFRM dsts so that we can invalidate them. It
3815 * is just too much work. Instead we make the checks here on
3816 * every use. For example:
3817 *
3818 * XFRM dst A --> IPv4 dst X
3819 *
3820 * X is the "xdst->route" of A (X is also the "dst->path" of A
3821 * in this example). If X is marked obsolete, "A" will not
3822 * notice. That's what we are validating here via the
3823 * stale_bundle() check.
3824 *
3825 * When a dst is removed from the fib tree, DST_OBSOLETE_DEAD will
3826 * be marked on it.
3827 * This will force stale_bundle() to fail on any xdst bundle with
3828 * this dst linked in it.
3829 */
3830 if (dst->obsolete < 0 && !stale_bundle(dst))
3831 return dst;
3832
3833 return NULL;
3834 }
3835
3836 static int stale_bundle(struct dst_entry *dst)
3837 {
3838 return !xfrm_bundle_ok((struct xfrm_dst *)dst);
3839 }
3840
3841 void xfrm_dst_ifdown(struct dst_entry *dst, struct net_device *dev)
3842 {
3843 while ((dst = xfrm_dst_child(dst)) && dst->xfrm && dst->dev == dev) {
3844 dst->dev = blackhole_netdev;
3845 dev_hold(dst->dev);
3846 dev_put(dev);
3847 }
3848 }
3849 EXPORT_SYMBOL(xfrm_dst_ifdown);
3850
3851 static void xfrm_link_failure(struct sk_buff *skb)
3852 {
3853 /* Impossible. Such a dst must be popped before it reaches the point of failure. */
3854 }
3855
3856 static struct dst_entry *xfrm_negative_advice(struct dst_entry *dst)
3857 {
3858 if (dst) {
3859 if (dst->obsolete) {
3860 dst_release(dst);
3861 dst = NULL;
3862 }
3863 }
3864 return dst;
3865 }
3866
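/* Seed the cached child/route MTUs for a freshly built bundle, walking
 * from the innermost dst outward.  A hedged worked example (numbers are
 * illustrative only): with a 1500 byte route MTU, an ESP tunnel-mode
 * state typically leaves xfrm_state_mtu() well below 1500 (outer IP
 * header, ESP header, IV, padding and ICV all cost payload space), so
 * the bundle's RTAX_MTU ends up at min(state MTU, route MTU) rather
 * than the raw link MTU.
 */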
static void xfrm_init_pmtu(struct xfrm_dst **bundle, int nr)
{
	while (nr--) {
		struct xfrm_dst *xdst = bundle[nr];
		u32 pmtu, route_mtu_cached;
		struct dst_entry *dst;

		dst = &xdst->u.dst;
		pmtu = dst_mtu(xfrm_dst_child(dst));
		xdst->child_mtu_cached = pmtu;

		pmtu = xfrm_state_mtu(dst->xfrm, pmtu);

		route_mtu_cached = dst_mtu(xdst->route);
		xdst->route_mtu_cached = route_mtu_cached;

		if (pmtu > route_mtu_cached)
			pmtu = route_mtu_cached;

		dst_metric_set(dst, RTAX_MTU, pmtu);
	}
}

/* Check that the bundle accepts the flow and its components are
 * still valid.
 */

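/* The first pass below walks the chain validating state, genids and
 * route cookies while noting the deepest entry whose cached MTU changed
 * (start_from); the second pass then re-propagates MTUs from that entry
 * back out toward the outermost dst, mirroring xfrm_init_pmtu() above.
 */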
static int xfrm_bundle_ok(struct xfrm_dst *first)
{
	struct xfrm_dst *bundle[XFRM_MAX_DEPTH];
	struct dst_entry *dst = &first->u.dst;
	struct xfrm_dst *xdst;
	int start_from, nr;
	u32 mtu;

	if (!dst_check(xfrm_dst_path(dst), ((struct xfrm_dst *)dst)->path_cookie) ||
	    (dst->dev && !netif_running(dst->dev)))
		return 0;

	if (dst->flags & DST_XFRM_QUEUE)
		return 1;

	start_from = nr = 0;
	do {
		struct xfrm_dst *xdst = (struct xfrm_dst *)dst;

		if (dst->xfrm->km.state != XFRM_STATE_VALID)
			return 0;
		if (xdst->xfrm_genid != dst->xfrm->genid)
			return 0;
		if (xdst->num_pols > 0 &&
		    xdst->policy_genid != atomic_read(&xdst->pols[0]->genid))
			return 0;

		bundle[nr++] = xdst;

		mtu = dst_mtu(xfrm_dst_child(dst));
		if (xdst->child_mtu_cached != mtu) {
			start_from = nr;
			xdst->child_mtu_cached = mtu;
		}

		if (!dst_check(xdst->route, xdst->route_cookie))
			return 0;
		mtu = dst_mtu(xdst->route);
		if (xdst->route_mtu_cached != mtu) {
			start_from = nr;
			xdst->route_mtu_cached = mtu;
		}

		dst = xfrm_dst_child(dst);
	} while (dst->xfrm);

	if (likely(!start_from))
		return 1;

	xdst = bundle[start_from - 1];
	mtu = xdst->child_mtu_cached;
	while (start_from--) {
		dst = &xdst->u.dst;

		mtu = xfrm_state_mtu(dst->xfrm, mtu);
		if (mtu > xdst->route_mtu_cached)
			mtu = xdst->route_mtu_cached;
		dst_metric_set(dst, RTAX_MTU, mtu);
		if (!start_from)
			break;

		xdst = bundle[start_from - 1];
		xdst->child_mtu_cached = mtu;
	}

	return 1;
}

static unsigned int xfrm_default_advmss(const struct dst_entry *dst)
{
	return dst_metric_advmss(xfrm_dst_path(dst));
}

static unsigned int xfrm_mtu(const struct dst_entry *dst)
{
	unsigned int mtu = dst_metric_raw(dst, RTAX_MTU);

	return mtu ? : dst_mtu(xfrm_dst_path(dst));
}

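/* Walk the xfrm chain to find the address a neighbour entry should
 * resolve: transport-mode states keep the original daddr, while
 * tunnel-mode states rewrite it to the outer tunnel endpoint (or to the
 * care-of address for REMOTE_COADDR types such as MIPv6 routing headers).
 */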
static const void *xfrm_get_dst_nexthop(const struct dst_entry *dst,
					const void *daddr)
{
	while (dst->xfrm) {
		const struct xfrm_state *xfrm = dst->xfrm;

		dst = xfrm_dst_child(dst);

		if (xfrm->props.mode == XFRM_MODE_TRANSPORT)
			continue;
		if (xfrm->type->flags & XFRM_TYPE_REMOTE_COADDR)
			daddr = xfrm->coaddr;
		else if (!(xfrm->type->flags & XFRM_TYPE_LOCAL_COADDR))
			daddr = &xfrm->id.daddr;
	}
	return daddr;
}

static struct neighbour *xfrm_neigh_lookup(const struct dst_entry *dst,
					   struct sk_buff *skb,
					   const void *daddr)
{
	const struct dst_entry *path = xfrm_dst_path(dst);

	if (!skb)
		daddr = xfrm_get_dst_nexthop(dst, daddr);
	return path->ops->neigh_lookup(path, skb, daddr);
}

static void xfrm_confirm_neigh(const struct dst_entry *dst, const void *daddr)
{
	const struct dst_entry *path = xfrm_dst_path(dst);

	daddr = xfrm_get_dst_nexthop(dst, daddr);
	path->ops->confirm_neigh(path, daddr);
}

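/* Hedged example (an assumption mirroring what the IPv4 glue is expected
 * to look like, not copied from it): an address family module registers
 * its afinfo once at init time, e.g.
 *
 *	static const struct xfrm_policy_afinfo xfrm4_policy_afinfo = {
 *		.dst_ops = &xfrm4_dst_ops_template,
 *		...
 *	};
 *	...
 *	xfrm_policy_register_afinfo(&xfrm4_policy_afinfo, AF_INET);
 *
 * Any dst_ops hook the caller leaves NULL is filled in with the xfrm
 * defaults below.
 */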
int xfrm_policy_register_afinfo(const struct xfrm_policy_afinfo *afinfo, int family)
{
	int err = 0;

	if (WARN_ON(family >= ARRAY_SIZE(xfrm_policy_afinfo)))
		return -EAFNOSUPPORT;

	spin_lock(&xfrm_policy_afinfo_lock);
	if (unlikely(xfrm_policy_afinfo[family] != NULL))
		err = -EEXIST;
	else {
		struct dst_ops *dst_ops = afinfo->dst_ops;

		if (likely(dst_ops->kmem_cachep == NULL))
			dst_ops->kmem_cachep = xfrm_dst_cache;
		if (likely(dst_ops->check == NULL))
			dst_ops->check = xfrm_dst_check;
		if (likely(dst_ops->default_advmss == NULL))
			dst_ops->default_advmss = xfrm_default_advmss;
		if (likely(dst_ops->mtu == NULL))
			dst_ops->mtu = xfrm_mtu;
		if (likely(dst_ops->negative_advice == NULL))
			dst_ops->negative_advice = xfrm_negative_advice;
		if (likely(dst_ops->link_failure == NULL))
			dst_ops->link_failure = xfrm_link_failure;
		if (likely(dst_ops->neigh_lookup == NULL))
			dst_ops->neigh_lookup = xfrm_neigh_lookup;
		if (likely(!dst_ops->confirm_neigh))
			dst_ops->confirm_neigh = xfrm_confirm_neigh;
		rcu_assign_pointer(xfrm_policy_afinfo[family], afinfo);
	}
	spin_unlock(&xfrm_policy_afinfo_lock);

	return err;
}
EXPORT_SYMBOL(xfrm_policy_register_afinfo);

void xfrm_policy_unregister_afinfo(const struct xfrm_policy_afinfo *afinfo)
{
	struct dst_ops *dst_ops = afinfo->dst_ops;
	int i;

	for (i = 0; i < ARRAY_SIZE(xfrm_policy_afinfo); i++) {
		if (xfrm_policy_afinfo[i] != afinfo)
			continue;
		RCU_INIT_POINTER(xfrm_policy_afinfo[i], NULL);
		break;
	}

	synchronize_rcu();

	dst_ops->kmem_cachep = NULL;
	dst_ops->check = NULL;
	dst_ops->negative_advice = NULL;
	dst_ops->link_failure = NULL;
}
EXPORT_SYMBOL(xfrm_policy_unregister_afinfo);

void xfrm_if_register_cb(const struct xfrm_if_cb *ifcb)
{
	spin_lock(&xfrm_if_cb_lock);
	rcu_assign_pointer(xfrm_if_cb, ifcb);
	spin_unlock(&xfrm_if_cb_lock);
}
EXPORT_SYMBOL(xfrm_if_register_cb);

void xfrm_if_unregister_cb(void)
{
	RCU_INIT_POINTER(xfrm_if_cb, NULL);
	synchronize_rcu();
}
EXPORT_SYMBOL(xfrm_if_unregister_cb);

#ifdef CONFIG_XFRM_STATISTICS
static int __net_init xfrm_statistics_init(struct net *net)
{
	int rv;

	net->mib.xfrm_statistics = alloc_percpu(struct linux_xfrm_mib);
	if (!net->mib.xfrm_statistics)
		return -ENOMEM;
	rv = xfrm_proc_init(net);
	if (rv < 0)
		free_percpu(net->mib.xfrm_statistics);
	return rv;
}

static void xfrm_statistics_fini(struct net *net)
{
	xfrm_proc_fini(net);
	free_percpu(net->mib.xfrm_statistics);
}
#else
static int __net_init xfrm_statistics_init(struct net *net)
{
	return 0;
}

static void xfrm_statistics_fini(struct net *net)
{
}
#endif

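/* Per-netns policy state starts with deliberately small hash tables
 * (hmask = 8 - 1, i.e. eight buckets); xfrm_hash_resize() grows them
 * later from the policy_hash_work worker as policies accumulate.
 */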
static int __net_init xfrm_policy_init(struct net *net)
{
	unsigned int hmask, sz;
	int dir, err;

	if (net_eq(net, &init_net)) {
		xfrm_dst_cache = kmem_cache_create("xfrm_dst_cache",
					   sizeof(struct xfrm_dst),
					   0, SLAB_HWCACHE_ALIGN|SLAB_PANIC,
					   NULL);
		err = rhashtable_init(&xfrm_policy_inexact_table,
				      &xfrm_pol_inexact_params);
		BUG_ON(err);
	}

	hmask = 8 - 1;
	sz = (hmask+1) * sizeof(struct hlist_head);

	net->xfrm.policy_byidx = xfrm_hash_alloc(sz);
	if (!net->xfrm.policy_byidx)
		goto out_byidx;
	net->xfrm.policy_idx_hmask = hmask;

	for (dir = 0; dir < XFRM_POLICY_MAX; dir++) {
		struct xfrm_policy_hash *htab;

		net->xfrm.policy_count[dir] = 0;
		net->xfrm.policy_count[XFRM_POLICY_MAX + dir] = 0;
		INIT_HLIST_HEAD(&net->xfrm.policy_inexact[dir]);

		htab = &net->xfrm.policy_bydst[dir];
		htab->table = xfrm_hash_alloc(sz);
		if (!htab->table)
			goto out_bydst;
		htab->hmask = hmask;
		htab->dbits4 = 32;
		htab->sbits4 = 32;
		htab->dbits6 = 128;
		htab->sbits6 = 128;
	}
	net->xfrm.policy_hthresh.lbits4 = 32;
	net->xfrm.policy_hthresh.rbits4 = 32;
	net->xfrm.policy_hthresh.lbits6 = 128;
	net->xfrm.policy_hthresh.rbits6 = 128;

	seqlock_init(&net->xfrm.policy_hthresh.lock);

	INIT_LIST_HEAD(&net->xfrm.policy_all);
	INIT_LIST_HEAD(&net->xfrm.inexact_bins);
	INIT_WORK(&net->xfrm.policy_hash_work, xfrm_hash_resize);
	INIT_WORK(&net->xfrm.policy_hthresh.work, xfrm_hash_rebuild);
	return 0;

out_bydst:
	for (dir--; dir >= 0; dir--) {
		struct xfrm_policy_hash *htab;

		htab = &net->xfrm.policy_bydst[dir];
		xfrm_hash_free(htab->table, sz);
	}
	xfrm_hash_free(net->xfrm.policy_byidx, sz);
out_byidx:
	return -ENOMEM;
}

static void xfrm_policy_fini(struct net *net)
{
	struct xfrm_pol_inexact_bin *b, *t;
	unsigned int sz;
	int dir;

	flush_work(&net->xfrm.policy_hash_work);
#ifdef CONFIG_XFRM_SUB_POLICY
	xfrm_policy_flush(net, XFRM_POLICY_TYPE_SUB, false);
#endif
	xfrm_policy_flush(net, XFRM_POLICY_TYPE_MAIN, false);

	WARN_ON(!list_empty(&net->xfrm.policy_all));

	for (dir = 0; dir < XFRM_POLICY_MAX; dir++) {
		struct xfrm_policy_hash *htab;

		WARN_ON(!hlist_empty(&net->xfrm.policy_inexact[dir]));

		htab = &net->xfrm.policy_bydst[dir];
		sz = (htab->hmask + 1) * sizeof(struct hlist_head);
		WARN_ON(!hlist_empty(htab->table));
		xfrm_hash_free(htab->table, sz);
	}

	sz = (net->xfrm.policy_idx_hmask + 1) * sizeof(struct hlist_head);
	WARN_ON(!hlist_empty(net->xfrm.policy_byidx));
	xfrm_hash_free(net->xfrm.policy_byidx, sz);

	spin_lock_bh(&net->xfrm.xfrm_policy_lock);
	list_for_each_entry_safe(b, t, &net->xfrm.inexact_bins, inexact_bins)
		__xfrm_policy_inexact_prune_bin(b, true);
	spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
}

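/* Pernet init/exit: the error unwind below tears subsystems down in
 * exactly the reverse of the order they were brought up (sysctl, policy,
 * state, statistics), which is also the order xfrm_net_exit() uses.
 */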
static int __net_init xfrm_net_init(struct net *net)
{
	int rv;

	/* Initialize the per-net locks here */
	spin_lock_init(&net->xfrm.xfrm_state_lock);
	spin_lock_init(&net->xfrm.xfrm_policy_lock);
	seqcount_spinlock_init(&net->xfrm.xfrm_policy_hash_generation, &net->xfrm.xfrm_policy_lock);
	mutex_init(&net->xfrm.xfrm_cfg_mutex);
	net->xfrm.policy_default[XFRM_POLICY_IN] = XFRM_USERPOLICY_ACCEPT;
	net->xfrm.policy_default[XFRM_POLICY_FWD] = XFRM_USERPOLICY_ACCEPT;
	net->xfrm.policy_default[XFRM_POLICY_OUT] = XFRM_USERPOLICY_ACCEPT;

	rv = xfrm_statistics_init(net);
	if (rv < 0)
		goto out_statistics;
	rv = xfrm_state_init(net);
	if (rv < 0)
		goto out_state;
	rv = xfrm_policy_init(net);
	if (rv < 0)
		goto out_policy;
	rv = xfrm_sysctl_init(net);
	if (rv < 0)
		goto out_sysctl;

	return 0;

out_sysctl:
	xfrm_policy_fini(net);
out_policy:
	xfrm_state_fini(net);
out_state:
	xfrm_statistics_fini(net);
out_statistics:
	return rv;
}

static void __net_exit xfrm_net_exit(struct net *net)
{
	xfrm_sysctl_fini(net);
	xfrm_policy_fini(net);
	xfrm_state_fini(net);
	xfrm_statistics_fini(net);
}

static struct pernet_operations __net_initdata xfrm_net_ops = {
	.init = xfrm_net_init,
	.exit = xfrm_net_exit,
};

void __init xfrm_init(void)
{
	register_pernet_subsys(&xfrm_net_ops);
	xfrm_dev_init();
	xfrm_input_init();

#ifdef CONFIG_XFRM_ESPINTCP
	espintcp_init();
#endif
}

#ifdef CONFIG_AUDITSYSCALL
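/* A hedged sketch of the resulting audit record (field values are
 * illustrative, not captured output):
 *
 *	op=SPD-add res=1 src=192.0.2.0 src_prefixlen=24 dst=198.51.100.2
 *
 * The sec_* fields appear only when a security context is present, and
 * the *_prefixlen fields only when the prefix is narrower than a full
 * host address.
 */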
static void xfrm_audit_common_policyinfo(struct xfrm_policy *xp,
					 struct audit_buffer *audit_buf)
{
	struct xfrm_sec_ctx *ctx = xp->security;
	struct xfrm_selector *sel = &xp->selector;

	if (ctx)
		audit_log_format(audit_buf, " sec_alg=%u sec_doi=%u sec_obj=%s",
				 ctx->ctx_alg, ctx->ctx_doi, ctx->ctx_str);

	switch (sel->family) {
	case AF_INET:
		audit_log_format(audit_buf, " src=%pI4", &sel->saddr.a4);
		if (sel->prefixlen_s != 32)
			audit_log_format(audit_buf, " src_prefixlen=%d",
					 sel->prefixlen_s);
		audit_log_format(audit_buf, " dst=%pI4", &sel->daddr.a4);
		if (sel->prefixlen_d != 32)
			audit_log_format(audit_buf, " dst_prefixlen=%d",
					 sel->prefixlen_d);
		break;
	case AF_INET6:
		audit_log_format(audit_buf, " src=%pI6", sel->saddr.a6);
		if (sel->prefixlen_s != 128)
			audit_log_format(audit_buf, " src_prefixlen=%d",
					 sel->prefixlen_s);
		audit_log_format(audit_buf, " dst=%pI6", sel->daddr.a6);
		if (sel->prefixlen_d != 128)
			audit_log_format(audit_buf, " dst_prefixlen=%d",
					 sel->prefixlen_d);
		break;
	}
}

void xfrm_audit_policy_add(struct xfrm_policy *xp, int result, bool task_valid)
{
	struct audit_buffer *audit_buf;

	audit_buf = xfrm_audit_start("SPD-add");
	if (audit_buf == NULL)
		return;
	xfrm_audit_helper_usrinfo(task_valid, audit_buf);
	audit_log_format(audit_buf, " res=%u", result);
	xfrm_audit_common_policyinfo(xp, audit_buf);
	audit_log_end(audit_buf);
}
EXPORT_SYMBOL_GPL(xfrm_audit_policy_add);

void xfrm_audit_policy_delete(struct xfrm_policy *xp, int result,
			      bool task_valid)
{
	struct audit_buffer *audit_buf;

	audit_buf = xfrm_audit_start("SPD-delete");
	if (audit_buf == NULL)
		return;
	xfrm_audit_helper_usrinfo(task_valid, audit_buf);
	audit_log_format(audit_buf, " res=%u", result);
	xfrm_audit_common_policyinfo(xp, audit_buf);
	audit_log_end(audit_buf);
}
EXPORT_SYMBOL_GPL(xfrm_audit_policy_delete);
#endif

#ifdef CONFIG_XFRM_MIGRATE
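/* Selector matching for MIGRATE: with IPSEC_ULPROTO_ANY the target only
 * has to agree on family, addresses and prefix lengths; for a specific
 * upper-layer protocol the whole selector (including ports, ifindex and
 * so on) must match byte for byte.
 */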
static bool xfrm_migrate_selector_match(const struct xfrm_selector *sel_cmp,
					const struct xfrm_selector *sel_tgt)
{
	if (sel_cmp->proto == IPSEC_ULPROTO_ANY) {
		if (sel_tgt->family == sel_cmp->family &&
		    xfrm_addr_equal(&sel_tgt->daddr, &sel_cmp->daddr,
				    sel_cmp->family) &&
		    xfrm_addr_equal(&sel_tgt->saddr, &sel_cmp->saddr,
				    sel_cmp->family) &&
		    sel_tgt->prefixlen_d == sel_cmp->prefixlen_d &&
		    sel_tgt->prefixlen_s == sel_cmp->prefixlen_s) {
			return true;
		}
	} else {
		if (memcmp(sel_tgt, sel_cmp, sizeof(*sel_tgt)) == 0) {
			return true;
		}
	}
	return false;
}

static struct xfrm_policy *xfrm_migrate_policy_find(const struct xfrm_selector *sel,
						    u8 dir, u8 type, struct net *net, u32 if_id)
{
	struct xfrm_policy *pol, *ret = NULL;
	struct hlist_head *chain;
	u32 priority = ~0U;

	spin_lock_bh(&net->xfrm.xfrm_policy_lock);
	chain = policy_hash_direct(net, &sel->daddr, &sel->saddr, sel->family, dir);
	hlist_for_each_entry(pol, chain, bydst) {
		if ((if_id == 0 || pol->if_id == if_id) &&
		    xfrm_migrate_selector_match(sel, &pol->selector) &&
		    pol->type == type) {
			ret = pol;
			priority = ret->priority;
			break;
		}
	}
	chain = &net->xfrm.policy_inexact[dir];
	hlist_for_each_entry(pol, chain, bydst_inexact_list) {
		if ((pol->priority >= priority) && ret)
			break;

		if ((if_id == 0 || pol->if_id == if_id) &&
		    xfrm_migrate_selector_match(sel, &pol->selector) &&
		    pol->type == type) {
			ret = pol;
			break;
		}
	}

	xfrm_pol_hold(ret);

	spin_unlock_bh(&net->xfrm.xfrm_policy_lock);

	return ret;
}

static int migrate_tmpl_match(const struct xfrm_migrate *m, const struct xfrm_tmpl *t)
{
	int match = 0;

	if (t->mode == m->mode && t->id.proto == m->proto &&
	    (m->reqid == 0 || t->reqid == m->reqid)) {
		switch (t->mode) {
		case XFRM_MODE_TUNNEL:
		case XFRM_MODE_BEET:
			if (xfrm_addr_equal(&t->id.daddr, &m->old_daddr,
					    m->old_family) &&
			    xfrm_addr_equal(&t->saddr, &m->old_saddr,
					    m->old_family)) {
				match = 1;
			}
			break;
		case XFRM_MODE_TRANSPORT:
			/* in case of transport mode, template does not store
			   any IP addresses, hence we just compare mode and
			   protocol */
			match = 1;
			break;
		default:
			break;
		}
	}
	return match;
}

/* update endpoint address(es) of template(s) */
static int xfrm_policy_migrate(struct xfrm_policy *pol,
			       struct xfrm_migrate *m, int num_migrate,
			       struct netlink_ext_ack *extack)
{
	struct xfrm_migrate *mp;
	int i, j, n = 0;

	write_lock_bh(&pol->lock);
	if (unlikely(pol->walk.dead)) {
		/* target policy has been deleted */
		NL_SET_ERR_MSG(extack, "Target policy not found");
		write_unlock_bh(&pol->lock);
		return -ENOENT;
	}

	for (i = 0; i < pol->xfrm_nr; i++) {
		for (j = 0, mp = m; j < num_migrate; j++, mp++) {
			if (!migrate_tmpl_match(mp, &pol->xfrm_vec[i]))
				continue;
			n++;
			if (pol->xfrm_vec[i].mode != XFRM_MODE_TUNNEL &&
			    pol->xfrm_vec[i].mode != XFRM_MODE_BEET)
				continue;
			/* update endpoints */
			memcpy(&pol->xfrm_vec[i].id.daddr, &mp->new_daddr,
			       sizeof(pol->xfrm_vec[i].id.daddr));
			memcpy(&pol->xfrm_vec[i].saddr, &mp->new_saddr,
			       sizeof(pol->xfrm_vec[i].saddr));
			pol->xfrm_vec[i].encap_family = mp->new_family;
			/* flush bundles */
			atomic_inc(&pol->genid);
		}
	}

	write_unlock_bh(&pol->lock);

	if (!n)
		return -ENODATA;

	return 0;
}

static int xfrm_migrate_check(const struct xfrm_migrate *m, int num_migrate,
			      struct netlink_ext_ack *extack)
{
	int i, j;

	if (num_migrate < 1 || num_migrate > XFRM_MAX_DEPTH) {
		NL_SET_ERR_MSG(extack, "Invalid number of SAs to migrate, must be 0 < num <= XFRM_MAX_DEPTH (6)");
		return -EINVAL;
	}

	for (i = 0; i < num_migrate; i++) {
		if (xfrm_addr_any(&m[i].new_daddr, m[i].new_family) ||
		    xfrm_addr_any(&m[i].new_saddr, m[i].new_family)) {
			NL_SET_ERR_MSG(extack, "Addresses in the MIGRATE attribute's list cannot be null");
			return -EINVAL;
		}

		/* check if there is any duplicated entry */
		for (j = i + 1; j < num_migrate; j++) {
			if (!memcmp(&m[i].old_daddr, &m[j].old_daddr,
				    sizeof(m[i].old_daddr)) &&
			    !memcmp(&m[i].old_saddr, &m[j].old_saddr,
				    sizeof(m[i].old_saddr)) &&
			    m[i].proto == m[j].proto &&
			    m[i].mode == m[j].mode &&
			    m[i].reqid == m[j].reqid &&
			    m[i].old_family == m[j].old_family) {
				NL_SET_ERR_MSG(extack, "Entries in the MIGRATE attribute's list must be unique");
				return -EINVAL;
			}
		}
	}

	return 0;
}

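/* MIGRATE entry point (MOBIKE-style endpoint updates), reached from the
 * PF_KEY and netlink front ends.  The staged flow below keeps the old
 * states alive until the new states and the updated policy are in place,
 * so a failure in stage 2 or 3 can restore_state without leaving the SPD
 * half-migrated.
 */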
int xfrm_migrate(const struct xfrm_selector *sel, u8 dir, u8 type,
		 struct xfrm_migrate *m, int num_migrate,
		 struct xfrm_kmaddress *k, struct net *net,
		 struct xfrm_encap_tmpl *encap, u32 if_id,
		 struct netlink_ext_ack *extack)
{
	int i, err, nx_cur = 0, nx_new = 0;
	struct xfrm_policy *pol = NULL;
	struct xfrm_state *x, *xc;
	struct xfrm_state *x_cur[XFRM_MAX_DEPTH];
	struct xfrm_state *x_new[XFRM_MAX_DEPTH];
	struct xfrm_migrate *mp;

	/* Stage 0 - sanity checks */
	err = xfrm_migrate_check(m, num_migrate, extack);
	if (err < 0)
		goto out;

	if (dir >= XFRM_POLICY_MAX) {
		NL_SET_ERR_MSG(extack, "Invalid policy direction");
		err = -EINVAL;
		goto out;
	}

	/* Stage 1 - find policy */
	pol = xfrm_migrate_policy_find(sel, dir, type, net, if_id);
	if (!pol) {
		NL_SET_ERR_MSG(extack, "Target policy not found");
		err = -ENOENT;
		goto out;
	}

	/* Stage 2 - find and update state(s) */
	for (i = 0, mp = m; i < num_migrate; i++, mp++) {
		if ((x = xfrm_migrate_state_find(mp, net, if_id))) {
			x_cur[nx_cur] = x;
			nx_cur++;
			xc = xfrm_state_migrate(x, mp, encap);
			if (xc) {
				x_new[nx_new] = xc;
				nx_new++;
			} else {
				err = -ENODATA;
				goto restore_state;
			}
		}
	}

	/* Stage 3 - update policy */
	err = xfrm_policy_migrate(pol, m, num_migrate, extack);
	if (err < 0)
		goto restore_state;

	/* Stage 4 - delete old state(s) */
	if (nx_cur) {
		xfrm_states_put(x_cur, nx_cur);
		xfrm_states_delete(x_cur, nx_cur);
	}

	/* Stage 5 - announce */
	km_migrate(sel, dir, type, m, num_migrate, k, encap);

	xfrm_pol_put(pol);

	return 0;
out:
	return err;

restore_state:
	if (pol)
		xfrm_pol_put(pol);
	if (nx_cur)
		xfrm_states_put(x_cur, nx_cur);
	if (nx_new)
		xfrm_states_delete(x_new, nx_new);

	return err;
}
EXPORT_SYMBOL(xfrm_migrate);
#endif
