// SPDX-License-Identifier: GPL-2.0-only
/*
 * xfrm_policy.c
 *
 * Changes:
 *	Mitsuru KANDA @USAGI
 *	Kazunori MIYAZAWA @USAGI
 *	Kunihiro Ishiguro <kunihiro@ipinfusion.com>
 *		IPv6 support
 *	Kazunori MIYAZAWA @USAGI
 *	YOSHIFUJI Hideaki
 *		Split up af-specific portion
 *	Derek Atkins <derek@ihtfp.com>		Add the post_input processor
 *
 */

#include <linux/err.h>
#include <linux/slab.h>
#include <linux/kmod.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <linux/notifier.h>
#include <linux/netdevice.h>
#include <linux/netfilter.h>
#include <linux/module.h>
#include <linux/cache.h>
#include <linux/cpu.h>
#include <linux/audit.h>
#include <linux/rhashtable.h>
#include <linux/if_tunnel.h>
#include <net/dst.h>
#include <net/flow.h>
#include <net/inet_ecn.h>
#include <net/xfrm.h>
#include <net/ip.h>
#include <net/gre.h>
#if IS_ENABLED(CONFIG_IPV6_MIP6)
#include <net/mip6.h>
#endif
#ifdef CONFIG_XFRM_STATISTICS
#include <net/snmp.h>
#endif
#ifdef CONFIG_XFRM_ESPINTCP
#include <net/espintcp.h>
#endif

#include "xfrm_hash.h"

#define XFRM_QUEUE_TMO_MIN ((unsigned)(HZ/10))
#define XFRM_QUEUE_TMO_MAX ((unsigned)(60*HZ))
#define XFRM_MAX_QUEUE_LEN	100
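
/* The policy hold-queue timer starts at XFRM_QUEUE_TMO_MIN when packets are
 * requeued onto a replacement policy (see xfrm_policy_requeue()) and is
 * bounded above by XFRM_QUEUE_TMO_MAX in the queue processing path; at most
 * XFRM_MAX_QUEUE_LEN packets are held per policy while its bundle resolves.
 */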

struct xfrm_flo {
	struct dst_entry *dst_orig;
	u8 flags;
};

/* prefixes smaller than this are stored in lists, not trees. */
#define INEXACT_PREFIXLEN_IPV4	16
#define INEXACT_PREFIXLEN_IPV6	48
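
/* For example, an IPv4 selector with prefixlen 24 is specific enough to be
 * placed in a per-bin rb-tree, while a 10.0.0.0/8 selector (prefixlen 8,
 * below INEXACT_PREFIXLEN_IPV4) stays on a plain list; see
 * xfrm_pol_inexact_addr_use_any_list().
 */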

struct xfrm_pol_inexact_node {
	struct rb_node node;
	union {
		xfrm_address_t addr;
		struct rcu_head rcu;
	};
	u8 prefixlen;

	struct rb_root root;

	/* the policies matching this node; the list may be empty */
	struct hlist_head hhead;
};

/* xfrm inexact policy search tree:
 * xfrm_pol_inexact_bin = hash(dir,type,family,if_id);
 *  |
 * +---- root_d: sorted by daddr:prefix
 * |                 |
 * |        xfrm_pol_inexact_node
 * |                 |
 * |                 +- root: sorted by saddr/prefix
 * |                 |              |
 * |                 |         xfrm_pol_inexact_node
 * |                 |              |
 * |                 |              + root: unused
 * |                 |              |
 * |                 |              + hhead: saddr:daddr policies
 * |                 |
 * |                 +- coarse policies and all any:daddr policies
 * |
 * +---- root_s: sorted by saddr:prefix
 * |                 |
 * |        xfrm_pol_inexact_node
 * |                 |
 * |                 + root: unused
 * |                 |
 * |                 + hhead: saddr:any policies
 * |
 * +---- coarse policies and all any:any policies
 *
 * Lookups return four candidate lists:
 * 1. any:any list from top-level xfrm_pol_inexact_bin
 * 2. any:daddr list from daddr tree
 * 3. saddr:daddr list from 2nd level daddr tree
 * 4. saddr:any list from saddr tree
 *
 * This result set then needs to be searched for the policy with
 * the lowest priority. If two candidates have the same priority, the
 * youngest one wins.
 */
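
/* Example: for a flow with source S and destination D, the candidate set is
 * at most bin->hhead (any:any), the hhead of the root_d node matching D
 * (coarse and any:D policies), the hhead of that node's saddr subtree node
 * matching S (S:D policies), and the hhead of the root_s node matching S
 * (S:any policies). All four lists are then scanned for the best match.
 */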

struct xfrm_pol_inexact_key {
	possible_net_t net;
	u32 if_id;
	u16 family;
	u8 dir, type;
};

struct xfrm_pol_inexact_bin {
	struct xfrm_pol_inexact_key k;
	struct rhash_head head;
	/* list containing '*:*' policies */
	struct hlist_head hhead;

	seqcount_spinlock_t count;
	/* tree sorted by daddr/prefix */
	struct rb_root root_d;

	/* tree sorted by saddr/prefix */
	struct rb_root root_s;

	/* slow path below */
	struct list_head inexact_bins;
	struct rcu_head rcu;
};

enum xfrm_pol_inexact_candidate_type {
	XFRM_POL_CAND_BOTH,
	XFRM_POL_CAND_SADDR,
	XFRM_POL_CAND_DADDR,
	XFRM_POL_CAND_ANY,

	XFRM_POL_CAND_MAX,
};

struct xfrm_pol_inexact_candidates {
	struct hlist_head *res[XFRM_POL_CAND_MAX];
};

static DEFINE_SPINLOCK(xfrm_if_cb_lock);
static struct xfrm_if_cb const __rcu *xfrm_if_cb __read_mostly;

static DEFINE_SPINLOCK(xfrm_policy_afinfo_lock);
static struct xfrm_policy_afinfo const __rcu *xfrm_policy_afinfo[AF_INET6 + 1]
						__read_mostly;

static struct kmem_cache *xfrm_dst_cache __ro_after_init;

static struct rhashtable xfrm_policy_inexact_table;
static const struct rhashtable_params xfrm_pol_inexact_params;

static void xfrm_init_pmtu(struct xfrm_dst **bundle, int nr);
static int stale_bundle(struct dst_entry *dst);
static int xfrm_bundle_ok(struct xfrm_dst *xdst);
static void xfrm_policy_queue_process(struct timer_list *t);

static void __xfrm_policy_link(struct xfrm_policy *pol, int dir);
static struct xfrm_policy *__xfrm_policy_unlink(struct xfrm_policy *pol,
						int dir);

static struct xfrm_pol_inexact_bin *
xfrm_policy_inexact_lookup(struct net *net, u8 type, u16 family, u8 dir,
			   u32 if_id);

static struct xfrm_pol_inexact_bin *
xfrm_policy_inexact_lookup_rcu(struct net *net,
			       u8 type, u16 family, u8 dir, u32 if_id);
static struct xfrm_policy *
xfrm_policy_insert_list(struct hlist_head *chain, struct xfrm_policy *policy,
			bool excl);
static void xfrm_policy_insert_inexact_list(struct hlist_head *chain,
					    struct xfrm_policy *policy);

static bool
xfrm_policy_find_inexact_candidates(struct xfrm_pol_inexact_candidates *cand,
				    struct xfrm_pol_inexact_bin *b,
				    const xfrm_address_t *saddr,
				    const xfrm_address_t *daddr);

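/* Meant for RCU readers: a policy found via RCU may already have dropped its
 * last reference, so refcount_inc_not_zero() is used to refuse to resurrect
 * such an entry.
 */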
static inline bool xfrm_pol_hold_rcu(struct xfrm_policy *policy)
{
	return refcount_inc_not_zero(&policy->refcnt);
}

static inline bool
__xfrm4_selector_match(const struct xfrm_selector *sel, const struct flowi *fl)
{
	const struct flowi4 *fl4 = &fl->u.ip4;

	return  addr4_match(fl4->daddr, sel->daddr.a4, sel->prefixlen_d) &&
		addr4_match(fl4->saddr, sel->saddr.a4, sel->prefixlen_s) &&
		!((xfrm_flowi_dport(fl, &fl4->uli) ^ sel->dport) & sel->dport_mask) &&
		!((xfrm_flowi_sport(fl, &fl4->uli) ^ sel->sport) & sel->sport_mask) &&
		(fl4->flowi4_proto == sel->proto || !sel->proto) &&
		(fl4->flowi4_oif == sel->ifindex || !sel->ifindex);
}

static inline bool
__xfrm6_selector_match(const struct xfrm_selector *sel, const struct flowi *fl)
{
	const struct flowi6 *fl6 = &fl->u.ip6;

	return  addr_match(&fl6->daddr, &sel->daddr, sel->prefixlen_d) &&
		addr_match(&fl6->saddr, &sel->saddr, sel->prefixlen_s) &&
		!((xfrm_flowi_dport(fl, &fl6->uli) ^ sel->dport) & sel->dport_mask) &&
		!((xfrm_flowi_sport(fl, &fl6->uli) ^ sel->sport) & sel->sport_mask) &&
		(fl6->flowi6_proto == sel->proto || !sel->proto) &&
		(fl6->flowi6_oif == sel->ifindex || !sel->ifindex);
}

bool xfrm_selector_match(const struct xfrm_selector *sel, const struct flowi *fl,
			 unsigned short family)
{
	switch (family) {
	case AF_INET:
		return __xfrm4_selector_match(sel, fl);
	case AF_INET6:
		return __xfrm6_selector_match(sel, fl);
	}
	return false;
}

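/* On success this returns with rcu_read_lock() held; the caller is expected
 * to drop it once done with the afinfo pointer (see __xfrm_dst_lookup(),
 * which unlocks after afinfo->dst_lookup() returns). Only the failure path
 * unlocks here.
 */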
static const struct xfrm_policy_afinfo *xfrm_policy_get_afinfo(unsigned short family)
{
	const struct xfrm_policy_afinfo *afinfo;

	if (unlikely(family >= ARRAY_SIZE(xfrm_policy_afinfo)))
		return NULL;
	rcu_read_lock();
	afinfo = rcu_dereference(xfrm_policy_afinfo[family]);
	if (unlikely(!afinfo))
		rcu_read_unlock();
	return afinfo;
}

/* Called with rcu_read_lock(). */
static const struct xfrm_if_cb *xfrm_if_get_cb(void)
{
	return rcu_dereference(xfrm_if_cb);
}

struct dst_entry *__xfrm_dst_lookup(int family,
				    const struct xfrm_dst_lookup_params *params)
{
	const struct xfrm_policy_afinfo *afinfo;
	struct dst_entry *dst;

	afinfo = xfrm_policy_get_afinfo(family);
	if (unlikely(afinfo == NULL))
		return ERR_PTR(-EAFNOSUPPORT);

	dst = afinfo->dst_lookup(params);

	rcu_read_unlock();

	return dst;
}
EXPORT_SYMBOL(__xfrm_dst_lookup);

static inline struct dst_entry *xfrm_dst_lookup(struct xfrm_state *x,
						int tos, int oif,
						xfrm_address_t *prev_saddr,
						xfrm_address_t *prev_daddr,
						int family, u32 mark)
{
	struct xfrm_dst_lookup_params params;
	struct net *net = xs_net(x);
	xfrm_address_t *saddr = &x->props.saddr;
	xfrm_address_t *daddr = &x->id.daddr;
	struct dst_entry *dst;

	if (x->type->flags & XFRM_TYPE_LOCAL_COADDR) {
		saddr = x->coaddr;
		daddr = prev_daddr;
	}
	if (x->type->flags & XFRM_TYPE_REMOTE_COADDR) {
		saddr = prev_saddr;
		daddr = x->coaddr;
	}

	params.net = net;
	params.saddr = saddr;
	params.daddr = daddr;
	params.tos = tos;
	params.oif = oif;
	params.mark = mark;
	params.ipproto = x->id.proto;
	if (x->encap) {
		switch (x->encap->encap_type) {
		case UDP_ENCAP_ESPINUDP:
			params.ipproto = IPPROTO_UDP;
			params.uli.ports.sport = x->encap->encap_sport;
			params.uli.ports.dport = x->encap->encap_dport;
			break;
		case TCP_ENCAP_ESPINTCP:
			params.ipproto = IPPROTO_TCP;
			params.uli.ports.sport = x->encap->encap_sport;
			params.uli.ports.dport = x->encap->encap_dport;
			break;
		}
	}

	dst = __xfrm_dst_lookup(family, &params);

	if (!IS_ERR(dst)) {
		if (prev_saddr != saddr)
			memcpy(prev_saddr, saddr, sizeof(*prev_saddr));
		if (prev_daddr != daddr)
			memcpy(prev_daddr, daddr, sizeof(*prev_daddr));
	}

	return dst;
}

static inline unsigned long make_jiffies(long secs)
{
	if (secs >= (MAX_SCHEDULE_TIMEOUT-1)/HZ)
		return MAX_SCHEDULE_TIMEOUT-1;
	else
		return secs*HZ;
}

static void xfrm_policy_timer(struct timer_list *t)
{
	struct xfrm_policy *xp = from_timer(xp, t, timer);
	time64_t now = ktime_get_real_seconds();
	time64_t next = TIME64_MAX;
	int warn = 0;
	int dir;

	read_lock(&xp->lock);

	if (unlikely(xp->walk.dead))
		goto out;

	dir = xfrm_policy_id2dir(xp->index);

	if (xp->lft.hard_add_expires_seconds) {
		time64_t tmo = xp->lft.hard_add_expires_seconds +
			xp->curlft.add_time - now;
		if (tmo <= 0)
			goto expired;
		if (tmo < next)
			next = tmo;
	}
	if (xp->lft.hard_use_expires_seconds) {
		time64_t tmo = xp->lft.hard_use_expires_seconds +
			(READ_ONCE(xp->curlft.use_time) ? : xp->curlft.add_time) - now;
		if (tmo <= 0)
			goto expired;
		if (tmo < next)
			next = tmo;
	}
	if (xp->lft.soft_add_expires_seconds) {
		time64_t tmo = xp->lft.soft_add_expires_seconds +
			xp->curlft.add_time - now;
		if (tmo <= 0) {
			warn = 1;
			tmo = XFRM_KM_TIMEOUT;
		}
		if (tmo < next)
			next = tmo;
	}
	if (xp->lft.soft_use_expires_seconds) {
		time64_t tmo = xp->lft.soft_use_expires_seconds +
			(READ_ONCE(xp->curlft.use_time) ? : xp->curlft.add_time) - now;
		if (tmo <= 0) {
			warn = 1;
			tmo = XFRM_KM_TIMEOUT;
		}
		if (tmo < next)
			next = tmo;
	}

	if (warn)
		km_policy_expired(xp, dir, 0, 0);
	if (next != TIME64_MAX &&
	    !mod_timer(&xp->timer, jiffies + make_jiffies(next)))
		xfrm_pol_hold(xp);

out:
	read_unlock(&xp->lock);
	xfrm_pol_put(xp);
	return;

expired:
	read_unlock(&xp->lock);
	if (!xfrm_policy_delete(xp, dir))
		km_policy_expired(xp, dir, 1, 0);
	xfrm_pol_put(xp);
}

/* Allocate xfrm_policy. Not used here; it is supposed to be used by pfkeyv2
 * SPD calls.
 */

struct xfrm_policy *xfrm_policy_alloc(struct net *net, gfp_t gfp)
{
	struct xfrm_policy *policy;

	policy = kzalloc(sizeof(struct xfrm_policy), gfp);

	if (policy) {
		write_pnet(&policy->xp_net, net);
		INIT_LIST_HEAD(&policy->walk.all);
		INIT_HLIST_NODE(&policy->bydst_inexact_list);
		INIT_HLIST_NODE(&policy->bydst);
		INIT_HLIST_NODE(&policy->byidx);
		rwlock_init(&policy->lock);
		refcount_set(&policy->refcnt, 1);
		skb_queue_head_init(&policy->polq.hold_queue);
		timer_setup(&policy->timer, xfrm_policy_timer, 0);
		timer_setup(&policy->polq.hold_timer,
			    xfrm_policy_queue_process, 0);
	}
	return policy;
}
EXPORT_SYMBOL(xfrm_policy_alloc);

static void xfrm_policy_destroy_rcu(struct rcu_head *head)
{
	struct xfrm_policy *policy = container_of(head, struct xfrm_policy, rcu);

	security_xfrm_policy_free(policy->security);
	kfree(policy);
}

/* Destroy xfrm_policy: descendant resources must have been released by this point. */

void xfrm_policy_destroy(struct xfrm_policy *policy)
{
	BUG_ON(!policy->walk.dead);

	if (del_timer(&policy->timer) || del_timer(&policy->polq.hold_timer))
		BUG();

	xfrm_dev_policy_free(policy);
	call_rcu(&policy->rcu, xfrm_policy_destroy_rcu);
}
EXPORT_SYMBOL(xfrm_policy_destroy);

/* Rule must be locked. Release descendant resources, announce
 * the entry dead. The rule must already be unlinked from the lists.
 */

static void xfrm_policy_kill(struct xfrm_policy *policy)
{
	xfrm_dev_policy_delete(policy);

	write_lock_bh(&policy->lock);
	policy->walk.dead = 1;
	write_unlock_bh(&policy->lock);

	atomic_inc(&policy->genid);

	if (del_timer(&policy->polq.hold_timer))
		xfrm_pol_put(policy);
	skb_queue_purge(&policy->polq.hold_queue);

	if (del_timer(&policy->timer))
		xfrm_pol_put(policy);

	xfrm_pol_put(policy);
}

static unsigned int xfrm_policy_hashmax __read_mostly = 1 * 1024 * 1024;

static inline unsigned int idx_hash(struct net *net, u32 index)
{
	return __idx_hash(index, net->xfrm.policy_idx_hmask);
}

/* calculate policy hash thresholds */
static void __get_hash_thresh(struct net *net,
			      unsigned short family, int dir,
			      u8 *dbits, u8 *sbits)
{
	switch (family) {
	case AF_INET:
		*dbits = net->xfrm.policy_bydst[dir].dbits4;
		*sbits = net->xfrm.policy_bydst[dir].sbits4;
		break;

	case AF_INET6:
		*dbits = net->xfrm.policy_bydst[dir].dbits6;
		*sbits = net->xfrm.policy_bydst[dir].sbits6;
		break;

	default:
		*dbits = 0;
		*sbits = 0;
	}
}

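/* Returns the bydst hash chain for this selector, or NULL when the selector
 * is too coarse for the hash table (__sel_hash() signals this by returning
 * hmask + 1); callers then fall back to the inexact lists, as in
 * xfrm_policy_insert().
 */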
static struct hlist_head *policy_hash_bysel(struct net *net,
					    const struct xfrm_selector *sel,
					    unsigned short family, int dir)
{
	unsigned int hmask = net->xfrm.policy_bydst[dir].hmask;
	unsigned int hash;
	u8 dbits;
	u8 sbits;

	__get_hash_thresh(net, family, dir, &dbits, &sbits);
	hash = __sel_hash(sel, family, hmask, dbits, sbits);

	if (hash == hmask + 1)
		return NULL;

	return rcu_dereference_check(net->xfrm.policy_bydst[dir].table,
		     lockdep_is_held(&net->xfrm.xfrm_policy_lock)) + hash;
}

static struct hlist_head *policy_hash_direct(struct net *net,
					     const xfrm_address_t *daddr,
					     const xfrm_address_t *saddr,
					     unsigned short family, int dir)
{
	unsigned int hmask = net->xfrm.policy_bydst[dir].hmask;
	unsigned int hash;
	u8 dbits;
	u8 sbits;

	__get_hash_thresh(net, family, dir, &dbits, &sbits);
	hash = __addr_hash(daddr, saddr, family, hmask, dbits, sbits);

	return rcu_dereference_check(net->xfrm.policy_bydst[dir].table,
		     lockdep_is_held(&net->xfrm.xfrm_policy_lock)) + hash;
}

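/* Rehash one old bucket into the new table while preserving the relative
 * order of entries: the first policy moved to a new bucket becomes the
 * insertion anchor (entry0), later policies hashing to the same bucket are
 * linked behind it, and the outer loop restarts until the old chain is
 * empty. Packet-offload policies are always (re)inserted at the head.
 */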
static void xfrm_dst_hash_transfer(struct net *net,
				   struct hlist_head *list,
				   struct hlist_head *ndsttable,
				   unsigned int nhashmask,
				   int dir)
{
	struct hlist_node *tmp, *entry0 = NULL;
	struct xfrm_policy *pol;
	unsigned int h0 = 0;
	u8 dbits;
	u8 sbits;

redo:
	hlist_for_each_entry_safe(pol, tmp, list, bydst) {
		unsigned int h;

		__get_hash_thresh(net, pol->family, dir, &dbits, &sbits);
		h = __addr_hash(&pol->selector.daddr, &pol->selector.saddr,
				pol->family, nhashmask, dbits, sbits);
		if (!entry0 || pol->xdo.type == XFRM_DEV_OFFLOAD_PACKET) {
			hlist_del_rcu(&pol->bydst);
			hlist_add_head_rcu(&pol->bydst, ndsttable + h);
			h0 = h;
		} else {
			if (h != h0)
				continue;
			hlist_del_rcu(&pol->bydst);
			hlist_add_behind_rcu(&pol->bydst, entry0);
		}
		entry0 = &pol->bydst;
	}
	if (!hlist_empty(list)) {
		entry0 = NULL;
		goto redo;
	}
}

static void xfrm_idx_hash_transfer(struct hlist_head *list,
				   struct hlist_head *nidxtable,
				   unsigned int nhashmask)
{
	struct hlist_node *tmp;
	struct xfrm_policy *pol;

	hlist_for_each_entry_safe(pol, tmp, list, byidx) {
		unsigned int h;

		h = __idx_hash(pol->index, nhashmask);
		hlist_add_head(&pol->byidx, nidxtable+h);
	}
}

static unsigned long xfrm_new_hash_mask(unsigned int old_hmask)
{
	return ((old_hmask + 1) << 1) - 1;
}

static void xfrm_bydst_resize(struct net *net, int dir)
{
	unsigned int hmask = net->xfrm.policy_bydst[dir].hmask;
	unsigned int nhashmask = xfrm_new_hash_mask(hmask);
	unsigned int nsize = (nhashmask + 1) * sizeof(struct hlist_head);
	struct hlist_head *ndst = xfrm_hash_alloc(nsize);
	struct hlist_head *odst;
	int i;

	if (!ndst)
		return;

	spin_lock_bh(&net->xfrm.xfrm_policy_lock);
	write_seqcount_begin(&net->xfrm.xfrm_policy_hash_generation);

	odst = rcu_dereference_protected(net->xfrm.policy_bydst[dir].table,
				lockdep_is_held(&net->xfrm.xfrm_policy_lock));

	for (i = hmask; i >= 0; i--)
		xfrm_dst_hash_transfer(net, odst + i, ndst, nhashmask, dir);

	rcu_assign_pointer(net->xfrm.policy_bydst[dir].table, ndst);
	net->xfrm.policy_bydst[dir].hmask = nhashmask;

	write_seqcount_end(&net->xfrm.xfrm_policy_hash_generation);
	spin_unlock_bh(&net->xfrm.xfrm_policy_lock);

	synchronize_rcu();

	xfrm_hash_free(odst, (hmask + 1) * sizeof(struct hlist_head));
}

static void xfrm_byidx_resize(struct net *net)
{
	unsigned int hmask = net->xfrm.policy_idx_hmask;
	unsigned int nhashmask = xfrm_new_hash_mask(hmask);
	unsigned int nsize = (nhashmask + 1) * sizeof(struct hlist_head);
	struct hlist_head *oidx = net->xfrm.policy_byidx;
	struct hlist_head *nidx = xfrm_hash_alloc(nsize);
	int i;

	if (!nidx)
		return;

	spin_lock_bh(&net->xfrm.xfrm_policy_lock);

	for (i = hmask; i >= 0; i--)
		xfrm_idx_hash_transfer(oidx + i, nidx, nhashmask);

	net->xfrm.policy_byidx = nidx;
	net->xfrm.policy_idx_hmask = nhashmask;

	spin_unlock_bh(&net->xfrm.xfrm_policy_lock);

	xfrm_hash_free(oidx, (hmask + 1) * sizeof(struct hlist_head));
}

static inline int xfrm_bydst_should_resize(struct net *net, int dir, int *total)
{
	unsigned int cnt = net->xfrm.policy_count[dir];
	unsigned int hmask = net->xfrm.policy_bydst[dir].hmask;

	if (total)
		*total += cnt;

	if ((hmask + 1) < xfrm_policy_hashmax &&
	    cnt > hmask)
		return 1;

	return 0;
}

static inline int xfrm_byidx_should_resize(struct net *net, int total)
{
	unsigned int hmask = net->xfrm.policy_idx_hmask;

	if ((hmask + 1) < xfrm_policy_hashmax &&
	    total > hmask)
		return 1;

	return 0;
}

void xfrm_spd_getinfo(struct net *net, struct xfrmk_spdinfo *si)
{
	si->incnt = net->xfrm.policy_count[XFRM_POLICY_IN];
	si->outcnt = net->xfrm.policy_count[XFRM_POLICY_OUT];
	si->fwdcnt = net->xfrm.policy_count[XFRM_POLICY_FWD];
	si->inscnt = net->xfrm.policy_count[XFRM_POLICY_IN+XFRM_POLICY_MAX];
	si->outscnt = net->xfrm.policy_count[XFRM_POLICY_OUT+XFRM_POLICY_MAX];
	si->fwdscnt = net->xfrm.policy_count[XFRM_POLICY_FWD+XFRM_POLICY_MAX];
	si->spdhcnt = net->xfrm.policy_idx_hmask;
	si->spdhmcnt = xfrm_policy_hashmax;
}
EXPORT_SYMBOL(xfrm_spd_getinfo);

static DEFINE_MUTEX(hash_resize_mutex);
static void xfrm_hash_resize(struct work_struct *work)
{
	struct net *net = container_of(work, struct net, xfrm.policy_hash_work);
	int dir, total;

	mutex_lock(&hash_resize_mutex);

	total = 0;
	for (dir = 0; dir < XFRM_POLICY_MAX; dir++) {
		if (xfrm_bydst_should_resize(net, dir, &total))
			xfrm_bydst_resize(net, dir);
	}
	if (xfrm_byidx_should_resize(net, total))
		xfrm_byidx_resize(net);

	mutex_unlock(&hash_resize_mutex);
}

/* Make sure *pol can be inserted into fastbin.
 * Useful to check that later insert requests will be successful
 * (provided xfrm_policy_lock is held throughout).
 */
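/* Note the lookup-or-insert race handling below: if another CPU inserted a
 * bin with the same key between rhashtable_lookup_fast() and
 * rhashtable_lookup_get_insert_key(), the existing bin is returned and the
 * freshly allocated one is freed.
 */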
static struct xfrm_pol_inexact_bin *
xfrm_policy_inexact_alloc_bin(const struct xfrm_policy *pol, u8 dir)
{
	struct xfrm_pol_inexact_bin *bin, *prev;
	struct xfrm_pol_inexact_key k = {
		.family = pol->family,
		.type = pol->type,
		.dir = dir,
		.if_id = pol->if_id,
	};
	struct net *net = xp_net(pol);

	lockdep_assert_held(&net->xfrm.xfrm_policy_lock);

	write_pnet(&k.net, net);
	bin = rhashtable_lookup_fast(&xfrm_policy_inexact_table, &k,
				     xfrm_pol_inexact_params);
	if (bin)
		return bin;

	bin = kzalloc(sizeof(*bin), GFP_ATOMIC);
	if (!bin)
		return NULL;

	bin->k = k;
	INIT_HLIST_HEAD(&bin->hhead);
	bin->root_d = RB_ROOT;
	bin->root_s = RB_ROOT;
	seqcount_spinlock_init(&bin->count, &net->xfrm.xfrm_policy_lock);

	prev = rhashtable_lookup_get_insert_key(&xfrm_policy_inexact_table,
						&bin->k, &bin->head,
						xfrm_pol_inexact_params);
	if (!prev) {
		list_add(&bin->inexact_bins, &net->xfrm.inexact_bins);
		return bin;
	}

	kfree(bin);

	return IS_ERR(prev) ? NULL : prev;
}

static bool xfrm_pol_inexact_addr_use_any_list(const xfrm_address_t *addr,
					       int family, u8 prefixlen)
{
	if (xfrm_addr_any(addr, family))
		return true;

	if (family == AF_INET6 && prefixlen < INEXACT_PREFIXLEN_IPV6)
		return true;

	if (family == AF_INET && prefixlen < INEXACT_PREFIXLEN_IPV4)
		return true;

	return false;
}

static bool
xfrm_policy_inexact_insert_use_any_list(const struct xfrm_policy *policy)
{
	const xfrm_address_t *addr;
	bool saddr_any, daddr_any;
	u8 prefixlen;

	addr = &policy->selector.saddr;
	prefixlen = policy->selector.prefixlen_s;

	saddr_any = xfrm_pol_inexact_addr_use_any_list(addr,
						       policy->family,
						       prefixlen);
	addr = &policy->selector.daddr;
	prefixlen = policy->selector.prefixlen_d;
	daddr_any = xfrm_pol_inexact_addr_use_any_list(addr,
						       policy->family,
						       prefixlen);
	return saddr_any && daddr_any;
}

static void xfrm_pol_inexact_node_init(struct xfrm_pol_inexact_node *node,
				       const xfrm_address_t *addr, u8 prefixlen)
{
	node->addr = *addr;
	node->prefixlen = prefixlen;
}

static struct xfrm_pol_inexact_node *
xfrm_pol_inexact_node_alloc(const xfrm_address_t *addr, u8 prefixlen)
{
	struct xfrm_pol_inexact_node *node;

	node = kzalloc(sizeof(*node), GFP_ATOMIC);
	if (node)
		xfrm_pol_inexact_node_init(node, addr, prefixlen);

	return node;
}

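/* Compare the leading prefixlen bits of two addresses, memcmp()-style.
 * For IPv6 the prefix is split into whole 32-bit words (pdw) and a
 * remainder (pbi): e.g. prefixlen 67 compares 8 bytes via memcmp() (pdw = 2)
 * and then the top 3 bits (pbi = 3) of the third word under a mask.
 */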
static int xfrm_policy_addr_delta(const xfrm_address_t *a,
				  const xfrm_address_t *b,
				  u8 prefixlen, u16 family)
{
	u32 ma, mb, mask;
	unsigned int pdw, pbi;
	int delta = 0;

	switch (family) {
	case AF_INET:
		if (prefixlen == 0)
			return 0;
		mask = ~0U << (32 - prefixlen);
		ma = ntohl(a->a4) & mask;
		mb = ntohl(b->a4) & mask;
		if (ma < mb)
			delta = -1;
		else if (ma > mb)
			delta = 1;
		break;
	case AF_INET6:
		pdw = prefixlen >> 5;
		pbi = prefixlen & 0x1f;

		if (pdw) {
			delta = memcmp(a->a6, b->a6, pdw << 2);
			if (delta)
				return delta;
		}
		if (pbi) {
			mask = ~0U << (32 - pbi);
			ma = ntohl(a->a6[pdw]) & mask;
			mb = ntohl(b->a6[pdw]) & mask;
			if (ma < mb)
				delta = -1;
			else if (ma > mb)
				delta = 1;
		}
		break;
	default:
		break;
	}

	return delta;
}

static void xfrm_policy_inexact_list_reinsert(struct net *net,
					      struct xfrm_pol_inexact_node *n,
					      u16 family)
{
	unsigned int matched_s, matched_d;
	struct xfrm_policy *policy, *p;

	matched_s = 0;
	matched_d = 0;

	list_for_each_entry_reverse(policy, &net->xfrm.policy_all, walk.all) {
		struct hlist_node *newpos = NULL;
		bool matches_s, matches_d;

		if (policy->walk.dead || !policy->bydst_reinsert)
			continue;

		WARN_ON_ONCE(policy->family != family);

		policy->bydst_reinsert = false;
		hlist_for_each_entry(p, &n->hhead, bydst) {
			if (policy->priority > p->priority)
				newpos = &p->bydst;
			else if (policy->priority == p->priority &&
				 policy->pos > p->pos)
				newpos = &p->bydst;
			else
				break;
		}

		if (newpos && policy->xdo.type != XFRM_DEV_OFFLOAD_PACKET)
			hlist_add_behind_rcu(&policy->bydst, newpos);
		else
			hlist_add_head_rcu(&policy->bydst, &n->hhead);

		/* paranoia checks follow.
		 * Check that the reinserted policy matches at least
		 * saddr or daddr for current node prefix.
		 *
		 * Matching both is fine, matching saddr in one policy
		 * (but not daddr) and then matching only daddr in another
		 * is a bug.
		 */
		matches_s = xfrm_policy_addr_delta(&policy->selector.saddr,
						   &n->addr,
						   n->prefixlen,
						   family) == 0;
		matches_d = xfrm_policy_addr_delta(&policy->selector.daddr,
						   &n->addr,
						   n->prefixlen,
						   family) == 0;
		if (matches_s && matches_d)
			continue;

		WARN_ON_ONCE(!matches_s && !matches_d);
		if (matches_s)
			matched_s++;
		if (matches_d)
			matched_d++;
		WARN_ON_ONCE(matched_s && matched_d);
	}
}

static void xfrm_policy_inexact_node_reinsert(struct net *net,
					      struct xfrm_pol_inexact_node *n,
					      struct rb_root *new,
					      u16 family)
{
	struct xfrm_pol_inexact_node *node;
	struct rb_node **p, *parent;

	/* we should not have another subtree here */
	WARN_ON_ONCE(!RB_EMPTY_ROOT(&n->root));
restart:
	parent = NULL;
	p = &new->rb_node;
	while (*p) {
		u8 prefixlen;
		int delta;

		parent = *p;
		node = rb_entry(*p, struct xfrm_pol_inexact_node, node);

		prefixlen = min(node->prefixlen, n->prefixlen);

		delta = xfrm_policy_addr_delta(&n->addr, &node->addr,
					       prefixlen, family);
		if (delta < 0) {
			p = &parent->rb_left;
		} else if (delta > 0) {
			p = &parent->rb_right;
		} else {
			bool same_prefixlen = node->prefixlen == n->prefixlen;
			struct xfrm_policy *tmp;

			hlist_for_each_entry(tmp, &n->hhead, bydst) {
				tmp->bydst_reinsert = true;
				hlist_del_rcu(&tmp->bydst);
			}

			node->prefixlen = prefixlen;

			xfrm_policy_inexact_list_reinsert(net, node, family);

			if (same_prefixlen) {
				kfree_rcu(n, rcu);
				return;
			}

			rb_erase(*p, new);
			kfree_rcu(n, rcu);
			n = node;
			goto restart;
		}
	}

	rb_link_node_rcu(&n->node, parent, p);
	rb_insert_color(&n->node, new);
}

/* merge nodes v and n */
static void xfrm_policy_inexact_node_merge(struct net *net,
					   struct xfrm_pol_inexact_node *v,
					   struct xfrm_pol_inexact_node *n,
					   u16 family)
{
	struct xfrm_pol_inexact_node *node;
	struct xfrm_policy *tmp;
	struct rb_node *rnode;

	/* To-be-merged node v has a subtree.
	 *
	 * Dismantle it and insert its nodes to n->root.
	 */
	while ((rnode = rb_first(&v->root)) != NULL) {
		node = rb_entry(rnode, struct xfrm_pol_inexact_node, node);
		rb_erase(&node->node, &v->root);
		xfrm_policy_inexact_node_reinsert(net, node, &n->root,
						  family);
	}

	hlist_for_each_entry(tmp, &v->hhead, bydst) {
		tmp->bydst_reinsert = true;
		hlist_del_rcu(&tmp->bydst);
	}

	xfrm_policy_inexact_list_reinsert(net, n, family);
}

static struct xfrm_pol_inexact_node *
xfrm_policy_inexact_insert_node(struct net *net,
				struct rb_root *root,
				xfrm_address_t *addr,
				u16 family, u8 prefixlen, u8 dir)
{
	struct xfrm_pol_inexact_node *cached = NULL;
	struct rb_node **p, *parent = NULL;
	struct xfrm_pol_inexact_node *node;

	p = &root->rb_node;
	while (*p) {
		int delta;

		parent = *p;
		node = rb_entry(*p, struct xfrm_pol_inexact_node, node);

		delta = xfrm_policy_addr_delta(addr, &node->addr,
					       node->prefixlen,
					       family);
		if (delta == 0 && prefixlen >= node->prefixlen) {
			WARN_ON_ONCE(cached); /* ipsec policies got lost */
			return node;
		}

		if (delta < 0)
			p = &parent->rb_left;
		else
			p = &parent->rb_right;

		if (prefixlen < node->prefixlen) {
			delta = xfrm_policy_addr_delta(addr, &node->addr,
						       prefixlen,
						       family);
			if (delta)
				continue;

			/* This node is a subnet of the new prefix. It needs
			 * to be removed and re-inserted with the smaller
			 * prefix and all nodes that are now also covered
			 * by the reduced prefixlen.
			 */
			rb_erase(&node->node, root);

			if (!cached) {
				xfrm_pol_inexact_node_init(node, addr,
							   prefixlen);
				cached = node;
			} else {
				/* This node also falls within the new
				 * prefixlen. Merge the to-be-reinserted
				 * node and this one.
				 */
				xfrm_policy_inexact_node_merge(net, node,
							       cached, family);
				kfree_rcu(node, rcu);
			}

			/* restart */
			p = &root->rb_node;
			parent = NULL;
		}
	}

	node = cached;
	if (!node) {
		node = xfrm_pol_inexact_node_alloc(addr, prefixlen);
		if (!node)
			return NULL;
	}

	rb_link_node_rcu(&node->node, parent, p);
	rb_insert_color(&node->node, root);

	return node;
}

static void xfrm_policy_inexact_gc_tree(struct rb_root *r, bool rm)
{
	struct xfrm_pol_inexact_node *node;
	struct rb_node *rn = rb_first(r);

	while (rn) {
		node = rb_entry(rn, struct xfrm_pol_inexact_node, node);

		xfrm_policy_inexact_gc_tree(&node->root, rm);
		rn = rb_next(rn);

		if (!hlist_empty(&node->hhead) || !RB_EMPTY_ROOT(&node->root)) {
			WARN_ON_ONCE(rm);
			continue;
		}

		rb_erase(&node->node, r);
		kfree_rcu(node, rcu);
	}
}

static void __xfrm_policy_inexact_prune_bin(struct xfrm_pol_inexact_bin *b, bool net_exit)
{
	write_seqcount_begin(&b->count);
	xfrm_policy_inexact_gc_tree(&b->root_d, net_exit);
	xfrm_policy_inexact_gc_tree(&b->root_s, net_exit);
	write_seqcount_end(&b->count);

	if (!RB_EMPTY_ROOT(&b->root_d) || !RB_EMPTY_ROOT(&b->root_s) ||
	    !hlist_empty(&b->hhead)) {
		WARN_ON_ONCE(net_exit);
		return;
	}

	if (rhashtable_remove_fast(&xfrm_policy_inexact_table, &b->head,
				   xfrm_pol_inexact_params) == 0) {
		list_del(&b->inexact_bins);
		kfree_rcu(b, rcu);
	}
}

static void xfrm_policy_inexact_prune_bin(struct xfrm_pol_inexact_bin *b)
{
	struct net *net = read_pnet(&b->k.net);

	spin_lock_bh(&net->xfrm.xfrm_policy_lock);
	__xfrm_policy_inexact_prune_bin(b, false);
	spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
}

static void __xfrm_policy_inexact_flush(struct net *net)
{
	struct xfrm_pol_inexact_bin *bin, *t;

	lockdep_assert_held(&net->xfrm.xfrm_policy_lock);

	list_for_each_entry_safe(bin, t, &net->xfrm.inexact_bins, inexact_bins)
		__xfrm_policy_inexact_prune_bin(bin, false);
}

static struct hlist_head *
xfrm_policy_inexact_alloc_chain(struct xfrm_pol_inexact_bin *bin,
				struct xfrm_policy *policy, u8 dir)
{
	struct xfrm_pol_inexact_node *n;
	struct net *net;

	net = xp_net(policy);
	lockdep_assert_held(&net->xfrm.xfrm_policy_lock);

	if (xfrm_policy_inexact_insert_use_any_list(policy))
		return &bin->hhead;

	if (xfrm_pol_inexact_addr_use_any_list(&policy->selector.daddr,
					       policy->family,
					       policy->selector.prefixlen_d)) {
		write_seqcount_begin(&bin->count);
		n = xfrm_policy_inexact_insert_node(net,
						    &bin->root_s,
						    &policy->selector.saddr,
						    policy->family,
						    policy->selector.prefixlen_s,
						    dir);
		write_seqcount_end(&bin->count);
		if (!n)
			return NULL;

		return &n->hhead;
	}

	/* daddr is fixed */
	write_seqcount_begin(&bin->count);
	n = xfrm_policy_inexact_insert_node(net,
					    &bin->root_d,
					    &policy->selector.daddr,
					    policy->family,
					    policy->selector.prefixlen_d, dir);
	write_seqcount_end(&bin->count);
	if (!n)
		return NULL;

	/* saddr is wildcard */
	if (xfrm_pol_inexact_addr_use_any_list(&policy->selector.saddr,
					       policy->family,
					       policy->selector.prefixlen_s))
		return &n->hhead;

	write_seqcount_begin(&bin->count);
	n = xfrm_policy_inexact_insert_node(net,
					    &n->root,
					    &policy->selector.saddr,
					    policy->family,
					    policy->selector.prefixlen_s, dir);
	write_seqcount_end(&bin->count);
	if (!n)
		return NULL;

	return &n->hhead;
}

static struct xfrm_policy *
xfrm_policy_inexact_insert(struct xfrm_policy *policy, u8 dir, int excl)
{
	struct xfrm_pol_inexact_bin *bin;
	struct xfrm_policy *delpol;
	struct hlist_head *chain;
	struct net *net;

	bin = xfrm_policy_inexact_alloc_bin(policy, dir);
	if (!bin)
		return ERR_PTR(-ENOMEM);

	net = xp_net(policy);
	lockdep_assert_held(&net->xfrm.xfrm_policy_lock);

	chain = xfrm_policy_inexact_alloc_chain(bin, policy, dir);
	if (!chain) {
		__xfrm_policy_inexact_prune_bin(bin, false);
		return ERR_PTR(-ENOMEM);
	}

	delpol = xfrm_policy_insert_list(chain, policy, excl);
	if (delpol && excl) {
		__xfrm_policy_inexact_prune_bin(bin, false);
		return ERR_PTR(-EEXIST);
	}

	chain = &net->xfrm.policy_inexact[dir];
	xfrm_policy_insert_inexact_list(chain, policy);

	if (delpol)
		__xfrm_policy_inexact_prune_bin(bin, false);

	return delpol;
}

static void xfrm_hash_rebuild(struct work_struct *work)
{
	struct net *net = container_of(work, struct net,
				       xfrm.policy_hthresh.work);
	unsigned int hmask;
	struct xfrm_policy *pol;
	struct xfrm_policy *policy;
	struct hlist_head *chain;
	struct hlist_head *odst;
	struct hlist_node *newpos;
	int i;
	int dir;
	unsigned seq;
	u8 lbits4, rbits4, lbits6, rbits6;

	mutex_lock(&hash_resize_mutex);

	/* read selector prefixlen thresholds */
	do {
		seq = read_seqbegin(&net->xfrm.policy_hthresh.lock);

		lbits4 = net->xfrm.policy_hthresh.lbits4;
		rbits4 = net->xfrm.policy_hthresh.rbits4;
		lbits6 = net->xfrm.policy_hthresh.lbits6;
		rbits6 = net->xfrm.policy_hthresh.rbits6;
	} while (read_seqretry(&net->xfrm.policy_hthresh.lock, seq));

	spin_lock_bh(&net->xfrm.xfrm_policy_lock);
	write_seqcount_begin(&net->xfrm.xfrm_policy_hash_generation);

	/* make sure that we can insert the indirect policies again before
	 * we start with destructive action.
	 */
	list_for_each_entry(policy, &net->xfrm.policy_all, walk.all) {
		struct xfrm_pol_inexact_bin *bin;
		u8 dbits, sbits;

		if (policy->walk.dead)
			continue;

		dir = xfrm_policy_id2dir(policy->index);
		if (dir >= XFRM_POLICY_MAX)
			continue;

		if ((dir & XFRM_POLICY_MASK) == XFRM_POLICY_OUT) {
			if (policy->family == AF_INET) {
				dbits = rbits4;
				sbits = lbits4;
			} else {
				dbits = rbits6;
				sbits = lbits6;
			}
		} else {
			if (policy->family == AF_INET) {
				dbits = lbits4;
				sbits = rbits4;
			} else {
				dbits = lbits6;
				sbits = rbits6;
			}
		}

		if (policy->selector.prefixlen_d < dbits ||
		    policy->selector.prefixlen_s < sbits)
			continue;

		bin = xfrm_policy_inexact_alloc_bin(policy, dir);
		if (!bin)
			goto out_unlock;

		if (!xfrm_policy_inexact_alloc_chain(bin, policy, dir))
			goto out_unlock;
	}

	/* reset the bydst and inexact table in all directions */
	for (dir = 0; dir < XFRM_POLICY_MAX; dir++) {
		struct hlist_node *n;

		hlist_for_each_entry_safe(policy, n,
					  &net->xfrm.policy_inexact[dir],
					  bydst_inexact_list) {
			hlist_del_rcu(&policy->bydst);
			hlist_del_init(&policy->bydst_inexact_list);
		}

		hmask = net->xfrm.policy_bydst[dir].hmask;
		odst = net->xfrm.policy_bydst[dir].table;
		for (i = hmask; i >= 0; i--) {
			hlist_for_each_entry_safe(policy, n, odst + i, bydst)
				hlist_del_rcu(&policy->bydst);
		}
		if ((dir & XFRM_POLICY_MASK) == XFRM_POLICY_OUT) {
			/* dir out => dst = remote, src = local */
			net->xfrm.policy_bydst[dir].dbits4 = rbits4;
			net->xfrm.policy_bydst[dir].sbits4 = lbits4;
			net->xfrm.policy_bydst[dir].dbits6 = rbits6;
			net->xfrm.policy_bydst[dir].sbits6 = lbits6;
		} else {
			/* dir in/fwd => dst = local, src = remote */
			net->xfrm.policy_bydst[dir].dbits4 = lbits4;
			net->xfrm.policy_bydst[dir].sbits4 = rbits4;
			net->xfrm.policy_bydst[dir].dbits6 = lbits6;
			net->xfrm.policy_bydst[dir].sbits6 = rbits6;
		}
	}

	/* re-insert all policies by order of creation */
	list_for_each_entry_reverse(policy, &net->xfrm.policy_all, walk.all) {
		if (policy->walk.dead)
			continue;
		dir = xfrm_policy_id2dir(policy->index);
		if (dir >= XFRM_POLICY_MAX) {
			/* skip socket policies */
			continue;
		}
		newpos = NULL;
		chain = policy_hash_bysel(net, &policy->selector,
					  policy->family, dir);

		if (!chain) {
			void *p = xfrm_policy_inexact_insert(policy, dir, 0);

			WARN_ONCE(IS_ERR(p), "reinsert: %ld\n", PTR_ERR(p));
			continue;
		}

		hlist_for_each_entry(pol, chain, bydst) {
			if (policy->priority >= pol->priority)
				newpos = &pol->bydst;
			else
				break;
		}
		if (newpos && policy->xdo.type != XFRM_DEV_OFFLOAD_PACKET)
			hlist_add_behind_rcu(&policy->bydst, newpos);
		else
			hlist_add_head_rcu(&policy->bydst, chain);
	}

out_unlock:
	__xfrm_policy_inexact_flush(net);
	write_seqcount_end(&net->xfrm.xfrm_policy_hash_generation);
	spin_unlock_bh(&net->xfrm.xfrm_policy_lock);

	mutex_unlock(&hash_resize_mutex);
}

void xfrm_policy_hash_rebuild(struct net *net)
{
	schedule_work(&net->xfrm.policy_hthresh.work);
}
EXPORT_SYMBOL(xfrm_policy_hash_rebuild);

/* Generate new index... KAME seems to generate them ordered by cost
 * of an absolute unpredictability of ordering of rules. This will not pass. */
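/* The generated index advances in steps of 8, so its low bits always encode
 * the direction that is OR'ed in below; xfrm_policy_id2dir() recovers the
 * direction from a policy index on this basis.
 */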
static u32 xfrm_gen_index(struct net *net, int dir, u32 index)
{
	for (;;) {
		struct hlist_head *list;
		struct xfrm_policy *p;
		u32 idx;
		int found;

		if (!index) {
			idx = (net->xfrm.idx_generator | dir);
			net->xfrm.idx_generator += 8;
		} else {
			idx = index;
			index = 0;
		}

		if (idx == 0)
			idx = 8;
		list = net->xfrm.policy_byidx + idx_hash(net, idx);
		found = 0;
		hlist_for_each_entry(p, list, byidx) {
			if (p->index == idx) {
				found = 1;
				break;
			}
		}
		if (!found)
			return idx;
	}
}

static inline int selector_cmp(struct xfrm_selector *s1, struct xfrm_selector *s2)
{
	u32 *p1 = (u32 *) s1;
	u32 *p2 = (u32 *) s2;
	int len = sizeof(struct xfrm_selector) / sizeof(u32);
	int i;

	for (i = 0; i < len; i++) {
		if (p1[i] != p2[i])
			return 1;
	}

	return 0;
}

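/* The hold-queue timer owns a reference on its policy: a successful
 * del_timer() on the old policy drops that reference, and when mod_timer()
 * on the new policy returns 0 (timer was not pending) a fresh reference is
 * taken for it.
 */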
static void xfrm_policy_requeue(struct xfrm_policy *old,
				struct xfrm_policy *new)
{
	struct xfrm_policy_queue *pq = &old->polq;
	struct sk_buff_head list;

	if (skb_queue_empty(&pq->hold_queue))
		return;

	__skb_queue_head_init(&list);

	spin_lock_bh(&pq->hold_queue.lock);
	skb_queue_splice_init(&pq->hold_queue, &list);
	if (del_timer(&pq->hold_timer))
		xfrm_pol_put(old);
	spin_unlock_bh(&pq->hold_queue.lock);

	pq = &new->polq;

	spin_lock_bh(&pq->hold_queue.lock);
	skb_queue_splice(&list, &pq->hold_queue);
	pq->timeout = XFRM_QUEUE_TMO_MIN;
	if (!mod_timer(&pq->hold_timer, jiffies))
		xfrm_pol_hold(new);
	spin_unlock_bh(&pq->hold_queue.lock);
}

static inline bool xfrm_policy_mark_match(const struct xfrm_mark *mark,
					  struct xfrm_policy *pol)
{
	return mark->v == pol->mark.v && mark->m == pol->mark.m;
}

static u32 xfrm_pol_bin_key(const void *data, u32 len, u32 seed)
{
	const struct xfrm_pol_inexact_key *k = data;
	u32 a = k->type << 24 | k->dir << 16 | k->family;

	return jhash_3words(a, k->if_id, net_hash_mix(read_pnet(&k->net)),
			    seed);
}

static u32 xfrm_pol_bin_obj(const void *data, u32 len, u32 seed)
{
	const struct xfrm_pol_inexact_bin *b = data;

	return xfrm_pol_bin_key(&b->k, 0, seed);
}

static int xfrm_pol_bin_cmp(struct rhashtable_compare_arg *arg,
			    const void *ptr)
{
	const struct xfrm_pol_inexact_key *key = arg->key;
	const struct xfrm_pol_inexact_bin *b = ptr;
	int ret;

	if (!net_eq(read_pnet(&b->k.net), read_pnet(&key->net)))
		return -1;

	ret = b->k.dir ^ key->dir;
	if (ret)
		return ret;

	ret = b->k.type ^ key->type;
	if (ret)
		return ret;

	ret = b->k.family ^ key->family;
	if (ret)
		return ret;

	return b->k.if_id ^ key->if_id;
}
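
/* rhashtable's obj_cmpfn only needs to return zero on a match, which is why
 * the XOR-based comparisons above suffice: the result is 0 iff every key
 * field is equal.
 */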

static const struct rhashtable_params xfrm_pol_inexact_params = {
	.head_offset		= offsetof(struct xfrm_pol_inexact_bin, head),
	.hashfn			= xfrm_pol_bin_key,
	.obj_hashfn		= xfrm_pol_bin_obj,
	.obj_cmpfn		= xfrm_pol_bin_cmp,
	.automatic_shrinking	= true,
};

static void xfrm_policy_insert_inexact_list(struct hlist_head *chain,
					    struct xfrm_policy *policy)
{
	struct xfrm_policy *pol, *delpol = NULL;
	struct hlist_node *newpos = NULL;
	int i = 0;

	hlist_for_each_entry(pol, chain, bydst_inexact_list) {
		if (pol->type == policy->type &&
		    pol->if_id == policy->if_id &&
		    !selector_cmp(&pol->selector, &policy->selector) &&
		    xfrm_policy_mark_match(&policy->mark, pol) &&
		    xfrm_sec_ctx_match(pol->security, policy->security) &&
		    !WARN_ON(delpol)) {
			delpol = pol;
			if (policy->priority > pol->priority)
				continue;
		} else if (policy->priority >= pol->priority) {
			newpos = &pol->bydst_inexact_list;
			continue;
		}
		if (delpol)
			break;
	}

	if (newpos && policy->xdo.type != XFRM_DEV_OFFLOAD_PACKET)
		hlist_add_behind_rcu(&policy->bydst_inexact_list, newpos);
	else
		hlist_add_head_rcu(&policy->bydst_inexact_list, chain);

	hlist_for_each_entry(pol, chain, bydst_inexact_list) {
		pol->pos = i;
		i++;
	}
}

static struct xfrm_policy *xfrm_policy_insert_list(struct hlist_head *chain,
						   struct xfrm_policy *policy,
						   bool excl)
{
	struct xfrm_policy *pol, *newpos = NULL, *delpol = NULL;

	hlist_for_each_entry(pol, chain, bydst) {
		if (pol->type == policy->type &&
		    pol->if_id == policy->if_id &&
		    !selector_cmp(&pol->selector, &policy->selector) &&
		    xfrm_policy_mark_match(&policy->mark, pol) &&
		    xfrm_sec_ctx_match(pol->security, policy->security) &&
		    !WARN_ON(delpol)) {
			if (excl)
				return ERR_PTR(-EEXIST);
			delpol = pol;
			if (policy->priority > pol->priority)
				continue;
		} else if (policy->priority >= pol->priority) {
			newpos = pol;
			continue;
		}
		if (delpol)
			break;
	}

	if (newpos && policy->xdo.type != XFRM_DEV_OFFLOAD_PACKET)
		hlist_add_behind_rcu(&policy->bydst, &newpos->bydst);
	else
		/* Packet offload policies are inserted at the head
		 * to speed up lookups.
		 */
1595 hlist_add_head_rcu(&policy->bydst, chain);
1596
1597 return delpol;
1598 }
1599
xfrm_policy_insert(int dir,struct xfrm_policy * policy,int excl)1600 int xfrm_policy_insert(int dir, struct xfrm_policy *policy, int excl)
1601 {
1602 struct net *net = xp_net(policy);
1603 struct xfrm_policy *delpol;
1604 struct hlist_head *chain;
1605
1606 /* Sanitize mark before store */
1607 policy->mark.v &= policy->mark.m;
1608
1609 spin_lock_bh(&net->xfrm.xfrm_policy_lock);
1610 chain = policy_hash_bysel(net, &policy->selector, policy->family, dir);
1611 if (chain)
1612 delpol = xfrm_policy_insert_list(chain, policy, excl);
1613 else
1614 delpol = xfrm_policy_inexact_insert(policy, dir, excl);
1615
1616 if (IS_ERR(delpol)) {
1617 spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
1618 return PTR_ERR(delpol);
1619 }
1620
1621 __xfrm_policy_link(policy, dir);
1622
1623 /* After previous checking, family can either be AF_INET or AF_INET6 */
1624 if (policy->family == AF_INET)
1625 rt_genid_bump_ipv4(net);
1626 else
1627 rt_genid_bump_ipv6(net);
1628
1629 if (delpol) {
1630 xfrm_policy_requeue(delpol, policy);
1631 __xfrm_policy_unlink(delpol, dir);
1632 }
1633 policy->index = delpol ? delpol->index : xfrm_gen_index(net, dir, policy->index);
1634 hlist_add_head(&policy->byidx, net->xfrm.policy_byidx+idx_hash(net, policy->index));
1635 policy->curlft.add_time = ktime_get_real_seconds();
1636 policy->curlft.use_time = 0;
1637 if (!mod_timer(&policy->timer, jiffies + HZ))
1638 xfrm_pol_hold(policy);
1639 spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
1640
1641 if (delpol)
1642 xfrm_policy_kill(delpol);
1643 else if (xfrm_bydst_should_resize(net, dir, NULL))
1644 schedule_work(&net->xfrm.policy_hash_work);
1645
1646 return 0;
1647 }
1648 EXPORT_SYMBOL(xfrm_policy_insert);
1649
1650 static struct xfrm_policy *
__xfrm_policy_bysel_ctx(struct hlist_head * chain,const struct xfrm_mark * mark,u32 if_id,u8 type,int dir,struct xfrm_selector * sel,struct xfrm_sec_ctx * ctx)1651 __xfrm_policy_bysel_ctx(struct hlist_head *chain, const struct xfrm_mark *mark,
1652 u32 if_id, u8 type, int dir, struct xfrm_selector *sel,
1653 struct xfrm_sec_ctx *ctx)
1654 {
1655 struct xfrm_policy *pol;
1656
1657 if (!chain)
1658 return NULL;
1659
1660 hlist_for_each_entry(pol, chain, bydst) {
1661 if (pol->type == type &&
1662 pol->if_id == if_id &&
1663 xfrm_policy_mark_match(mark, pol) &&
1664 !selector_cmp(sel, &pol->selector) &&
1665 xfrm_sec_ctx_match(ctx, pol->security))
1666 return pol;
1667 }
1668
1669 return NULL;
1670 }
1671
1672 struct xfrm_policy *
xfrm_policy_bysel_ctx(struct net * net,const struct xfrm_mark * mark,u32 if_id,u8 type,int dir,struct xfrm_selector * sel,struct xfrm_sec_ctx * ctx,int delete,int * err)1673 xfrm_policy_bysel_ctx(struct net *net, const struct xfrm_mark *mark, u32 if_id,
1674 u8 type, int dir, struct xfrm_selector *sel,
1675 struct xfrm_sec_ctx *ctx, int delete, int *err)
1676 {
1677 struct xfrm_pol_inexact_bin *bin = NULL;
1678 struct xfrm_policy *pol, *ret = NULL;
1679 struct hlist_head *chain;
1680
1681 *err = 0;
1682 spin_lock_bh(&net->xfrm.xfrm_policy_lock);
1683 chain = policy_hash_bysel(net, sel, sel->family, dir);
1684 if (!chain) {
1685 struct xfrm_pol_inexact_candidates cand;
1686 int i;
1687
1688 bin = xfrm_policy_inexact_lookup(net, type,
1689 sel->family, dir, if_id);
1690 if (!bin) {
1691 spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
1692 return NULL;
1693 }
1694
1695 if (!xfrm_policy_find_inexact_candidates(&cand, bin,
1696 &sel->saddr,
1697 &sel->daddr)) {
1698 spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
1699 return NULL;
1700 }
1701
1702 pol = NULL;
1703 for (i = 0; i < ARRAY_SIZE(cand.res); i++) {
1704 struct xfrm_policy *tmp;
1705
1706 tmp = __xfrm_policy_bysel_ctx(cand.res[i], mark,
1707 if_id, type, dir,
1708 sel, ctx);
1709 if (!tmp)
1710 continue;
1711
1712 if (!pol || tmp->pos < pol->pos)
1713 pol = tmp;
1714 }
1715 } else {
1716 pol = __xfrm_policy_bysel_ctx(chain, mark, if_id, type, dir,
1717 sel, ctx);
1718 }
1719
1720 if (pol) {
1721 xfrm_pol_hold(pol);
1722 if (delete) {
1723 *err = security_xfrm_policy_delete(pol->security);
1724 if (*err) {
1725 spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
1726 return pol;
1727 }
1728 __xfrm_policy_unlink(pol, dir);
1729 }
1730 ret = pol;
1731 }
1732 spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
1733
1734 if (ret && delete)
1735 xfrm_policy_kill(ret);
1736 if (bin && delete)
1737 xfrm_policy_inexact_prune_bin(bin);
1738 return ret;
1739 }
1740 EXPORT_SYMBOL(xfrm_policy_bysel_ctx);
1741
1742 struct xfrm_policy *
xfrm_policy_byid(struct net * net,const struct xfrm_mark * mark,u32 if_id,u8 type,int dir,u32 id,int delete,int * err)1743 xfrm_policy_byid(struct net *net, const struct xfrm_mark *mark, u32 if_id,
1744 u8 type, int dir, u32 id, int delete, int *err)
1745 {
1746 struct xfrm_policy *pol, *ret;
1747 struct hlist_head *chain;
1748
1749 *err = -ENOENT;
1750 if (xfrm_policy_id2dir(id) != dir)
1751 return NULL;
1752
1753 *err = 0;
1754 spin_lock_bh(&net->xfrm.xfrm_policy_lock);
1755 chain = net->xfrm.policy_byidx + idx_hash(net, id);
1756 ret = NULL;
1757 hlist_for_each_entry(pol, chain, byidx) {
1758 if (pol->type == type && pol->index == id &&
1759 pol->if_id == if_id && xfrm_policy_mark_match(mark, pol)) {
1760 xfrm_pol_hold(pol);
1761 if (delete) {
1762 *err = security_xfrm_policy_delete(
1763 pol->security);
1764 if (*err) {
1765 spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
1766 return pol;
1767 }
1768 __xfrm_policy_unlink(pol, dir);
1769 }
1770 ret = pol;
1771 break;
1772 }
1773 }
1774 spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
1775
1776 if (ret && delete)
1777 xfrm_policy_kill(ret);
1778 return ret;
1779 }
1780 EXPORT_SYMBOL(xfrm_policy_byid);
1781
1782 #ifdef CONFIG_SECURITY_NETWORK_XFRM
1783 static inline int
xfrm_policy_flush_secctx_check(struct net * net,u8 type,bool task_valid)1784 xfrm_policy_flush_secctx_check(struct net *net, u8 type, bool task_valid)
1785 {
1786 struct xfrm_policy *pol;
1787 int err = 0;
1788
1789 list_for_each_entry(pol, &net->xfrm.policy_all, walk.all) {
1790 if (pol->walk.dead ||
1791 xfrm_policy_id2dir(pol->index) >= XFRM_POLICY_MAX ||
1792 pol->type != type)
1793 continue;
1794
1795 err = security_xfrm_policy_delete(pol->security);
1796 if (err) {
1797 xfrm_audit_policy_delete(pol, 0, task_valid);
1798 return err;
1799 }
1800 }
1801 return err;
1802 }
1803
xfrm_dev_policy_flush_secctx_check(struct net * net,struct net_device * dev,bool task_valid)1804 static inline int xfrm_dev_policy_flush_secctx_check(struct net *net,
1805 struct net_device *dev,
1806 bool task_valid)
1807 {
1808 struct xfrm_policy *pol;
1809 int err = 0;
1810
1811 list_for_each_entry(pol, &net->xfrm.policy_all, walk.all) {
1812 if (pol->walk.dead ||
1813 xfrm_policy_id2dir(pol->index) >= XFRM_POLICY_MAX ||
1814 pol->xdo.dev != dev)
1815 continue;
1816
1817 err = security_xfrm_policy_delete(pol->security);
1818 if (err) {
1819 xfrm_audit_policy_delete(pol, 0, task_valid);
1820 return err;
1821 }
1822 }
1823 return err;
1824 }
1825 #else
1826 static inline int
xfrm_policy_flush_secctx_check(struct net * net,u8 type,bool task_valid)1827 xfrm_policy_flush_secctx_check(struct net *net, u8 type, bool task_valid)
1828 {
1829 return 0;
1830 }
1831
xfrm_dev_policy_flush_secctx_check(struct net * net,struct net_device * dev,bool task_valid)1832 static inline int xfrm_dev_policy_flush_secctx_check(struct net *net,
1833 struct net_device *dev,
1834 bool task_valid)
1835 {
1836 return 0;
1837 }
1838 #endif
1839
xfrm_policy_flush(struct net * net,u8 type,bool task_valid)1840 int xfrm_policy_flush(struct net *net, u8 type, bool task_valid)
1841 {
1842 int dir, err = 0, cnt = 0;
1843 struct xfrm_policy *pol;
1844
1845 spin_lock_bh(&net->xfrm.xfrm_policy_lock);
1846
1847 err = xfrm_policy_flush_secctx_check(net, type, task_valid);
1848 if (err)
1849 goto out;
1850
1851 again:
1852 list_for_each_entry(pol, &net->xfrm.policy_all, walk.all) {
1853 if (pol->walk.dead)
1854 continue;
1855
1856 dir = xfrm_policy_id2dir(pol->index);
1857 if (dir >= XFRM_POLICY_MAX ||
1858 pol->type != type)
1859 continue;
1860
1861 __xfrm_policy_unlink(pol, dir);
1862 spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
1863 cnt++;
1864 xfrm_audit_policy_delete(pol, 1, task_valid);
1865 xfrm_policy_kill(pol);
1866 spin_lock_bh(&net->xfrm.xfrm_policy_lock);
1867 goto again;
1868 }
1869 if (cnt)
1870 __xfrm_policy_inexact_flush(net);
1871 else
1872 err = -ESRCH;
1873 out:
1874 spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
1875 return err;
1876 }
1877 EXPORT_SYMBOL(xfrm_policy_flush);
1878
xfrm_dev_policy_flush(struct net * net,struct net_device * dev,bool task_valid)1879 int xfrm_dev_policy_flush(struct net *net, struct net_device *dev,
1880 bool task_valid)
1881 {
1882 int dir, err = 0, cnt = 0;
1883 struct xfrm_policy *pol;
1884
1885 spin_lock_bh(&net->xfrm.xfrm_policy_lock);
1886
1887 err = xfrm_dev_policy_flush_secctx_check(net, dev, task_valid);
1888 if (err)
1889 goto out;
1890
1891 again:
1892 list_for_each_entry(pol, &net->xfrm.policy_all, walk.all) {
1893 if (pol->walk.dead)
1894 continue;
1895
1896 dir = xfrm_policy_id2dir(pol->index);
1897 if (dir >= XFRM_POLICY_MAX ||
1898 pol->xdo.dev != dev)
1899 continue;
1900
1901 __xfrm_policy_unlink(pol, dir);
1902 spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
1903 cnt++;
1904 xfrm_audit_policy_delete(pol, 1, task_valid);
1905 xfrm_policy_kill(pol);
1906 spin_lock_bh(&net->xfrm.xfrm_policy_lock);
1907 goto again;
1908 }
1909 if (cnt)
1910 __xfrm_policy_inexact_flush(net);
1911 else
1912 err = -ESRCH;
1913 out:
1914 spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
1915 return err;
1916 }
1917 EXPORT_SYMBOL(xfrm_dev_policy_flush);
1918
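/* Resumable iteration over all policies of walk->type. @func is invoked
 * for every live policy; a non-zero return value stops the walk and the
 * current position is saved in @walk so a later call resumes from there.
 *
 * Typical usage (sketch; dump_one is a caller-supplied callback):
 *
 *	struct xfrm_policy_walk walk;
 *
 *	xfrm_policy_walk_init(&walk, XFRM_POLICY_TYPE_ANY);
 *	err = xfrm_policy_walk(net, &walk, dump_one, data);
 *	xfrm_policy_walk_done(&walk, net);
 */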
1919 int xfrm_policy_walk(struct net *net, struct xfrm_policy_walk *walk,
1920 int (*func)(struct xfrm_policy *, int, int, void*),
1921 void *data)
1922 {
1923 struct xfrm_policy *pol;
1924 struct xfrm_policy_walk_entry *x;
1925 int error = 0;
1926
1927 if (walk->type >= XFRM_POLICY_TYPE_MAX &&
1928 walk->type != XFRM_POLICY_TYPE_ANY)
1929 return -EINVAL;
1930
1931 if (list_empty(&walk->walk.all) && walk->seq != 0)
1932 return 0;
1933
1934 spin_lock_bh(&net->xfrm.xfrm_policy_lock);
1935 if (list_empty(&walk->walk.all))
1936 x = list_first_entry(&net->xfrm.policy_all, struct xfrm_policy_walk_entry, all);
1937 else
1938 x = list_first_entry(&walk->walk.all,
1939 struct xfrm_policy_walk_entry, all);
1940
1941 list_for_each_entry_from(x, &net->xfrm.policy_all, all) {
1942 if (x->dead)
1943 continue;
1944 pol = container_of(x, struct xfrm_policy, walk);
1945 if (walk->type != XFRM_POLICY_TYPE_ANY &&
1946 walk->type != pol->type)
1947 continue;
1948 error = func(pol, xfrm_policy_id2dir(pol->index),
1949 walk->seq, data);
1950 if (error) {
1951 list_move_tail(&walk->walk.all, &x->all);
1952 goto out;
1953 }
1954 walk->seq++;
1955 }
1956 if (walk->seq == 0) {
1957 error = -ENOENT;
1958 goto out;
1959 }
1960 list_del_init(&walk->walk.all);
1961 out:
1962 spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
1963 return error;
1964 }
1965 EXPORT_SYMBOL(xfrm_policy_walk);
1966
1967 void xfrm_policy_walk_init(struct xfrm_policy_walk *walk, u8 type)
1968 {
1969 INIT_LIST_HEAD(&walk->walk.all);
1970 walk->walk.dead = 1;
1971 walk->type = type;
1972 walk->seq = 0;
1973 }
1974 EXPORT_SYMBOL(xfrm_policy_walk_init);
1975
1976 void xfrm_policy_walk_done(struct xfrm_policy_walk *walk, struct net *net)
1977 {
1978 if (list_empty(&walk->walk.all))
1979 return;
1980
1981 spin_lock_bh(&net->xfrm.xfrm_policy_lock);
1982 list_del(&walk->walk.all);
1983 spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
1984 }
1985 EXPORT_SYMBOL(xfrm_policy_walk_done);
1986
1987 /*
1988 * Find policy to apply to this flow.
1989 *
1990 * Returns 0 if a matching policy is found, otherwise a negative errno.
1991 */
1992 static int xfrm_policy_match(const struct xfrm_policy *pol,
1993 const struct flowi *fl,
1994 u8 type, u16 family, u32 if_id)
1995 {
1996 const struct xfrm_selector *sel = &pol->selector;
1997 int ret = -ESRCH;
1998 bool match;
1999
2000 if (pol->family != family ||
2001 pol->if_id != if_id ||
2002 (fl->flowi_mark & pol->mark.m) != pol->mark.v ||
2003 pol->type != type)
2004 return ret;
2005
2006 match = xfrm_selector_match(sel, fl, family);
2007 if (match)
2008 ret = security_xfrm_policy_lookup(pol->security, fl->flowi_secid);
2009 return ret;
2010 }
2011
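/* Lockless descent of an inexact-policy rbtree keyed by address prefix.
 * The tree can be rebalanced concurrently, so the walk is guarded by
 * @count and restarts from the root if the seqcount shows a change.
 */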
2012 static struct xfrm_pol_inexact_node *
2013 xfrm_policy_lookup_inexact_addr(const struct rb_root *r,
2014 seqcount_spinlock_t *count,
2015 const xfrm_address_t *addr, u16 family)
2016 {
2017 const struct rb_node *parent;
2018 int seq;
2019
2020 again:
2021 seq = read_seqcount_begin(count);
2022
2023 parent = rcu_dereference_raw(r->rb_node);
2024 while (parent) {
2025 struct xfrm_pol_inexact_node *node;
2026 int delta;
2027
2028 node = rb_entry(parent, struct xfrm_pol_inexact_node, node);
2029
2030 delta = xfrm_policy_addr_delta(addr, &node->addr,
2031 node->prefixlen, family);
2032 if (delta < 0) {
2033 parent = rcu_dereference_raw(parent->rb_left);
2034 continue;
2035 } else if (delta > 0) {
2036 parent = rcu_dereference_raw(parent->rb_right);
2037 continue;
2038 }
2039
2040 return node;
2041 }
2042
2043 if (read_seqcount_retry(count, seq))
2044 goto again;
2045
2046 return NULL;
2047 }
2048
2049 static bool
2050 xfrm_policy_find_inexact_candidates(struct xfrm_pol_inexact_candidates *cand,
2051 struct xfrm_pol_inexact_bin *b,
2052 const xfrm_address_t *saddr,
2053 const xfrm_address_t *daddr)
2054 {
2055 struct xfrm_pol_inexact_node *n;
2056 u16 family;
2057
2058 if (!b)
2059 return false;
2060
2061 family = b->k.family;
2062 memset(cand, 0, sizeof(*cand));
2063 cand->res[XFRM_POL_CAND_ANY] = &b->hhead;
2064
2065 n = xfrm_policy_lookup_inexact_addr(&b->root_d, &b->count, daddr,
2066 family);
2067 if (n) {
2068 cand->res[XFRM_POL_CAND_DADDR] = &n->hhead;
2069 n = xfrm_policy_lookup_inexact_addr(&n->root, &b->count, saddr,
2070 family);
2071 if (n)
2072 cand->res[XFRM_POL_CAND_BOTH] = &n->hhead;
2073 }
2074
2075 n = xfrm_policy_lookup_inexact_addr(&b->root_s, &b->count, saddr,
2076 family);
2077 if (n)
2078 cand->res[XFRM_POL_CAND_SADDR] = &n->hhead;
2079
2080 return true;
2081 }
2082
2083 static struct xfrm_pol_inexact_bin *
2084 xfrm_policy_inexact_lookup_rcu(struct net *net, u8 type, u16 family,
2085 u8 dir, u32 if_id)
2086 {
2087 struct xfrm_pol_inexact_key k = {
2088 .family = family,
2089 .type = type,
2090 .dir = dir,
2091 .if_id = if_id,
2092 };
2093
2094 write_pnet(&k.net, net);
2095
2096 return rhashtable_lookup(&xfrm_policy_inexact_table, &k,
2097 xfrm_pol_inexact_params);
2098 }
2099
2100 static struct xfrm_pol_inexact_bin *
2101 xfrm_policy_inexact_lookup(struct net *net, u8 type, u16 family,
2102 u8 dir, u32 if_id)
2103 {
2104 struct xfrm_pol_inexact_bin *bin;
2105
2106 lockdep_assert_held(&net->xfrm.xfrm_policy_lock);
2107
2108 rcu_read_lock();
2109 bin = xfrm_policy_inexact_lookup_rcu(net, type, family, dir, if_id);
2110 rcu_read_unlock();
2111
2112 return bin;
2113 }
2114
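/* Scan one candidate chain for a policy matching @fl that beats @prefer.
 * Chains are kept sorted by ascending priority, so the scan stops as
 * soon as no remaining entry can win.
 */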
2115 static struct xfrm_policy *
2116 __xfrm_policy_eval_candidates(struct hlist_head *chain,
2117 struct xfrm_policy *prefer,
2118 const struct flowi *fl,
2119 u8 type, u16 family, u32 if_id)
2120 {
2121 u32 priority = prefer ? prefer->priority : ~0u;
2122 struct xfrm_policy *pol;
2123
2124 if (!chain)
2125 return NULL;
2126
2127 hlist_for_each_entry_rcu(pol, chain, bydst) {
2128 int err;
2129
2130 if (pol->priority > priority)
2131 break;
2132
2133 err = xfrm_policy_match(pol, fl, type, family, if_id);
2134 if (err) {
2135 if (err != -ESRCH)
2136 return ERR_PTR(err);
2137
2138 continue;
2139 }
2140
2141 if (prefer) {
2142 /* matches. Is it older than *prefer? */
2143 if (pol->priority == priority &&
2144 prefer->pos < pol->pos)
2145 return prefer;
2146 }
2147
2148 return pol;
2149 }
2150
2151 return NULL;
2152 }
2153
2154 static struct xfrm_policy *
2155 xfrm_policy_eval_candidates(struct xfrm_pol_inexact_candidates *cand,
2156 struct xfrm_policy *prefer,
2157 const struct flowi *fl,
2158 u8 type, u16 family, u32 if_id)
2159 {
2160 struct xfrm_policy *tmp;
2161 int i;
2162
2163 for (i = 0; i < ARRAY_SIZE(cand->res); i++) {
2164 tmp = __xfrm_policy_eval_candidates(cand->res[i],
2165 prefer,
2166 fl, type, family, if_id);
2167 if (!tmp)
2168 continue;
2169
2170 if (IS_ERR(tmp))
2171 return tmp;
2172 prefer = tmp;
2173 }
2174
2175 return prefer;
2176 }
2177
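/* Core policy lookup: search the exact (hashed) chain first, then the
 * inexact candidate lists, restarting the whole lookup if the hash
 * tables are resized underneath us. A match with packet offload
 * (XFRM_DEV_OFFLOAD_PACKET) skips the inexact pass.
 */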
2178 static struct xfrm_policy *xfrm_policy_lookup_bytype(struct net *net, u8 type,
2179 const struct flowi *fl,
2180 u16 family, u8 dir,
2181 u32 if_id)
2182 {
2183 struct xfrm_pol_inexact_candidates cand;
2184 const xfrm_address_t *daddr, *saddr;
2185 struct xfrm_pol_inexact_bin *bin;
2186 struct xfrm_policy *pol, *ret;
2187 struct hlist_head *chain;
2188 unsigned int sequence;
2189 int err;
2190
2191 daddr = xfrm_flowi_daddr(fl, family);
2192 saddr = xfrm_flowi_saddr(fl, family);
2193 if (unlikely(!daddr || !saddr))
2194 return NULL;
2195
2196 rcu_read_lock();
2197 retry:
2198 do {
2199 sequence = read_seqcount_begin(&net->xfrm.xfrm_policy_hash_generation);
2200 chain = policy_hash_direct(net, daddr, saddr, family, dir);
2201 } while (read_seqcount_retry(&net->xfrm.xfrm_policy_hash_generation, sequence));
2202
2203 ret = NULL;
2204 hlist_for_each_entry_rcu(pol, chain, bydst) {
2205 err = xfrm_policy_match(pol, fl, type, family, if_id);
2206 if (err) {
2207 if (err == -ESRCH)
2208 continue;
2209 else {
2210 ret = ERR_PTR(err);
2211 goto fail;
2212 }
2213 } else {
2214 ret = pol;
2215 break;
2216 }
2217 }
2218 if (ret && ret->xdo.type == XFRM_DEV_OFFLOAD_PACKET)
2219 goto skip_inexact;
2220
2221 bin = xfrm_policy_inexact_lookup_rcu(net, type, family, dir, if_id);
2222 if (!bin || !xfrm_policy_find_inexact_candidates(&cand, bin, saddr,
2223 daddr))
2224 goto skip_inexact;
2225
2226 pol = xfrm_policy_eval_candidates(&cand, ret, fl, type,
2227 family, if_id);
2228 if (pol) {
2229 ret = pol;
2230 if (IS_ERR(pol))
2231 goto fail;
2232 }
2233
2234 skip_inexact:
2235 if (read_seqcount_retry(&net->xfrm.xfrm_policy_hash_generation, sequence))
2236 goto retry;
2237
2238 if (ret && !xfrm_pol_hold_rcu(ret))
2239 goto retry;
2240 fail:
2241 rcu_read_unlock();
2242
2243 return ret;
2244 }
2245
2246 static struct xfrm_policy *xfrm_policy_lookup(struct net *net,
2247 const struct flowi *fl,
2248 u16 family, u8 dir, u32 if_id)
2249 {
2250 #ifdef CONFIG_XFRM_SUB_POLICY
2251 struct xfrm_policy *pol;
2252
2253 pol = xfrm_policy_lookup_bytype(net, XFRM_POLICY_TYPE_SUB, fl, family,
2254 dir, if_id);
2255 if (pol != NULL)
2256 return pol;
2257 #endif
2258 return xfrm_policy_lookup_bytype(net, XFRM_POLICY_TYPE_MAIN, fl, family,
2259 dir, if_id);
2260 }
2261
2262 static struct xfrm_policy *xfrm_sk_policy_lookup(const struct sock *sk, int dir,
2263 const struct flowi *fl,
2264 u16 family, u32 if_id)
2265 {
2266 struct xfrm_policy *pol;
2267
2268 rcu_read_lock();
2269 again:
2270 pol = rcu_dereference(sk->sk_policy[dir]);
2271 if (pol != NULL) {
2272 bool match;
2273 int err = 0;
2274
2275 if (pol->family != family) {
2276 pol = NULL;
2277 goto out;
2278 }
2279
2280 match = xfrm_selector_match(&pol->selector, fl, family);
2281 if (match) {
2282 if ((READ_ONCE(sk->sk_mark) & pol->mark.m) != pol->mark.v ||
2283 pol->if_id != if_id) {
2284 pol = NULL;
2285 goto out;
2286 }
2287 err = security_xfrm_policy_lookup(pol->security,
2288 fl->flowi_secid);
2289 if (!err) {
2290 if (!xfrm_pol_hold_rcu(pol))
2291 goto again;
2292 } else if (err == -ESRCH) {
2293 pol = NULL;
2294 } else {
2295 pol = ERR_PTR(err);
2296 }
2297 } else
2298 pol = NULL;
2299 }
2300 out:
2301 rcu_read_unlock();
2302 return pol;
2303 }
2304
2305 static void __xfrm_policy_link(struct xfrm_policy *pol, int dir)
2306 {
2307 struct net *net = xp_net(pol);
2308
2309 list_add(&pol->walk.all, &net->xfrm.policy_all);
2310 net->xfrm.policy_count[dir]++;
2311 xfrm_pol_hold(pol);
2312 }
2313
2314 static struct xfrm_policy *__xfrm_policy_unlink(struct xfrm_policy *pol,
2315 int dir)
2316 {
2317 struct net *net = xp_net(pol);
2318
2319 if (list_empty(&pol->walk.all))
2320 return NULL;
2321
2322 /* Socket policies are not hashed. */
2323 if (!hlist_unhashed(&pol->bydst)) {
2324 hlist_del_rcu(&pol->bydst);
2325 hlist_del_init(&pol->bydst_inexact_list);
2326 hlist_del(&pol->byidx);
2327 }
2328
2329 list_del_init(&pol->walk.all);
2330 net->xfrm.policy_count[dir]--;
2331
2332 return pol;
2333 }
2334
2335 static void xfrm_sk_policy_link(struct xfrm_policy *pol, int dir)
2336 {
2337 __xfrm_policy_link(pol, XFRM_POLICY_MAX + dir);
2338 }
2339
2340 static void xfrm_sk_policy_unlink(struct xfrm_policy *pol, int dir)
2341 {
2342 __xfrm_policy_unlink(pol, XFRM_POLICY_MAX + dir);
2343 }
2344
2345 int xfrm_policy_delete(struct xfrm_policy *pol, int dir)
2346 {
2347 struct net *net = xp_net(pol);
2348
2349 spin_lock_bh(&net->xfrm.xfrm_policy_lock);
2350 pol = __xfrm_policy_unlink(pol, dir);
2351 spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
2352 if (pol) {
2353 xfrm_policy_kill(pol);
2354 return 0;
2355 }
2356 return -ENOENT;
2357 }
2358 EXPORT_SYMBOL(xfrm_policy_delete);
2359
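/* Install @pol as the per-socket policy for direction @dir, replacing
 * and killing any previous one. Packets queued on the old policy are
 * moved over via xfrm_policy_requeue().
 */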
2360 int xfrm_sk_policy_insert(struct sock *sk, int dir, struct xfrm_policy *pol)
2361 {
2362 struct net *net = sock_net(sk);
2363 struct xfrm_policy *old_pol;
2364
2365 #ifdef CONFIG_XFRM_SUB_POLICY
2366 if (pol && pol->type != XFRM_POLICY_TYPE_MAIN)
2367 return -EINVAL;
2368 #endif
2369
2370 spin_lock_bh(&net->xfrm.xfrm_policy_lock);
2371 old_pol = rcu_dereference_protected(sk->sk_policy[dir],
2372 lockdep_is_held(&net->xfrm.xfrm_policy_lock));
2373 if (pol) {
2374 pol->curlft.add_time = ktime_get_real_seconds();
2375 pol->index = xfrm_gen_index(net, XFRM_POLICY_MAX+dir, 0);
2376 xfrm_sk_policy_link(pol, dir);
2377 }
2378 rcu_assign_pointer(sk->sk_policy[dir], pol);
2379 if (old_pol) {
2380 if (pol)
2381 xfrm_policy_requeue(old_pol, pol);
2382
2383 /* Unlinking always succeeds. This is the only function
2384 * allowed to delete or replace a socket policy.
2385 */
2386 xfrm_sk_policy_unlink(old_pol, dir);
2387 }
2388 spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
2389
2390 if (old_pol) {
2391 xfrm_policy_kill(old_pol);
2392 }
2393 return 0;
2394 }
2395
2396 static struct xfrm_policy *clone_policy(const struct xfrm_policy *old, int dir)
2397 {
2398 struct xfrm_policy *newp = xfrm_policy_alloc(xp_net(old), GFP_ATOMIC);
2399 struct net *net = xp_net(old);
2400
2401 if (newp) {
2402 newp->selector = old->selector;
2403 if (security_xfrm_policy_clone(old->security,
2404 &newp->security)) {
2405 kfree(newp);
2406 return NULL; /* ENOMEM */
2407 }
2408 newp->lft = old->lft;
2409 newp->curlft = old->curlft;
2410 newp->mark = old->mark;
2411 newp->if_id = old->if_id;
2412 newp->action = old->action;
2413 newp->flags = old->flags;
2414 newp->xfrm_nr = old->xfrm_nr;
2415 newp->index = old->index;
2416 newp->type = old->type;
2417 newp->family = old->family;
2418 memcpy(newp->xfrm_vec, old->xfrm_vec,
2419 newp->xfrm_nr*sizeof(struct xfrm_tmpl));
2420 spin_lock_bh(&net->xfrm.xfrm_policy_lock);
2421 xfrm_sk_policy_link(newp, dir);
2422 spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
2423 xfrm_pol_put(newp);
2424 }
2425 return newp;
2426 }
2427
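/* Duplicate both per-socket policies (in and out) from @osk onto @sk,
 * typically when a new socket is cloned from its parent. Returns
 * -ENOMEM if a clone fails.
 */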
2428 int __xfrm_sk_clone_policy(struct sock *sk, const struct sock *osk)
2429 {
2430 const struct xfrm_policy *p;
2431 struct xfrm_policy *np;
2432 int i, ret = 0;
2433
2434 rcu_read_lock();
2435 for (i = 0; i < 2; i++) {
2436 p = rcu_dereference(osk->sk_policy[i]);
2437 if (p) {
2438 np = clone_policy(p, i);
2439 if (unlikely(!np)) {
2440 ret = -ENOMEM;
2441 break;
2442 }
2443 rcu_assign_pointer(sk->sk_policy[i], np);
2444 }
2445 }
2446 rcu_read_unlock();
2447 return ret;
2448 }
2449
2450 static int
2451 xfrm_get_saddr(unsigned short family, xfrm_address_t *saddr,
2452 const struct xfrm_dst_lookup_params *params)
2453 {
2454 int err;
2455 const struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
2456
2457 if (unlikely(afinfo == NULL))
2458 return -EINVAL;
2459 err = afinfo->get_saddr(saddr, params);
2460 rcu_read_unlock();
2461 return err;
2462 }
2463
2464 /* Resolve list of templates for the flow, given policy. */
2465
2466 static int
2467 xfrm_tmpl_resolve_one(struct xfrm_policy *policy, const struct flowi *fl,
2468 struct xfrm_state **xfrm, unsigned short family)
2469 {
2470 struct net *net = xp_net(policy);
2471 int nx;
2472 int i, error;
2473 xfrm_address_t *daddr = xfrm_flowi_daddr(fl, family);
2474 xfrm_address_t *saddr = xfrm_flowi_saddr(fl, family);
2475 xfrm_address_t tmp;
2476
2477 for (nx = 0, i = 0; i < policy->xfrm_nr; i++) {
2478 struct xfrm_state *x;
2479 xfrm_address_t *remote = daddr;
2480 xfrm_address_t *local = saddr;
2481 struct xfrm_tmpl *tmpl = &policy->xfrm_vec[i];
2482
2483 if (tmpl->mode == XFRM_MODE_TUNNEL ||
2484 tmpl->mode == XFRM_MODE_BEET) {
2485 remote = &tmpl->id.daddr;
2486 local = &tmpl->saddr;
2487 if (xfrm_addr_any(local, tmpl->encap_family)) {
2488 struct xfrm_dst_lookup_params params;
2489
2490 memset(&params, 0, sizeof(params));
2491 params.net = net;
2492 params.oif = fl->flowi_oif;
2493 params.daddr = remote;
2494 error = xfrm_get_saddr(tmpl->encap_family, &tmp,
2495 &params);
2496 if (error)
2497 goto fail;
2498 local = &tmp;
2499 }
2500 }
2501
2502 x = xfrm_state_find(remote, local, fl, tmpl, policy, &error,
2503 family, policy->if_id);
2504
2505 if (x && x->km.state == XFRM_STATE_VALID) {
2506 xfrm[nx++] = x;
2507 daddr = remote;
2508 saddr = local;
2509 continue;
2510 }
2511 if (x) {
2512 error = (x->km.state == XFRM_STATE_ERROR ?
2513 -EINVAL : -EAGAIN);
2514 xfrm_state_put(x);
2515 } else if (error == -ESRCH) {
2516 error = -EAGAIN;
2517 }
2518
2519 if (!tmpl->optional)
2520 goto fail;
2521 }
2522 return nx;
2523
2524 fail:
2525 for (nx--; nx >= 0; nx--)
2526 xfrm_state_put(xfrm[nx]);
2527 return error;
2528 }
2529
2530 static int
2531 xfrm_tmpl_resolve(struct xfrm_policy **pols, int npols, const struct flowi *fl,
2532 struct xfrm_state **xfrm, unsigned short family)
2533 {
2534 struct xfrm_state *tp[XFRM_MAX_DEPTH];
2535 struct xfrm_state **tpp = (npols > 1) ? tp : xfrm;
2536 int cnx = 0;
2537 int error;
2538 int ret;
2539 int i;
2540
2541 for (i = 0; i < npols; i++) {
2542 if (cnx + pols[i]->xfrm_nr >= XFRM_MAX_DEPTH) {
2543 error = -ENOBUFS;
2544 goto fail;
2545 }
2546
2547 ret = xfrm_tmpl_resolve_one(pols[i], fl, &tpp[cnx], family);
2548 if (ret < 0) {
2549 error = ret;
2550 goto fail;
2551 } else
2552 cnx += ret;
2553 }
2554
2555 /* found states are sorted for outbound processing */
2556 if (npols > 1)
2557 xfrm_state_sort(xfrm, tpp, cnx, family);
2558
2559 return cnx;
2560
2561 fail:
2562 for (cnx--; cnx >= 0; cnx--)
2563 xfrm_state_put(tpp[cnx]);
2564 return error;
2565
2566 }
2567
2568 static int xfrm_get_tos(const struct flowi *fl, int family)
2569 {
2570 if (family == AF_INET)
2571 return IPTOS_RT_MASK & fl->u.ip4.flowi4_tos;
2572
2573 return 0;
2574 }
2575
2576 static inline struct xfrm_dst *xfrm_alloc_dst(struct net *net, int family)
2577 {
2578 const struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
2579 struct dst_ops *dst_ops;
2580 struct xfrm_dst *xdst;
2581
2582 if (!afinfo)
2583 return ERR_PTR(-EINVAL);
2584
2585 switch (family) {
2586 case AF_INET:
2587 dst_ops = &net->xfrm.xfrm4_dst_ops;
2588 break;
2589 #if IS_ENABLED(CONFIG_IPV6)
2590 case AF_INET6:
2591 dst_ops = &net->xfrm.xfrm6_dst_ops;
2592 break;
2593 #endif
2594 default:
2595 BUG();
2596 }
2597 xdst = dst_alloc(dst_ops, NULL, 1, DST_OBSOLETE_NONE, 0);
2598
2599 if (likely(xdst)) {
2600 memset_after(xdst, 0, u.dst);
2601 } else
2602 xdst = ERR_PTR(-ENOBUFS);
2603
2604 rcu_read_unlock();
2605
2606 return xdst;
2607 }
2608
2609 static void xfrm_init_path(struct xfrm_dst *path, struct dst_entry *dst,
2610 int nfheader_len)
2611 {
2612 if (dst->ops->family == AF_INET6) {
2613 path->path_cookie = rt6_get_cookie(dst_rt6_info(dst));
2614 path->u.rt6.rt6i_nfheader_len = nfheader_len;
2615 }
2616 }
2617
2618 static inline int xfrm_fill_dst(struct xfrm_dst *xdst, struct net_device *dev,
2619 const struct flowi *fl)
2620 {
2621 const struct xfrm_policy_afinfo *afinfo =
2622 xfrm_policy_get_afinfo(xdst->u.dst.ops->family);
2623 int err;
2624
2625 if (!afinfo)
2626 return -EINVAL;
2627
2628 err = afinfo->fill_dst(xdst, dev, fl);
2629
2630 rcu_read_unlock();
2631
2632 return err;
2633 }
2634
2635
2636 /* Allocate a chain of dst_entry's, attach the known xfrm's, and
2637 * calculate all the metrics. In short, bundle a bundle.
2638 */
2639
2640 static struct dst_entry *xfrm_bundle_create(struct xfrm_policy *policy,
2641 struct xfrm_state **xfrm,
2642 struct xfrm_dst **bundle,
2643 int nx,
2644 const struct flowi *fl,
2645 struct dst_entry *dst)
2646 {
2647 const struct xfrm_state_afinfo *afinfo;
2648 const struct xfrm_mode *inner_mode;
2649 struct net *net = xp_net(policy);
2650 unsigned long now = jiffies;
2651 struct net_device *dev;
2652 struct xfrm_dst *xdst_prev = NULL;
2653 struct xfrm_dst *xdst0 = NULL;
2654 int i = 0;
2655 int err;
2656 int header_len = 0;
2657 int nfheader_len = 0;
2658 int trailer_len = 0;
2659 int tos;
2660 int family = policy->selector.family;
2661 xfrm_address_t saddr, daddr;
2662
2663 xfrm_flowi_addr_get(fl, &saddr, &daddr, family);
2664
2665 tos = xfrm_get_tos(fl, family);
2666
2667 dst_hold(dst);
2668
2669 for (; i < nx; i++) {
2670 struct xfrm_dst *xdst = xfrm_alloc_dst(net, family);
2671 struct dst_entry *dst1 = &xdst->u.dst;
2672
2673 err = PTR_ERR(xdst);
2674 if (IS_ERR(xdst)) {
2675 dst_release(dst);
2676 goto put_states;
2677 }
2678
2679 bundle[i] = xdst;
2680 if (!xdst_prev)
2681 xdst0 = xdst;
2682 else
2683 /* Ref count is taken during xfrm_alloc_dst();
2684 * no need to do dst_clone() on dst1.
2685 */
2686 xfrm_dst_set_child(xdst_prev, &xdst->u.dst);
2687
2688 if (xfrm[i]->sel.family == AF_UNSPEC) {
2689 inner_mode = xfrm_ip2inner_mode(xfrm[i],
2690 xfrm_af2proto(family));
2691 if (!inner_mode) {
2692 err = -EAFNOSUPPORT;
2693 dst_release(dst);
2694 goto put_states;
2695 }
2696 } else
2697 inner_mode = &xfrm[i]->inner_mode;
2698
2699 xdst->route = dst;
2700 dst_copy_metrics(dst1, dst);
2701
2702 if (xfrm[i]->props.mode != XFRM_MODE_TRANSPORT) {
2703 __u32 mark = 0;
2704 int oif;
2705
2706 if (xfrm[i]->props.smark.v || xfrm[i]->props.smark.m)
2707 mark = xfrm_smark_get(fl->flowi_mark, xfrm[i]);
2708
2709 if (xfrm[i]->xso.type != XFRM_DEV_OFFLOAD_PACKET)
2710 family = xfrm[i]->props.family;
2711
2712 oif = fl->flowi_oif ? : fl->flowi_l3mdev;
2713 dst = xfrm_dst_lookup(xfrm[i], tos, oif,
2714 &saddr, &daddr, family, mark);
2715 err = PTR_ERR(dst);
2716 if (IS_ERR(dst))
2717 goto put_states;
2718 } else
2719 dst_hold(dst);
2720
2721 dst1->xfrm = xfrm[i];
2722 xdst->xfrm_genid = xfrm[i]->genid;
2723
2724 dst1->obsolete = DST_OBSOLETE_FORCE_CHK;
2725 dst1->lastuse = now;
2726
2727 dst1->input = dst_discard;
2728
2729 rcu_read_lock();
2730 afinfo = xfrm_state_afinfo_get_rcu(inner_mode->family);
2731 if (likely(afinfo))
2732 dst1->output = afinfo->output;
2733 else
2734 dst1->output = dst_discard_out;
2735 rcu_read_unlock();
2736
2737 xdst_prev = xdst;
2738
2739 header_len += xfrm[i]->props.header_len;
2740 if (xfrm[i]->type->flags & XFRM_TYPE_NON_FRAGMENT)
2741 nfheader_len += xfrm[i]->props.header_len;
2742 trailer_len += xfrm[i]->props.trailer_len;
2743 }
2744
2745 xfrm_dst_set_child(xdst_prev, dst);
2746 xdst0->path = dst;
2747
2748 err = -ENODEV;
2749 dev = dst->dev;
2750 if (!dev)
2751 goto free_dst;
2752
2753 xfrm_init_path(xdst0, dst, nfheader_len);
2754 xfrm_init_pmtu(bundle, nx);
2755
2756 for (xdst_prev = xdst0; xdst_prev != (struct xfrm_dst *)dst;
2757 xdst_prev = (struct xfrm_dst *) xfrm_dst_child(&xdst_prev->u.dst)) {
2758 err = xfrm_fill_dst(xdst_prev, dev, fl);
2759 if (err)
2760 goto free_dst;
2761
2762 xdst_prev->u.dst.header_len = header_len;
2763 xdst_prev->u.dst.trailer_len = trailer_len;
2764 header_len -= xdst_prev->u.dst.xfrm->props.header_len;
2765 trailer_len -= xdst_prev->u.dst.xfrm->props.trailer_len;
2766 }
2767
2768 return &xdst0->u.dst;
2769
2770 put_states:
2771 for (; i < nx; i++)
2772 xfrm_state_put(xfrm[i]);
2773 free_dst:
2774 if (xdst0)
2775 dst_release_immediate(&xdst0->u.dst);
2776
2777 return ERR_PTR(err);
2778 }
2779
2780 static int xfrm_expand_policies(const struct flowi *fl, u16 family,
2781 struct xfrm_policy **pols,
2782 int *num_pols, int *num_xfrms)
2783 {
2784 int i;
2785
2786 if (*num_pols == 0 || !pols[0]) {
2787 *num_pols = 0;
2788 *num_xfrms = 0;
2789 return 0;
2790 }
2791 if (IS_ERR(pols[0])) {
2792 *num_pols = 0;
2793 return PTR_ERR(pols[0]);
2794 }
2795
2796 *num_xfrms = pols[0]->xfrm_nr;
2797
2798 #ifdef CONFIG_XFRM_SUB_POLICY
2799 if (pols[0]->action == XFRM_POLICY_ALLOW &&
2800 pols[0]->type != XFRM_POLICY_TYPE_MAIN) {
2801 pols[1] = xfrm_policy_lookup_bytype(xp_net(pols[0]),
2802 XFRM_POLICY_TYPE_MAIN,
2803 fl, family,
2804 XFRM_POLICY_OUT,
2805 pols[0]->if_id);
2806 if (pols[1]) {
2807 if (IS_ERR(pols[1])) {
2808 xfrm_pols_put(pols, *num_pols);
2809 *num_pols = 0;
2810 return PTR_ERR(pols[1]);
2811 }
2812 (*num_pols)++;
2813 (*num_xfrms) += pols[1]->xfrm_nr;
2814 }
2815 }
2816 #endif
2817 for (i = 0; i < *num_pols; i++) {
2818 if (pols[i]->action != XFRM_POLICY_ALLOW) {
2819 *num_xfrms = -1;
2820 break;
2821 }
2822 }
2823
2824 return 0;
2825
2826 }
2827
2828 static struct xfrm_dst *
2829 xfrm_resolve_and_create_bundle(struct xfrm_policy **pols, int num_pols,
2830 const struct flowi *fl, u16 family,
2831 struct dst_entry *dst_orig)
2832 {
2833 struct net *net = xp_net(pols[0]);
2834 struct xfrm_state *xfrm[XFRM_MAX_DEPTH];
2835 struct xfrm_dst *bundle[XFRM_MAX_DEPTH];
2836 struct xfrm_dst *xdst;
2837 struct dst_entry *dst;
2838 int err;
2839
2840 /* Try to instantiate a bundle */
2841 err = xfrm_tmpl_resolve(pols, num_pols, fl, xfrm, family);
2842 if (err <= 0) {
2843 if (err == 0)
2844 return NULL;
2845
2846 if (err != -EAGAIN)
2847 XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTPOLERROR);
2848 return ERR_PTR(err);
2849 }
2850
2851 dst = xfrm_bundle_create(pols[0], xfrm, bundle, err, fl, dst_orig);
2852 if (IS_ERR(dst)) {
2853 XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTBUNDLEGENERROR);
2854 return ERR_CAST(dst);
2855 }
2856
2857 xdst = (struct xfrm_dst *)dst;
2858 xdst->num_xfrms = err;
2859 xdst->num_pols = num_pols;
2860 memcpy(xdst->pols, pols, sizeof(struct xfrm_policy *) * num_pols);
2861 xdst->policy_genid = atomic_read(&pols[0]->genid);
2862
2863 return xdst;
2864 }
2865
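/* Hold-queue timer callback: re-resolve the bundle for the head of the
 * queue. If it is still incomplete (DST_XFRM_QUEUE), double the timeout
 * and re-arm, up to XFRM_QUEUE_TMO_MAX; otherwise drain the queue and
 * transmit every held packet via dst_output().
 */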
2866 static void xfrm_policy_queue_process(struct timer_list *t)
2867 {
2868 struct sk_buff *skb;
2869 struct sock *sk;
2870 struct dst_entry *dst;
2871 struct xfrm_policy *pol = from_timer(pol, t, polq.hold_timer);
2872 struct net *net = xp_net(pol);
2873 struct xfrm_policy_queue *pq = &pol->polq;
2874 struct flowi fl;
2875 struct sk_buff_head list;
2876 __u32 skb_mark;
2877
2878 spin_lock(&pq->hold_queue.lock);
2879 skb = skb_peek(&pq->hold_queue);
2880 if (!skb) {
2881 spin_unlock(&pq->hold_queue.lock);
2882 goto out;
2883 }
2884 dst = skb_dst(skb);
2885 sk = skb->sk;
2886
2887 /* Fixup the mark to support VTI. */
2888 skb_mark = skb->mark;
2889 skb->mark = pol->mark.v;
2890 xfrm_decode_session(skb, &fl, dst->ops->family);
2891 skb->mark = skb_mark;
2892 spin_unlock(&pq->hold_queue.lock);
2893
2894 dst_hold(xfrm_dst_path(dst));
2895 dst = xfrm_lookup(net, xfrm_dst_path(dst), &fl, sk, XFRM_LOOKUP_QUEUE);
2896 if (IS_ERR(dst))
2897 goto purge_queue;
2898
2899 if (dst->flags & DST_XFRM_QUEUE) {
2900 dst_release(dst);
2901
2902 if (pq->timeout >= XFRM_QUEUE_TMO_MAX)
2903 goto purge_queue;
2904
2905 pq->timeout = pq->timeout << 1;
2906 if (!mod_timer(&pq->hold_timer, jiffies + pq->timeout))
2907 xfrm_pol_hold(pol);
2908 goto out;
2909 }
2910
2911 dst_release(dst);
2912
2913 __skb_queue_head_init(&list);
2914
2915 spin_lock(&pq->hold_queue.lock);
2916 pq->timeout = 0;
2917 skb_queue_splice_init(&pq->hold_queue, &list);
2918 spin_unlock(&pq->hold_queue.lock);
2919
2920 while (!skb_queue_empty(&list)) {
2921 skb = __skb_dequeue(&list);
2922
2923 /* Fixup the mark to support VTI. */
2924 skb_mark = skb->mark;
2925 skb->mark = pol->mark.v;
2926 xfrm_decode_session(skb, &fl, skb_dst(skb)->ops->family);
2927 skb->mark = skb_mark;
2928
2929 dst_hold(xfrm_dst_path(skb_dst(skb)));
2930 dst = xfrm_lookup(net, xfrm_dst_path(skb_dst(skb)), &fl, skb->sk, 0);
2931 if (IS_ERR(dst)) {
2932 kfree_skb(skb);
2933 continue;
2934 }
2935
2936 nf_reset_ct(skb);
2937 skb_dst_drop(skb);
2938 skb_dst_set(skb, dst);
2939
2940 dst_output(net, skb->sk, skb);
2941 }
2942
2943 out:
2944 xfrm_pol_put(pol);
2945 return;
2946
2947 purge_queue:
2948 pq->timeout = 0;
2949 skb_queue_purge(&pq->hold_queue);
2950 xfrm_pol_put(pol);
2951 }
2952
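/* dst_output handler for dummy bundles: park the skb on the policy's
 * hold queue (bounded by XFRM_MAX_QUEUE_LEN) and arm the hold timer so
 * xfrm_policy_queue_process() can send it once the SAs are in place.
 */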
2953 static int xdst_queue_output(struct net *net, struct sock *sk, struct sk_buff *skb)
2954 {
2955 unsigned long sched_next;
2956 struct dst_entry *dst = skb_dst(skb);
2957 struct xfrm_dst *xdst = (struct xfrm_dst *) dst;
2958 struct xfrm_policy *pol = xdst->pols[0];
2959 struct xfrm_policy_queue *pq = &pol->polq;
2960
2961 if (unlikely(skb_fclone_busy(sk, skb))) {
2962 kfree_skb(skb);
2963 return 0;
2964 }
2965
2966 if (pq->hold_queue.qlen > XFRM_MAX_QUEUE_LEN) {
2967 kfree_skb(skb);
2968 return -EAGAIN;
2969 }
2970
2971 skb_dst_force(skb);
2972
2973 spin_lock_bh(&pq->hold_queue.lock);
2974
2975 if (!pq->timeout)
2976 pq->timeout = XFRM_QUEUE_TMO_MIN;
2977
2978 sched_next = jiffies + pq->timeout;
2979
2980 if (del_timer(&pq->hold_timer)) {
2981 if (time_before(pq->hold_timer.expires, sched_next))
2982 sched_next = pq->hold_timer.expires;
2983 xfrm_pol_put(pol);
2984 }
2985
2986 __skb_queue_tail(&pq->hold_queue, skb);
2987 if (!mod_timer(&pq->hold_timer, sched_next))
2988 xfrm_pol_hold(pol);
2989
2990 spin_unlock_bh(&pq->hold_queue.lock);
2991
2992 return 0;
2993 }
2994
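/* Build a placeholder bundle for flows whose xfrm_states are not yet
 * available. When queueing is requested and larval drop is disabled,
 * the bundle is flagged DST_XFRM_QUEUE and its output hook queues
 * packets until the key manager finishes negotiation.
 */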
2995 static struct xfrm_dst *xfrm_create_dummy_bundle(struct net *net,
2996 struct xfrm_flo *xflo,
2997 const struct flowi *fl,
2998 int num_xfrms,
2999 u16 family)
3000 {
3001 int err;
3002 struct net_device *dev;
3003 struct dst_entry *dst;
3004 struct dst_entry *dst1;
3005 struct xfrm_dst *xdst;
3006
3007 xdst = xfrm_alloc_dst(net, family);
3008 if (IS_ERR(xdst))
3009 return xdst;
3010
3011 if (!(xflo->flags & XFRM_LOOKUP_QUEUE) ||
3012 net->xfrm.sysctl_larval_drop ||
3013 num_xfrms <= 0)
3014 return xdst;
3015
3016 dst = xflo->dst_orig;
3017 dst1 = &xdst->u.dst;
3018 dst_hold(dst);
3019 xdst->route = dst;
3020
3021 dst_copy_metrics(dst1, dst);
3022
3023 dst1->obsolete = DST_OBSOLETE_FORCE_CHK;
3024 dst1->flags |= DST_XFRM_QUEUE;
3025 dst1->lastuse = jiffies;
3026
3027 dst1->input = dst_discard;
3028 dst1->output = xdst_queue_output;
3029
3030 dst_hold(dst);
3031 xfrm_dst_set_child(xdst, dst);
3032 xdst->path = dst;
3033
3034 xfrm_init_path((struct xfrm_dst *)dst1, dst, 0);
3035
3036 err = -ENODEV;
3037 dev = dst->dev;
3038 if (!dev)
3039 goto free_dst;
3040
3041 err = xfrm_fill_dst(xdst, dev, fl);
3042 if (err)
3043 goto free_dst;
3044
3045 out:
3046 return xdst;
3047
3048 free_dst:
3049 dst_release(dst1);
3050 xdst = ERR_PTR(err);
3051 goto out;
3052 }
3053
3054 static struct xfrm_dst *xfrm_bundle_lookup(struct net *net,
3055 const struct flowi *fl,
3056 u16 family, u8 dir,
3057 struct xfrm_flo *xflo, u32 if_id)
3058 {
3059 struct xfrm_policy *pols[XFRM_POLICY_TYPE_MAX];
3060 int num_pols = 0, num_xfrms = 0, err;
3061 struct xfrm_dst *xdst;
3062
3063 /* Resolve the policies to use if we couldn't get them from
3064 * a previous cache entry */
3065 num_pols = 1;
3066 pols[0] = xfrm_policy_lookup(net, fl, family, dir, if_id);
3067 err = xfrm_expand_policies(fl, family, pols,
3068 &num_pols, &num_xfrms);
3069 if (err < 0)
3070 goto inc_error;
3071 if (num_pols == 0)
3072 return NULL;
3073 if (num_xfrms <= 0)
3074 goto make_dummy_bundle;
3075
3076 xdst = xfrm_resolve_and_create_bundle(pols, num_pols, fl, family,
3077 xflo->dst_orig);
3078 if (IS_ERR(xdst)) {
3079 err = PTR_ERR(xdst);
3080 if (err == -EREMOTE) {
3081 xfrm_pols_put(pols, num_pols);
3082 return NULL;
3083 }
3084
3085 if (err != -EAGAIN)
3086 goto error;
3087 goto make_dummy_bundle;
3088 } else if (xdst == NULL) {
3089 num_xfrms = 0;
3090 goto make_dummy_bundle;
3091 }
3092
3093 return xdst;
3094
3095 make_dummy_bundle:
3096 /* We found policies, but there are no bundles to instantiate:
3097 * either because the policy blocks, has no transformations, or
3098 * we could not build a template (no xfrm_states). */
3099 xdst = xfrm_create_dummy_bundle(net, xflo, fl, num_xfrms, family);
3100 if (IS_ERR(xdst)) {
3101 xfrm_pols_put(pols, num_pols);
3102 return ERR_CAST(xdst);
3103 }
3104 xdst->num_pols = num_pols;
3105 xdst->num_xfrms = num_xfrms;
3106 memcpy(xdst->pols, pols, sizeof(struct xfrm_policy *) * num_pols);
3107
3108 return xdst;
3109
3110 inc_error:
3111 XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTPOLERROR);
3112 error:
3113 xfrm_pols_put(pols, num_pols);
3114 return ERR_PTR(err);
3115 }
3116
3117 static struct dst_entry *make_blackhole(struct net *net, u16 family,
3118 struct dst_entry *dst_orig)
3119 {
3120 const struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
3121 struct dst_entry *ret;
3122
3123 if (!afinfo) {
3124 dst_release(dst_orig);
3125 return ERR_PTR(-EINVAL);
3126 } else {
3127 ret = afinfo->blackhole_route(net, dst_orig);
3128 }
3129 rcu_read_unlock();
3130
3131 return ret;
3132 }
3133
3134 /* Finds/creates a bundle for the given flow and if_id.
3135 *
3136 * At the moment we eat a raw IP route. Mostly to speed up lookups
3137 * on interfaces with disabled IPsec.
3138 *
3139 * xfrm_lookup uses an if_id of 0 by default, and is provided for
3140 * compatibility
3141 */
3142 struct dst_entry *xfrm_lookup_with_ifid(struct net *net,
3143 struct dst_entry *dst_orig,
3144 const struct flowi *fl,
3145 const struct sock *sk,
3146 int flags, u32 if_id)
3147 {
3148 struct xfrm_policy *pols[XFRM_POLICY_TYPE_MAX];
3149 struct xfrm_dst *xdst;
3150 struct dst_entry *dst, *route;
3151 u16 family = dst_orig->ops->family;
3152 u8 dir = XFRM_POLICY_OUT;
3153 int i, err, num_pols, num_xfrms = 0, drop_pols = 0;
3154
3155 dst = NULL;
3156 xdst = NULL;
3157 route = NULL;
3158
3159 sk = sk_const_to_full_sk(sk);
3160 if (sk && sk->sk_policy[XFRM_POLICY_OUT]) {
3161 num_pols = 1;
3162 pols[0] = xfrm_sk_policy_lookup(sk, XFRM_POLICY_OUT, fl, family,
3163 if_id);
3164 err = xfrm_expand_policies(fl, family, pols,
3165 &num_pols, &num_xfrms);
3166 if (err < 0)
3167 goto dropdst;
3168
3169 if (num_pols) {
3170 if (num_xfrms <= 0) {
3171 drop_pols = num_pols;
3172 goto no_transform;
3173 }
3174
3175 xdst = xfrm_resolve_and_create_bundle(
3176 pols, num_pols, fl,
3177 family, dst_orig);
3178
3179 if (IS_ERR(xdst)) {
3180 xfrm_pols_put(pols, num_pols);
3181 err = PTR_ERR(xdst);
3182 if (err == -EREMOTE)
3183 goto nopol;
3184
3185 goto dropdst;
3186 } else if (xdst == NULL) {
3187 num_xfrms = 0;
3188 drop_pols = num_pols;
3189 goto no_transform;
3190 }
3191
3192 route = xdst->route;
3193 }
3194 }
3195
3196 if (xdst == NULL) {
3197 struct xfrm_flo xflo;
3198
3199 xflo.dst_orig = dst_orig;
3200 xflo.flags = flags;
3201
3202 /* To accelerate a bit... */
3203 if (!if_id && ((dst_orig->flags & DST_NOXFRM) ||
3204 !net->xfrm.policy_count[XFRM_POLICY_OUT]))
3205 goto nopol;
3206
3207 xdst = xfrm_bundle_lookup(net, fl, family, dir, &xflo, if_id);
3208 if (xdst == NULL)
3209 goto nopol;
3210 if (IS_ERR(xdst)) {
3211 err = PTR_ERR(xdst);
3212 goto dropdst;
3213 }
3214
3215 num_pols = xdst->num_pols;
3216 num_xfrms = xdst->num_xfrms;
3217 memcpy(pols, xdst->pols, sizeof(struct xfrm_policy *) * num_pols);
3218 route = xdst->route;
3219 }
3220
3221 dst = &xdst->u.dst;
3222 if (route == NULL && num_xfrms > 0) {
3223 /* The only case when xfrm_bundle_lookup() returns a
3224 * bundle with a null route is when the template could
3225 * not be resolved. It means the policies are there, but
3226 * the bundle could not be created, since we don't yet
3227 * have the xfrm_states. We need to wait for the KM to
3228 * negotiate new SAs or bail out with an error. */
3229 if (net->xfrm.sysctl_larval_drop) {
3230 XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTNOSTATES);
3231 err = -EREMOTE;
3232 goto error;
3233 }
3234
3235 err = -EAGAIN;
3236
3237 XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTNOSTATES);
3238 goto error;
3239 }
3240
3241 no_transform:
3242 if (num_pols == 0)
3243 goto nopol;
3244
3245 if ((flags & XFRM_LOOKUP_ICMP) &&
3246 !(pols[0]->flags & XFRM_POLICY_ICMP)) {
3247 err = -ENOENT;
3248 goto error;
3249 }
3250
3251 for (i = 0; i < num_pols; i++)
3252 WRITE_ONCE(pols[i]->curlft.use_time, ktime_get_real_seconds());
3253
3254 if (num_xfrms < 0) {
3255 /* Prohibit the flow */
3256 XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTPOLBLOCK);
3257 err = -EPERM;
3258 goto error;
3259 } else if (num_xfrms > 0) {
3260 /* Flow transformed */
3261 dst_release(dst_orig);
3262 } else {
3263 /* Flow passes untransformed */
3264 dst_release(dst);
3265 dst = dst_orig;
3266 }
3267 ok:
3268 xfrm_pols_put(pols, drop_pols);
3269 if (dst && dst->xfrm &&
3270 dst->xfrm->props.mode == XFRM_MODE_TUNNEL)
3271 dst->flags |= DST_XFRM_TUNNEL;
3272 return dst;
3273
3274 nopol:
3275 if ((!dst_orig->dev || !(dst_orig->dev->flags & IFF_LOOPBACK)) &&
3276 net->xfrm.policy_default[dir] == XFRM_USERPOLICY_BLOCK) {
3277 err = -EPERM;
3278 goto error;
3279 }
3280 if (!(flags & XFRM_LOOKUP_ICMP)) {
3281 dst = dst_orig;
3282 goto ok;
3283 }
3284 err = -ENOENT;
3285 error:
3286 dst_release(dst);
3287 dropdst:
3288 if (!(flags & XFRM_LOOKUP_KEEP_DST_REF))
3289 dst_release(dst_orig);
3290 xfrm_pols_put(pols, drop_pols);
3291 return ERR_PTR(err);
3292 }
3293 EXPORT_SYMBOL(xfrm_lookup_with_ifid);
3294
3295 /* Main function: finds/creates a bundle for the given flow.
3296 *
3297 * At the moment we eat a raw IP route. Mostly to speed up lookups
3298 * on interfaces with disabled IPsec.
3299 */
3300 struct dst_entry *xfrm_lookup(struct net *net, struct dst_entry *dst_orig,
3301 const struct flowi *fl, const struct sock *sk,
3302 int flags)
3303 {
3304 return xfrm_lookup_with_ifid(net, dst_orig, fl, sk, flags, 0);
3305 }
3306 EXPORT_SYMBOL(xfrm_lookup);
3307
3308 /* Callers of xfrm_lookup_route() must ensure a call to dst_output().
3309 * Otherwise we may send out blackholed packets.
3310 */
3311 struct dst_entry *xfrm_lookup_route(struct net *net, struct dst_entry *dst_orig,
3312 const struct flowi *fl,
3313 const struct sock *sk, int flags)
3314 {
3315 struct dst_entry *dst = xfrm_lookup(net, dst_orig, fl, sk,
3316 flags | XFRM_LOOKUP_QUEUE |
3317 XFRM_LOOKUP_KEEP_DST_REF);
3318
3319 if (PTR_ERR(dst) == -EREMOTE)
3320 return make_blackhole(net, dst_orig->ops->family, dst_orig);
3321
3322 if (IS_ERR(dst))
3323 dst_release(dst_orig);
3324
3325 return dst;
3326 }
3327 EXPORT_SYMBOL(xfrm_lookup_route);
3328
3329 static inline int
3330 xfrm_secpath_reject(int idx, struct sk_buff *skb, const struct flowi *fl)
3331 {
3332 struct sec_path *sp = skb_sec_path(skb);
3333 struct xfrm_state *x;
3334
3335 if (!sp || idx < 0 || idx >= sp->len)
3336 return 0;
3337 x = sp->xvec[idx];
3338 if (!x->type->reject)
3339 return 0;
3340 return x->type->reject(x, skb, fl);
3341 }
3342
3343 /* When skb is transformed back to its "native" form, we have to
3344 * check policy restrictions. At the moment we do this in a maximally
3345 * stupid way. Shame on me. :-) Of course, connected sockets must
3346 * have the policy cached at them.
3347 */
3348
3349 static inline int
3350 xfrm_state_ok(const struct xfrm_tmpl *tmpl, const struct xfrm_state *x,
3351 unsigned short family, u32 if_id)
3352 {
3353 if (xfrm_state_kern(x))
3354 return tmpl->optional && !xfrm_state_addr_cmp(tmpl, x, tmpl->encap_family);
3355 return x->id.proto == tmpl->id.proto &&
3356 (x->id.spi == tmpl->id.spi || !tmpl->id.spi) &&
3357 (x->props.reqid == tmpl->reqid || !tmpl->reqid) &&
3358 x->props.mode == tmpl->mode &&
3359 (tmpl->allalgs || (tmpl->aalgos & (1<<x->props.aalgo)) ||
3360 !(xfrm_id_proto_match(tmpl->id.proto, IPSEC_PROTO_ANY))) &&
3361 !(x->props.mode != XFRM_MODE_TRANSPORT &&
3362 xfrm_state_addr_cmp(tmpl, x, family)) &&
3363 (if_id == 0 || if_id == x->if_id);
3364 }
3365
3366 /*
3367 * 0 or more is returned when validation succeeded (either a bypass
3368 * because of an optional transport-mode template, or the next index
3369 * after the secpath state that matched the template).
3370 * -1 is returned when no matching template is found.
3371 * Otherwise "-2 - errored_index" is returned.
3372 */
3373 static inline int
3374 xfrm_policy_ok(const struct xfrm_tmpl *tmpl, const struct sec_path *sp, int start,
3375 unsigned short family, u32 if_id)
3376 {
3377 int idx = start;
3378
3379 if (tmpl->optional) {
3380 if (tmpl->mode == XFRM_MODE_TRANSPORT)
3381 return start;
3382 } else
3383 start = -1;
3384 for (; idx < sp->len; idx++) {
3385 if (xfrm_state_ok(tmpl, sp->xvec[idx], family, if_id))
3386 return ++idx;
3387 if (sp->xvec[idx]->props.mode != XFRM_MODE_TRANSPORT) {
3388 if (idx < sp->verified_cnt) {
3389 /* Secpath entry previously verified, consider optional and
3390 * continue searching
3391 */
3392 continue;
3393 }
3394
3395 if (start == -1)
3396 start = -2-idx;
3397 break;
3398 }
3399 }
3400 return start;
3401 }
3402
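/* Extract the IPv4 flow keys (addresses, protocol, ports or ICMP/GRE
 * fields) from @skb into @fl. With @reverse, source and destination are
 * swapped so callers can match policies for the reply direction.
 */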
3403 static void
3404 decode_session4(struct sk_buff *skb, struct flowi *fl, bool reverse)
3405 {
3406 const struct iphdr *iph = ip_hdr(skb);
3407 int ihl = iph->ihl;
3408 u8 *xprth = skb_network_header(skb) + ihl * 4;
3409 struct flowi4 *fl4 = &fl->u.ip4;
3410 int oif = 0;
3411
3412 if (skb_dst(skb) && skb_dst(skb)->dev)
3413 oif = skb_dst(skb)->dev->ifindex;
3414
3415 memset(fl4, 0, sizeof(struct flowi4));
3416 fl4->flowi4_mark = skb->mark;
3417 fl4->flowi4_oif = reverse ? skb->skb_iif : oif;
3418
3419 fl4->flowi4_proto = iph->protocol;
3420 fl4->daddr = reverse ? iph->saddr : iph->daddr;
3421 fl4->saddr = reverse ? iph->daddr : iph->saddr;
3422 fl4->flowi4_tos = iph->tos & ~INET_ECN_MASK;
3423
3424 if (!ip_is_fragment(iph)) {
3425 switch (iph->protocol) {
3426 case IPPROTO_UDP:
3427 case IPPROTO_UDPLITE:
3428 case IPPROTO_TCP:
3429 case IPPROTO_SCTP:
3430 case IPPROTO_DCCP:
3431 if (xprth + 4 < skb->data ||
3432 pskb_may_pull(skb, xprth + 4 - skb->data)) {
3433 __be16 *ports;
3434
3435 xprth = skb_network_header(skb) + ihl * 4;
3436 ports = (__be16 *)xprth;
3437
3438 fl4->fl4_sport = ports[!!reverse];
3439 fl4->fl4_dport = ports[!reverse];
3440 }
3441 break;
3442 case IPPROTO_ICMP:
3443 if (xprth + 2 < skb->data ||
3444 pskb_may_pull(skb, xprth + 2 - skb->data)) {
3445 u8 *icmp;
3446
3447 xprth = skb_network_header(skb) + ihl * 4;
3448 icmp = xprth;
3449
3450 fl4->fl4_icmp_type = icmp[0];
3451 fl4->fl4_icmp_code = icmp[1];
3452 }
3453 break;
3454 case IPPROTO_GRE:
3455 if (xprth + 12 < skb->data ||
3456 pskb_may_pull(skb, xprth + 12 - skb->data)) {
3457 __be16 *greflags;
3458 __be32 *gre_hdr;
3459
3460 xprth = skb_network_header(skb) + ihl * 4;
3461 greflags = (__be16 *)xprth;
3462 gre_hdr = (__be32 *)xprth;
3463
3464 if (greflags[0] & GRE_KEY) {
3465 if (greflags[0] & GRE_CSUM)
3466 gre_hdr++;
3467 fl4->fl4_gre_key = gre_hdr[1];
3468 }
3469 }
3470 break;
3471 default:
3472 break;
3473 }
3474 }
3475 }
3476
3477 #if IS_ENABLED(CONFIG_IPV6)
3478 static void
3479 decode_session6(struct sk_buff *skb, struct flowi *fl, bool reverse)
3480 {
3481 struct flowi6 *fl6 = &fl->u.ip6;
3482 int onlyproto = 0;
3483 const struct ipv6hdr *hdr = ipv6_hdr(skb);
3484 u32 offset = sizeof(*hdr);
3485 struct ipv6_opt_hdr *exthdr;
3486 const unsigned char *nh = skb_network_header(skb);
3487 u16 nhoff = IP6CB(skb)->nhoff;
3488 int oif = 0;
3489 u8 nexthdr;
3490
3491 if (!nhoff)
3492 nhoff = offsetof(struct ipv6hdr, nexthdr);
3493
3494 nexthdr = nh[nhoff];
3495
3496 if (skb_dst(skb) && skb_dst(skb)->dev)
3497 oif = skb_dst(skb)->dev->ifindex;
3498
3499 memset(fl6, 0, sizeof(struct flowi6));
3500 fl6->flowi6_mark = skb->mark;
3501 fl6->flowi6_oif = reverse ? skb->skb_iif : oif;
3502
3503 fl6->daddr = reverse ? hdr->saddr : hdr->daddr;
3504 fl6->saddr = reverse ? hdr->daddr : hdr->saddr;
3505
3506 while (nh + offset + sizeof(*exthdr) < skb->data ||
3507 pskb_may_pull(skb, nh + offset + sizeof(*exthdr) - skb->data)) {
3508 nh = skb_network_header(skb);
3509 exthdr = (struct ipv6_opt_hdr *)(nh + offset);
3510
3511 switch (nexthdr) {
3512 case NEXTHDR_FRAGMENT:
3513 onlyproto = 1;
3514 fallthrough;
3515 case NEXTHDR_ROUTING:
3516 case NEXTHDR_HOP:
3517 case NEXTHDR_DEST:
3518 offset += ipv6_optlen(exthdr);
3519 nexthdr = exthdr->nexthdr;
3520 break;
3521 case IPPROTO_UDP:
3522 case IPPROTO_UDPLITE:
3523 case IPPROTO_TCP:
3524 case IPPROTO_SCTP:
3525 case IPPROTO_DCCP:
3526 if (!onlyproto && (nh + offset + 4 < skb->data ||
3527 pskb_may_pull(skb, nh + offset + 4 - skb->data))) {
3528 __be16 *ports;
3529
3530 nh = skb_network_header(skb);
3531 ports = (__be16 *)(nh + offset);
3532 fl6->fl6_sport = ports[!!reverse];
3533 fl6->fl6_dport = ports[!reverse];
3534 }
3535 fl6->flowi6_proto = nexthdr;
3536 return;
3537 case IPPROTO_ICMPV6:
3538 if (!onlyproto && (nh + offset + 2 < skb->data ||
3539 pskb_may_pull(skb, nh + offset + 2 - skb->data))) {
3540 u8 *icmp;
3541
3542 nh = skb_network_header(skb);
3543 icmp = (u8 *)(nh + offset);
3544 fl6->fl6_icmp_type = icmp[0];
3545 fl6->fl6_icmp_code = icmp[1];
3546 }
3547 fl6->flowi6_proto = nexthdr;
3548 return;
3549 case IPPROTO_GRE:
3550 if (!onlyproto &&
3551 (nh + offset + 12 < skb->data ||
3552 pskb_may_pull(skb, nh + offset + 12 - skb->data))) {
3553 struct gre_base_hdr *gre_hdr;
3554 __be32 *gre_key;
3555
3556 nh = skb_network_header(skb);
3557 gre_hdr = (struct gre_base_hdr *)(nh + offset);
3558 gre_key = (__be32 *)(gre_hdr + 1);
3559
3560 if (gre_hdr->flags & GRE_KEY) {
3561 if (gre_hdr->flags & GRE_CSUM)
3562 gre_key++;
3563 fl6->fl6_gre_key = *gre_key;
3564 }
3565 }
3566 fl6->flowi6_proto = nexthdr;
3567 return;
3568
3569 #if IS_ENABLED(CONFIG_IPV6_MIP6)
3570 case IPPROTO_MH:
3571 offset += ipv6_optlen(exthdr);
3572 if (!onlyproto && (nh + offset + 3 < skb->data ||
3573 pskb_may_pull(skb, nh + offset + 3 - skb->data))) {
3574 struct ip6_mh *mh;
3575
3576 nh = skb_network_header(skb);
3577 mh = (struct ip6_mh *)(nh + offset);
3578 fl6->fl6_mh_type = mh->ip6mh_type;
3579 }
3580 fl6->flowi6_proto = nexthdr;
3581 return;
3582 #endif
3583 default:
3584 fl6->flowi6_proto = nexthdr;
3585 return;
3586 }
3587 }
3588 }
3589 #endif
3590
3591 int __xfrm_decode_session(struct sk_buff *skb, struct flowi *fl,
3592 unsigned int family, int reverse)
3593 {
3594 switch (family) {
3595 case AF_INET:
3596 decode_session4(skb, fl, reverse);
3597 break;
3598 #if IS_ENABLED(CONFIG_IPV6)
3599 case AF_INET6:
3600 decode_session6(skb, fl, reverse);
3601 break;
3602 #endif
3603 default:
3604 return -EAFNOSUPPORT;
3605 }
3606
3607 return security_xfrm_decode_session(skb, &fl->flowi_secid);
3608 }
3609 EXPORT_SYMBOL(__xfrm_decode_session);
3610
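/* Return 1 if any secpath entry at index >= k uses a non-transport
 * mode, storing that index in *idxp; 0 otherwise.
 */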
3611 static inline int secpath_has_nontransport(const struct sec_path *sp, int k, int *idxp)
3612 {
3613 for (; k < sp->len; k++) {
3614 if (sp->xvec[k]->props.mode != XFRM_MODE_TRANSPORT) {
3615 *idxp = k;
3616 return 1;
3617 }
3618 }
3619
3620 return 0;
3621 }
3622
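/* Validate an inbound packet against policy: check each secpath SA
 * against its selector, find the applicable socket or global policy,
 * and verify the secpath satisfies every template of every matched
 * policy. Returns 1 if the packet may pass, 0 if it must be dropped.
 */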
3623 int __xfrm_policy_check(struct sock *sk, int dir, struct sk_buff *skb,
3624 unsigned short family)
3625 {
3626 struct net *net = dev_net(skb->dev);
3627 struct xfrm_policy *pol;
3628 struct xfrm_policy *pols[XFRM_POLICY_TYPE_MAX];
3629 int npols = 0;
3630 int xfrm_nr;
3631 int pi;
3632 int reverse;
3633 struct flowi fl;
3634 int xerr_idx = -1;
3635 const struct xfrm_if_cb *ifcb;
3636 struct sec_path *sp;
3637 u32 if_id = 0;
3638
3639 rcu_read_lock();
3640 ifcb = xfrm_if_get_cb();
3641
3642 if (ifcb) {
3643 struct xfrm_if_decode_session_result r;
3644
3645 if (ifcb->decode_session(skb, family, &r)) {
3646 if_id = r.if_id;
3647 net = r.net;
3648 }
3649 }
3650 rcu_read_unlock();
3651
3652 reverse = dir & ~XFRM_POLICY_MASK;
3653 dir &= XFRM_POLICY_MASK;
3654
3655 if (__xfrm_decode_session(skb, &fl, family, reverse) < 0) {
3656 XFRM_INC_STATS(net, LINUX_MIB_XFRMINHDRERROR);
3657 return 0;
3658 }
3659
3660 nf_nat_decode_session(skb, &fl, family);
3661
3662 /* First, check used SA against their selectors. */
3663 sp = skb_sec_path(skb);
3664 if (sp) {
3665 int i;
3666
3667 for (i = sp->len - 1; i >= 0; i--) {
3668 struct xfrm_state *x = sp->xvec[i];
3669 if (!xfrm_selector_match(&x->sel, &fl, family)) {
3670 XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATEMISMATCH);
3671 return 0;
3672 }
3673 }
3674 }
3675
3676 pol = NULL;
3677 sk = sk_to_full_sk(sk);
3678 if (sk && sk->sk_policy[dir]) {
3679 pol = xfrm_sk_policy_lookup(sk, dir, &fl, family, if_id);
3680 if (IS_ERR(pol)) {
3681 XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLERROR);
3682 return 0;
3683 }
3684 }
3685
3686 if (!pol)
3687 pol = xfrm_policy_lookup(net, &fl, family, dir, if_id);
3688
3689 if (IS_ERR(pol)) {
3690 XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLERROR);
3691 return 0;
3692 }
3693
3694 if (!pol) {
3695 if (net->xfrm.policy_default[dir] == XFRM_USERPOLICY_BLOCK) {
3696 XFRM_INC_STATS(net, LINUX_MIB_XFRMINNOPOLS);
3697 return 0;
3698 }
3699
3700 if (sp && secpath_has_nontransport(sp, 0, &xerr_idx)) {
3701 xfrm_secpath_reject(xerr_idx, skb, &fl);
3702 XFRM_INC_STATS(net, LINUX_MIB_XFRMINNOPOLS);
3703 return 0;
3704 }
3705 return 1;
3706 }
3707
3708 /* This lockless write can happen from different cpus. */
3709 WRITE_ONCE(pol->curlft.use_time, ktime_get_real_seconds());
3710
3711 pols[0] = pol;
3712 npols++;
3713 #ifdef CONFIG_XFRM_SUB_POLICY
3714 if (pols[0]->type != XFRM_POLICY_TYPE_MAIN) {
3715 pols[1] = xfrm_policy_lookup_bytype(net, XFRM_POLICY_TYPE_MAIN,
3716 &fl, family,
3717 XFRM_POLICY_IN, if_id);
3718 if (pols[1]) {
3719 if (IS_ERR(pols[1])) {
3720 XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLERROR);
3721 xfrm_pol_put(pols[0]);
3722 return 0;
3723 }
3724 /* This write can happen from different cpus. */
3725 WRITE_ONCE(pols[1]->curlft.use_time,
3726 ktime_get_real_seconds());
3727 npols++;
3728 }
3729 }
3730 #endif
3731
3732 if (pol->action == XFRM_POLICY_ALLOW) {
3733 static struct sec_path dummy;
3734 struct xfrm_tmpl *tp[XFRM_MAX_DEPTH];
3735 struct xfrm_tmpl *stp[XFRM_MAX_DEPTH];
3736 struct xfrm_tmpl **tpp = tp;
3737 int ti = 0;
3738 int i, k;
3739
3740 sp = skb_sec_path(skb);
3741 if (!sp)
3742 sp = &dummy;
3743
3744 for (pi = 0; pi < npols; pi++) {
3745 if (pols[pi] != pol &&
3746 pols[pi]->action != XFRM_POLICY_ALLOW) {
3747 XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLBLOCK);
3748 goto reject;
3749 }
3750 if (ti + pols[pi]->xfrm_nr >= XFRM_MAX_DEPTH) {
3751 XFRM_INC_STATS(net, LINUX_MIB_XFRMINBUFFERERROR);
3752 goto reject_error;
3753 }
3754 for (i = 0; i < pols[pi]->xfrm_nr; i++)
3755 tpp[ti++] = &pols[pi]->xfrm_vec[i];
3756 }
3757 xfrm_nr = ti;
3758
3759 if (npols > 1) {
3760 xfrm_tmpl_sort(stp, tpp, xfrm_nr, family);
3761 tpp = stp;
3762 }
3763
3764 /* For each tunnel xfrm, find the first matching tmpl.
3765 * For each tmpl before that, find the corresponding xfrm.
3766 * Order is _important_. Later we will implement
3767 * some barriers, but at the moment barriers
3768 * are implied between each two transformations.
3769 * Upon success, marks secpath entries as having been
3770 * verified to allow them to be skipped in future policy
3771 * checks (e.g. nested tunnels).
3772 */
3773 for (i = xfrm_nr-1, k = 0; i >= 0; i--) {
3774 k = xfrm_policy_ok(tpp[i], sp, k, family, if_id);
3775 if (k < 0) {
3776 if (k < -1)
3777 /* "-2 - errored_index" returned */
3778 xerr_idx = -(2+k);
3779 XFRM_INC_STATS(net, LINUX_MIB_XFRMINTMPLMISMATCH);
3780 goto reject;
3781 }
3782 }
3783
3784 if (secpath_has_nontransport(sp, k, &xerr_idx)) {
3785 XFRM_INC_STATS(net, LINUX_MIB_XFRMINTMPLMISMATCH);
3786 goto reject;
3787 }
3788
3789 xfrm_pols_put(pols, npols);
3790 sp->verified_cnt = k;
3791
3792 return 1;
3793 }
3794 XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLBLOCK);
3795
3796 reject:
3797 xfrm_secpath_reject(xerr_idx, skb, &fl);
3798 reject_error:
3799 xfrm_pols_put(pols, npols);
3800 return 0;
3801 }
3802 EXPORT_SYMBOL(__xfrm_policy_check);
3803
3804 int __xfrm_route_forward(struct sk_buff *skb, unsigned short family)
3805 {
3806 struct net *net = dev_net(skb->dev);
3807 struct flowi fl;
3808 struct dst_entry *dst;
3809 int res = 1;
3810
3811 if (xfrm_decode_session(skb, &fl, family) < 0) {
3812 XFRM_INC_STATS(net, LINUX_MIB_XFRMFWDHDRERROR);
3813 return 0;
3814 }
3815
3816 skb_dst_force(skb);
3817 if (!skb_dst(skb)) {
3818 XFRM_INC_STATS(net, LINUX_MIB_XFRMFWDHDRERROR);
3819 return 0;
3820 }
3821
3822 dst = xfrm_lookup(net, skb_dst(skb), &fl, NULL, XFRM_LOOKUP_QUEUE);
3823 if (IS_ERR(dst)) {
3824 res = 0;
3825 dst = NULL;
3826 }
3827 skb_dst_set(skb, dst);
3828 return res;
3829 }
3830 EXPORT_SYMBOL(__xfrm_route_forward);
3831
3832 /* Optimize later using cookies and generation ids. */
3833
3834 static struct dst_entry *xfrm_dst_check(struct dst_entry *dst, u32 cookie)
3835 {
3836 /* Code (such as __xfrm4_bundle_create()) sets dst->obsolete
3837 * to DST_OBSOLETE_FORCE_CHK to force all XFRM destinations to
3838 * get validated by dst_ops->check on every use. We do this
3839 * because when a normal route referenced by an XFRM dst is
3840 * obsoleted we do not go looking around for all parent
3841 * referencing XFRM dsts so that we can invalidate them. It
3842 * is just too much work. Instead we make the checks here on
3843 * every use. For example:
3844 *
3845 * XFRM dst A --> IPv4 dst X
3846 *
3847 * X is the "xdst->route" of A (X is also the "dst->path" of A
3848 * in this example). If X is marked obsolete, "A" will not
3849 * notice. That's what we are validating here via the
3850 * stale_bundle() check.
3851 *
3852 * When a dst is removed from the fib tree, DST_OBSOLETE_DEAD will
3853 * be marked on it.
3854 * This will force stale_bundle() to fail on any xdst bundle with
3855 * this dst linked in it.
3856 */
3857 if (dst->obsolete < 0 && !stale_bundle(dst))
3858 return dst;
3859
3860 return NULL;
3861 }
3862
3863 static int stale_bundle(struct dst_entry *dst)
3864 {
3865 return !xfrm_bundle_ok((struct xfrm_dst *)dst);
3866 }
3867
3868 void xfrm_dst_ifdown(struct dst_entry *dst, struct net_device *dev)
3869 {
3870 while ((dst = xfrm_dst_child(dst)) && dst->xfrm && dst->dev == dev) {
3871 dst->dev = blackhole_netdev;
3872 dev_hold(dst->dev);
3873 dev_put(dev);
3874 }
3875 }
3876 EXPORT_SYMBOL(xfrm_dst_ifdown);
3877
3878 static void xfrm_link_failure(struct sk_buff *skb)
3879 {
3880 /* Impossible. Such a dst must be popped before it reaches the point of failure. */
3881 }
3882
3883 static void xfrm_negative_advice(struct sock *sk, struct dst_entry *dst)
3884 {
3885 if (dst->obsolete)
3886 sk_dst_reset(sk);
3887 }
3888
3889 static void xfrm_init_pmtu(struct xfrm_dst **bundle, int nr)
3890 {
3891 while (nr--) {
3892 struct xfrm_dst *xdst = bundle[nr];
3893 u32 pmtu, route_mtu_cached;
3894 struct dst_entry *dst;
3895
3896 dst = &xdst->u.dst;
3897 pmtu = dst_mtu(xfrm_dst_child(dst));
3898 xdst->child_mtu_cached = pmtu;
3899
3900 pmtu = xfrm_state_mtu(dst->xfrm, pmtu);
3901
3902 route_mtu_cached = dst_mtu(xdst->route);
3903 xdst->route_mtu_cached = route_mtu_cached;
3904
3905 if (pmtu > route_mtu_cached)
3906 pmtu = route_mtu_cached;
3907
3908 dst_metric_set(dst, RTAX_MTU, pmtu);
3909 }
3910 }
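/* Worked example (illustrative numbers only): with a child MTU of 1500,
 * xfrm_state_mtu() trimming that to 1438 for the negotiated ESP overhead,
 * and a route MTU of 1400 underneath, the entry's RTAX_MTU becomes
 * min(1438, 1400) = 1400.
 */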
3911
3912 /* Check that the bundle accepts the flow and its components are
3913 * still valid.
3914 */
3915
3916 static int xfrm_bundle_ok(struct xfrm_dst *first)
3917 {
3918 struct xfrm_dst *bundle[XFRM_MAX_DEPTH];
3919 struct dst_entry *dst = &first->u.dst;
3920 struct xfrm_dst *xdst;
3921 int start_from, nr;
3922 u32 mtu;
3923
3924 if (!dst_check(xfrm_dst_path(dst), ((struct xfrm_dst *)dst)->path_cookie) ||
3925 (dst->dev && !netif_running(dst->dev)))
3926 return 0;
3927
3928 if (dst->flags & DST_XFRM_QUEUE)
3929 return 1;
3930
3931 start_from = nr = 0;
3932 do {
3933 struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
3934
3935 if (dst->xfrm->km.state != XFRM_STATE_VALID)
3936 return 0;
3937 if (xdst->xfrm_genid != dst->xfrm->genid)
3938 return 0;
3939 if (xdst->num_pols > 0 &&
3940 xdst->policy_genid != atomic_read(&xdst->pols[0]->genid))
3941 return 0;
3942
3943 bundle[nr++] = xdst;
3944
3945 mtu = dst_mtu(xfrm_dst_child(dst));
3946 if (xdst->child_mtu_cached != mtu) {
3947 start_from = nr;
3948 xdst->child_mtu_cached = mtu;
3949 }
3950
3951 if (!dst_check(xdst->route, xdst->route_cookie))
3952 return 0;
3953 mtu = dst_mtu(xdst->route);
3954 if (xdst->route_mtu_cached != mtu) {
3955 start_from = nr;
3956 xdst->route_mtu_cached = mtu;
3957 }
3958
3959 dst = xfrm_dst_child(dst);
3960 } while (dst->xfrm);
3961
3962 if (likely(!start_from))
3963 return 1;
3964
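/* Some cached MTU changed: walk back up from the deepest affected
 * entry, re-folding each state's MTU into every ancestor's RTAX_MTU
 * metric.
 */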
3965 xdst = bundle[start_from - 1];
3966 mtu = xdst->child_mtu_cached;
3967 while (start_from--) {
3968 dst = &xdst->u.dst;
3969
3970 mtu = xfrm_state_mtu(dst->xfrm, mtu);
3971 if (mtu > xdst->route_mtu_cached)
3972 mtu = xdst->route_mtu_cached;
3973 dst_metric_set(dst, RTAX_MTU, mtu);
3974 if (!start_from)
3975 break;
3976
3977 xdst = bundle[start_from - 1];
3978 xdst->child_mtu_cached = mtu;
3979 }
3980
3981 return 1;
3982 }
3983
3984 static unsigned int xfrm_default_advmss(const struct dst_entry *dst)
3985 {
3986 return dst_metric_advmss(xfrm_dst_path(dst));
3987 }
3988
3989 static unsigned int xfrm_mtu(const struct dst_entry *dst)
3990 {
3991 unsigned int mtu = dst_metric_raw(dst, RTAX_MTU);
3992
3993 return mtu ? : dst_mtu(xfrm_dst_path(dst));
3994 }
3995
3996 static const void *xfrm_get_dst_nexthop(const struct dst_entry *dst,
3997 const void *daddr)
3998 {
3999 while (dst->xfrm) {
4000 const struct xfrm_state *xfrm = dst->xfrm;
4001
4002 dst = xfrm_dst_child(dst);
4003
4004 if (xfrm->props.mode == XFRM_MODE_TRANSPORT)
4005 continue;
4006 if (xfrm->type->flags & XFRM_TYPE_REMOTE_COADDR)
4007 daddr = xfrm->coaddr;
4008 else if (!(xfrm->type->flags & XFRM_TYPE_LOCAL_COADDR))
4009 daddr = &xfrm->id.daddr;
4010 }
4011 return daddr;
4012 }
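/* The walk above ends at the xfrm dst nearest the path, so the address
 * returned is the tunnel endpoint carried in the outermost header on the
 * wire (or that state's care-of address when XFRM_TYPE_REMOTE_COADDR,
 * i.e. MIPv6, is set) - the address the neighbour layer must resolve.
 */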
4013
4014 static struct neighbour *xfrm_neigh_lookup(const struct dst_entry *dst,
4015 struct sk_buff *skb,
4016 const void *daddr)
4017 {
4018 const struct dst_entry *path = xfrm_dst_path(dst);
4019
4020 if (!skb)
4021 daddr = xfrm_get_dst_nexthop(dst, daddr);
4022 return path->ops->neigh_lookup(path, skb, daddr);
4023 }
4024
4025 static void xfrm_confirm_neigh(const struct dst_entry *dst, const void *daddr)
4026 {
4027 const struct dst_entry *path = xfrm_dst_path(dst);
4028
4029 daddr = xfrm_get_dst_nexthop(dst, daddr);
4030 path->ops->confirm_neigh(path, daddr);
4031 }
4032
4033 int xfrm_policy_register_afinfo(const struct xfrm_policy_afinfo *afinfo, int family)
4034 {
4035 int err = 0;
4036
4037 if (WARN_ON(family >= ARRAY_SIZE(xfrm_policy_afinfo)))
4038 return -EAFNOSUPPORT;
4039
4040 spin_lock(&xfrm_policy_afinfo_lock);
4041 if (unlikely(xfrm_policy_afinfo[family] != NULL))
4042 err = -EEXIST;
4043 else {
4044 struct dst_ops *dst_ops = afinfo->dst_ops;
4045 if (likely(dst_ops->kmem_cachep == NULL))
4046 dst_ops->kmem_cachep = xfrm_dst_cache;
4047 if (likely(dst_ops->check == NULL))
4048 dst_ops->check = xfrm_dst_check;
4049 if (likely(dst_ops->default_advmss == NULL))
4050 dst_ops->default_advmss = xfrm_default_advmss;
4051 if (likely(dst_ops->mtu == NULL))
4052 dst_ops->mtu = xfrm_mtu;
4053 if (likely(dst_ops->negative_advice == NULL))
4054 dst_ops->negative_advice = xfrm_negative_advice;
4055 if (likely(dst_ops->link_failure == NULL))
4056 dst_ops->link_failure = xfrm_link_failure;
4057 if (likely(dst_ops->neigh_lookup == NULL))
4058 dst_ops->neigh_lookup = xfrm_neigh_lookup;
4059 if (likely(!dst_ops->confirm_neigh))
4060 dst_ops->confirm_neigh = xfrm_confirm_neigh;
4061 rcu_assign_pointer(xfrm_policy_afinfo[family], afinfo);
4062 }
4063 spin_unlock(&xfrm_policy_afinfo_lock);
4064
4065 return err;
4066 }
4067 EXPORT_SYMBOL(xfrm_policy_register_afinfo);
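/* Usage sketch, modeled on net/ipv4/xfrm4_policy.c (field names may vary
 * by kernel version):
 *
 *	static const struct xfrm_policy_afinfo xfrm4_policy_afinfo = {
 *		.dst_ops	= &xfrm4_dst_ops_template,
 *		...
 *	};
 *
 *	xfrm_policy_register_afinfo(&xfrm4_policy_afinfo, AF_INET);
 *
 * Any dst_ops hook the caller leaves NULL is filled in with the generic
 * xfrm_* implementation above before the afinfo is published under RCU.
 */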
4068
4069 void xfrm_policy_unregister_afinfo(const struct xfrm_policy_afinfo *afinfo)
4070 {
4071 struct dst_ops *dst_ops = afinfo->dst_ops;
4072 int i;
4073
4074 for (i = 0; i < ARRAY_SIZE(xfrm_policy_afinfo); i++) {
4075 if (xfrm_policy_afinfo[i] != afinfo)
4076 continue;
4077 RCU_INIT_POINTER(xfrm_policy_afinfo[i], NULL);
4078 break;
4079 }
4080
4081 synchronize_rcu();
4082
4083 dst_ops->kmem_cachep = NULL;
4084 dst_ops->check = NULL;
4085 dst_ops->negative_advice = NULL;
4086 dst_ops->link_failure = NULL;
4087 }
4088 EXPORT_SYMBOL(xfrm_policy_unregister_afinfo);
4089
4090 void xfrm_if_register_cb(const struct xfrm_if_cb *ifcb)
4091 {
4092 spin_lock(&xfrm_if_cb_lock);
4093 rcu_assign_pointer(xfrm_if_cb, ifcb);
4094 spin_unlock(&xfrm_if_cb_lock);
4095 }
4096 EXPORT_SYMBOL(xfrm_if_register_cb);
4097
4098 void xfrm_if_unregister_cb(void)
4099 {
4100 RCU_INIT_POINTER(xfrm_if_cb, NULL);
4101 synchronize_rcu();
4102 }
4103 EXPORT_SYMBOL(xfrm_if_unregister_cb);
4104
4105 #ifdef CONFIG_XFRM_STATISTICS
4106 static int __net_init xfrm_statistics_init(struct net *net)
4107 {
4108 int rv;
4109 net->mib.xfrm_statistics = alloc_percpu(struct linux_xfrm_mib);
4110 if (!net->mib.xfrm_statistics)
4111 return -ENOMEM;
4112 rv = xfrm_proc_init(net);
4113 if (rv < 0)
4114 free_percpu(net->mib.xfrm_statistics);
4115 return rv;
4116 }
4117
4118 static void xfrm_statistics_fini(struct net *net)
4119 {
4120 xfrm_proc_fini(net);
4121 free_percpu(net->mib.xfrm_statistics);
4122 }
4123 #else
4124 static int __net_init xfrm_statistics_init(struct net *net)
4125 {
4126 return 0;
4127 }
4128
4129 static void xfrm_statistics_fini(struct net *net)
4130 {
4131 }
4132 #endif
4133
4134 static int __net_init xfrm_policy_init(struct net *net)
4135 {
4136 unsigned int hmask, sz;
4137 int dir, err;
4138
4139 if (net_eq(net, &init_net)) {
4140 xfrm_dst_cache = kmem_cache_create("xfrm_dst_cache",
4141 sizeof(struct xfrm_dst),
4142 0, SLAB_HWCACHE_ALIGN|SLAB_PANIC,
4143 NULL);
4144 err = rhashtable_init(&xfrm_policy_inexact_table,
4145 &xfrm_pol_inexact_params);
4146 BUG_ON(err);
4147 }
4148
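/* Start small: 8 buckets per table (hmask = 7). The policy_hash_work
 * worker rehashes into larger tables as the policy count grows.
 */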
4149 hmask = 8 - 1;
4150 sz = (hmask+1) * sizeof(struct hlist_head);
4151
4152 net->xfrm.policy_byidx = xfrm_hash_alloc(sz);
4153 if (!net->xfrm.policy_byidx)
4154 goto out_byidx;
4155 net->xfrm.policy_idx_hmask = hmask;
4156
4157 for (dir = 0; dir < XFRM_POLICY_MAX; dir++) {
4158 struct xfrm_policy_hash *htab;
4159
4160 net->xfrm.policy_count[dir] = 0;
4161 net->xfrm.policy_count[XFRM_POLICY_MAX + dir] = 0;
4162 INIT_HLIST_HEAD(&net->xfrm.policy_inexact[dir]);
4163
4164 htab = &net->xfrm.policy_bydst[dir];
4165 htab->table = xfrm_hash_alloc(sz);
4166 if (!htab->table)
4167 goto out_bydst;
4168 htab->hmask = hmask;
4169 htab->dbits4 = 32;
4170 htab->sbits4 = 32;
4171 htab->dbits6 = 128;
4172 htab->sbits6 = 128;
4173 }
4174 net->xfrm.policy_hthresh.lbits4 = 32;
4175 net->xfrm.policy_hthresh.rbits4 = 32;
4176 net->xfrm.policy_hthresh.lbits6 = 128;
4177 net->xfrm.policy_hthresh.rbits6 = 128;
4178
4179 seqlock_init(&net->xfrm.policy_hthresh.lock);
4180
4181 INIT_LIST_HEAD(&net->xfrm.policy_all);
4182 INIT_LIST_HEAD(&net->xfrm.inexact_bins);
4183 INIT_WORK(&net->xfrm.policy_hash_work, xfrm_hash_resize);
4184 INIT_WORK(&net->xfrm.policy_hthresh.work, xfrm_hash_rebuild);
4185 return 0;
4186
4187 out_bydst:
4188 for (dir--; dir >= 0; dir--) {
4189 struct xfrm_policy_hash *htab;
4190
4191 htab = &net->xfrm.policy_bydst[dir];
4192 xfrm_hash_free(htab->table, sz);
4193 }
4194 xfrm_hash_free(net->xfrm.policy_byidx, sz);
4195 out_byidx:
4196 return -ENOMEM;
4197 }
4198
4199 static void xfrm_policy_fini(struct net *net)
4200 {
4201 struct xfrm_pol_inexact_bin *b, *t;
4202 unsigned int sz;
4203 int dir;
4204
4205 flush_work(&net->xfrm.policy_hash_work);
4206 #ifdef CONFIG_XFRM_SUB_POLICY
4207 xfrm_policy_flush(net, XFRM_POLICY_TYPE_SUB, false);
4208 #endif
4209 xfrm_policy_flush(net, XFRM_POLICY_TYPE_MAIN, false);
4210
4211 WARN_ON(!list_empty(&net->xfrm.policy_all));
4212
4213 for (dir = 0; dir < XFRM_POLICY_MAX; dir++) {
4214 struct xfrm_policy_hash *htab;
4215
4216 WARN_ON(!hlist_empty(&net->xfrm.policy_inexact[dir]));
4217
4218 htab = &net->xfrm.policy_bydst[dir];
4219 sz = (htab->hmask + 1) * sizeof(struct hlist_head);
4220 WARN_ON(!hlist_empty(htab->table));
4221 xfrm_hash_free(htab->table, sz);
4222 }
4223
4224 sz = (net->xfrm.policy_idx_hmask + 1) * sizeof(struct hlist_head);
4225 WARN_ON(!hlist_empty(net->xfrm.policy_byidx));
4226 xfrm_hash_free(net->xfrm.policy_byidx, sz);
4227
4228 spin_lock_bh(&net->xfrm.xfrm_policy_lock);
4229 list_for_each_entry_safe(b, t, &net->xfrm.inexact_bins, inexact_bins)
4230 __xfrm_policy_inexact_prune_bin(b, true);
4231 spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
4232 }
4233
4234 static int __net_init xfrm_net_init(struct net *net)
4235 {
4236 int rv;
4237
4238 /* Initialize the per-net locks here */
4239 spin_lock_init(&net->xfrm.xfrm_state_lock);
4240 spin_lock_init(&net->xfrm.xfrm_policy_lock);
4241 seqcount_spinlock_init(&net->xfrm.xfrm_policy_hash_generation, &net->xfrm.xfrm_policy_lock);
4242 mutex_init(&net->xfrm.xfrm_cfg_mutex);
4243 net->xfrm.policy_default[XFRM_POLICY_IN] = XFRM_USERPOLICY_ACCEPT;
4244 net->xfrm.policy_default[XFRM_POLICY_FWD] = XFRM_USERPOLICY_ACCEPT;
4245 net->xfrm.policy_default[XFRM_POLICY_OUT] = XFRM_USERPOLICY_ACCEPT;
4246
4247 rv = xfrm_statistics_init(net);
4248 if (rv < 0)
4249 goto out_statistics;
4250 rv = xfrm_state_init(net);
4251 if (rv < 0)
4252 goto out_state;
4253 rv = xfrm_policy_init(net);
4254 if (rv < 0)
4255 goto out_policy;
4256 rv = xfrm_sysctl_init(net);
4257 if (rv < 0)
4258 goto out_sysctl;
4259
4260 return 0;
4261
4262 out_sysctl:
4263 xfrm_policy_fini(net);
4264 out_policy:
4265 xfrm_state_fini(net);
4266 out_state:
4267 xfrm_statistics_fini(net);
4268 out_statistics:
4269 return rv;
4270 }
4271
4272 static void __net_exit xfrm_net_exit(struct net *net)
4273 {
4274 xfrm_sysctl_fini(net);
4275 xfrm_policy_fini(net);
4276 xfrm_state_fini(net);
4277 xfrm_statistics_fini(net);
4278 }
4279
4280 static struct pernet_operations __net_initdata xfrm_net_ops = {
4281 .init = xfrm_net_init,
4282 .exit = xfrm_net_exit,
4283 };
4284
4285 void __init xfrm_init(void)
4286 {
4287 register_pernet_subsys(&xfrm_net_ops);
4288 xfrm_dev_init();
4289 xfrm_input_init();
4290
4291 #ifdef CONFIG_XFRM_ESPINTCP
4292 espintcp_init();
4293 #endif
4294 }
4295
4296 #ifdef CONFIG_AUDITSYSCALL
4297 static void xfrm_audit_common_policyinfo(struct xfrm_policy *xp,
4298 struct audit_buffer *audit_buf)
4299 {
4300 struct xfrm_sec_ctx *ctx = xp->security;
4301 struct xfrm_selector *sel = &xp->selector;
4302
4303 if (ctx)
4304 audit_log_format(audit_buf, " sec_alg=%u sec_doi=%u sec_obj=%s",
4305 ctx->ctx_alg, ctx->ctx_doi, ctx->ctx_str);
4306
4307 switch (sel->family) {
4308 case AF_INET:
4309 audit_log_format(audit_buf, " src=%pI4", &sel->saddr.a4);
4310 if (sel->prefixlen_s != 32)
4311 audit_log_format(audit_buf, " src_prefixlen=%d",
4312 sel->prefixlen_s);
4313 audit_log_format(audit_buf, " dst=%pI4", &sel->daddr.a4);
4314 if (sel->prefixlen_d != 32)
4315 audit_log_format(audit_buf, " dst_prefixlen=%d",
4316 sel->prefixlen_d);
4317 break;
4318 case AF_INET6:
4319 audit_log_format(audit_buf, " src=%pI6", sel->saddr.a6);
4320 if (sel->prefixlen_s != 128)
4321 audit_log_format(audit_buf, " src_prefixlen=%d",
4322 sel->prefixlen_s);
4323 audit_log_format(audit_buf, " dst=%pI6", sel->daddr.a6);
4324 if (sel->prefixlen_d != 128)
4325 audit_log_format(audit_buf, " dst_prefixlen=%d",
4326 sel->prefixlen_d);
4327 break;
4328 }
4329 }
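/* Resulting record fragment, with illustrative values:
 *
 *	" sec_alg=1 sec_doi=0 sec_obj=system_u:object_r:ipsec_spd_t:s0"
 *	" src=10.1.2.0 src_prefixlen=24 dst=192.0.2.1"
 *
 * Host-wide prefixes (/32 or /128) are omitted, keeping records short in
 * the common non-subnet case.
 */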
4330
4331 void xfrm_audit_policy_add(struct xfrm_policy *xp, int result, bool task_valid)
4332 {
4333 struct audit_buffer *audit_buf;
4334
4335 audit_buf = xfrm_audit_start("SPD-add");
4336 if (audit_buf == NULL)
4337 return;
4338 xfrm_audit_helper_usrinfo(task_valid, audit_buf);
4339 audit_log_format(audit_buf, " res=%u", result);
4340 xfrm_audit_common_policyinfo(xp, audit_buf);
4341 audit_log_end(audit_buf);
4342 }
4343 EXPORT_SYMBOL_GPL(xfrm_audit_policy_add);
4344
4345 void xfrm_audit_policy_delete(struct xfrm_policy *xp, int result,
4346 bool task_valid)
4347 {
4348 struct audit_buffer *audit_buf;
4349
4350 audit_buf = xfrm_audit_start("SPD-delete");
4351 if (audit_buf == NULL)
4352 return;
4353 xfrm_audit_helper_usrinfo(task_valid, audit_buf);
4354 audit_log_format(audit_buf, " res=%u", result);
4355 xfrm_audit_common_policyinfo(xp, audit_buf);
4356 audit_log_end(audit_buf);
4357 }
4358 EXPORT_SYMBOL_GPL(xfrm_audit_policy_delete);
4359 #endif
4360
4361 #ifdef CONFIG_XFRM_MIGRATE
4362 static bool xfrm_migrate_selector_match(const struct xfrm_selector *sel_cmp,
4363 const struct xfrm_selector *sel_tgt)
4364 {
4365 if (sel_cmp->proto == IPSEC_ULPROTO_ANY) {
4366 if (sel_tgt->family == sel_cmp->family &&
4367 xfrm_addr_equal(&sel_tgt->daddr, &sel_cmp->daddr,
4368 sel_cmp->family) &&
4369 xfrm_addr_equal(&sel_tgt->saddr, &sel_cmp->saddr,
4370 sel_cmp->family) &&
4371 sel_tgt->prefixlen_d == sel_cmp->prefixlen_d &&
4372 sel_tgt->prefixlen_s == sel_cmp->prefixlen_s) {
4373 return true;
4374 }
4375 } else {
4376 if (memcmp(sel_tgt, sel_cmp, sizeof(*sel_tgt)) == 0) {
4377 return true;
4378 }
4379 }
4380 return false;
4381 }
4382
4383 static struct xfrm_policy *xfrm_migrate_policy_find(const struct xfrm_selector *sel,
4384 u8 dir, u8 type, struct net *net, u32 if_id)
4385 {
4386 struct xfrm_policy *pol, *ret = NULL;
4387 struct hlist_head *chain;
4388 u32 priority = ~0U;
4389
4390 spin_lock_bh(&net->xfrm.xfrm_policy_lock);
4391 chain = policy_hash_direct(net, &sel->daddr, &sel->saddr, sel->family, dir);
4392 hlist_for_each_entry(pol, chain, bydst) {
4393 if ((if_id == 0 || pol->if_id == if_id) &&
4394 xfrm_migrate_selector_match(sel, &pol->selector) &&
4395 pol->type == type) {
4396 ret = pol;
4397 priority = ret->priority;
4398 break;
4399 }
4400 }
4401 chain = &net->xfrm.policy_inexact[dir];
4402 hlist_for_each_entry(pol, chain, bydst_inexact_list) {
4403 if ((pol->priority >= priority) && ret)
4404 break;
4405
4406 if ((if_id == 0 || pol->if_id == if_id) &&
4407 xfrm_migrate_selector_match(sel, &pol->selector) &&
4408 pol->type == type) {
4409 ret = pol;
4410 break;
4411 }
4412 }
4413
4414 xfrm_pol_hold(ret);
4415
4416 spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
4417
4418 return ret;
4419 }
4420
4421 static int migrate_tmpl_match(const struct xfrm_migrate *m, const struct xfrm_tmpl *t)
4422 {
4423 int match = 0;
4424
4425 if (t->mode == m->mode && t->id.proto == m->proto &&
4426 (m->reqid == 0 || t->reqid == m->reqid)) {
4427 switch (t->mode) {
4428 case XFRM_MODE_TUNNEL:
4429 case XFRM_MODE_BEET:
4430 if (xfrm_addr_equal(&t->id.daddr, &m->old_daddr,
4431 m->old_family) &&
4432 xfrm_addr_equal(&t->saddr, &m->old_saddr,
4433 m->old_family)) {
4434 match = 1;
4435 }
4436 break;
4437 case XFRM_MODE_TRANSPORT:
4438 /* In transport mode the template stores no IP
4439  * addresses, so matching mode and protocol
4440  * suffices. */
4441 match = 1;
4442 break;
4443 default:
4444 break;
4445 }
4446 }
4447 return match;
4448 }
4449
4450 /* update endpoint address(es) of template(s) */
4451 static int xfrm_policy_migrate(struct xfrm_policy *pol,
4452 struct xfrm_migrate *m, int num_migrate,
4453 struct netlink_ext_ack *extack)
4454 {
4455 struct xfrm_migrate *mp;
4456 int i, j, n = 0;
4457
4458 write_lock_bh(&pol->lock);
4459 if (unlikely(pol->walk.dead)) {
4460 /* target policy has been deleted */
4461 NL_SET_ERR_MSG(extack, "Target policy not found");
4462 write_unlock_bh(&pol->lock);
4463 return -ENOENT;
4464 }
4465
4466 for (i = 0; i < pol->xfrm_nr; i++) {
4467 for (j = 0, mp = m; j < num_migrate; j++, mp++) {
4468 if (!migrate_tmpl_match(mp, &pol->xfrm_vec[i]))
4469 continue;
4470 n++;
4471 if (pol->xfrm_vec[i].mode != XFRM_MODE_TUNNEL &&
4472 pol->xfrm_vec[i].mode != XFRM_MODE_BEET)
4473 continue;
4474 /* update endpoints */
4475 memcpy(&pol->xfrm_vec[i].id.daddr, &mp->new_daddr,
4476 sizeof(pol->xfrm_vec[i].id.daddr));
4477 memcpy(&pol->xfrm_vec[i].saddr, &mp->new_saddr,
4478 sizeof(pol->xfrm_vec[i].saddr));
4479 pol->xfrm_vec[i].encap_family = mp->new_family;
4480 /* flush bundles */
4481 atomic_inc(&pol->genid);
4482 }
4483 }
4484
4485 write_unlock_bh(&pol->lock);
4486
4487 if (!n)
4488 return -ENODATA;
4489
4490 return 0;
4491 }
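/* Note: the atomic_inc(&pol->genid) above is what retires stale bundles -
 * xfrm_bundle_ok() compares xdst->policy_genid against the policy's
 * current genid and fails on mismatch, forcing a fresh xfrm_lookup() with
 * the migrated endpoints.
 */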
4492
4493 static int xfrm_migrate_check(const struct xfrm_migrate *m, int num_migrate,
4494 struct netlink_ext_ack *extack)
4495 {
4496 int i, j;
4497
4498 if (num_migrate < 1 || num_migrate > XFRM_MAX_DEPTH) {
4499 NL_SET_ERR_MSG(extack, "Invalid number of SAs to migrate, must be 0 < num <= XFRM_MAX_DEPTH (6)");
4500 return -EINVAL;
4501 }
4502
4503 for (i = 0; i < num_migrate; i++) {
4504 if (xfrm_addr_any(&m[i].new_daddr, m[i].new_family) ||
4505 xfrm_addr_any(&m[i].new_saddr, m[i].new_family)) {
4506 NL_SET_ERR_MSG(extack, "Addresses in the MIGRATE attribute's list cannot be null");
4507 return -EINVAL;
4508 }
4509
4510 /* check for duplicate entries */
4511 for (j = i + 1; j < num_migrate; j++) {
4512 if (!memcmp(&m[i].old_daddr, &m[j].old_daddr,
4513 sizeof(m[i].old_daddr)) &&
4514 !memcmp(&m[i].old_saddr, &m[j].old_saddr,
4515 sizeof(m[i].old_saddr)) &&
4516 m[i].proto == m[j].proto &&
4517 m[i].mode == m[j].mode &&
4518 m[i].reqid == m[j].reqid &&
4519 m[i].old_family == m[j].old_family) {
4520 NL_SET_ERR_MSG(extack, "Entries in the MIGRATE attribute's list must be unique");
4521 return -EINVAL;
4522 }
4523 }
4524 }
4525
4526 return 0;
4527 }
4528
4529 int xfrm_migrate(const struct xfrm_selector *sel, u8 dir, u8 type,
4530 struct xfrm_migrate *m, int num_migrate,
4531 struct xfrm_kmaddress *k, struct net *net,
4532 struct xfrm_encap_tmpl *encap, u32 if_id,
4533 struct netlink_ext_ack *extack)
4534 {
4535 int i, err, nx_cur = 0, nx_new = 0;
4536 struct xfrm_policy *pol = NULL;
4537 struct xfrm_state *x, *xc;
4538 struct xfrm_state *x_cur[XFRM_MAX_DEPTH];
4539 struct xfrm_state *x_new[XFRM_MAX_DEPTH];
4540 struct xfrm_migrate *mp;
4541
4542 /* Stage 0 - sanity checks */
4543 err = xfrm_migrate_check(m, num_migrate, extack);
4544 if (err < 0)
4545 goto out;
4546
4547 if (dir >= XFRM_POLICY_MAX) {
4548 NL_SET_ERR_MSG(extack, "Invalid policy direction");
4549 err = -EINVAL;
4550 goto out;
4551 }
4552
4553 /* Stage 1 - find policy */
4554 pol = xfrm_migrate_policy_find(sel, dir, type, net, if_id);
4555 if (!pol) {
4556 NL_SET_ERR_MSG(extack, "Target policy not found");
4557 err = -ENOENT;
4558 goto out;
4559 }
4560
4561 /* Stage 2 - find and update state(s) */
4562 for (i = 0, mp = m; i < num_migrate; i++, mp++) {
4563 if ((x = xfrm_migrate_state_find(mp, net, if_id))) {
4564 x_cur[nx_cur] = x;
4565 nx_cur++;
4566 xc = xfrm_state_migrate(x, mp, encap);
4567 if (xc) {
4568 x_new[nx_new] = xc;
4569 nx_new++;
4570 } else {
4571 err = -ENODATA;
4572 goto restore_state;
4573 }
4574 }
4575 }
4576
4577 /* Stage 3 - update policy */
4578 err = xfrm_policy_migrate(pol, m, num_migrate, extack);
4579 if (err < 0)
4580 goto restore_state;
4581
4582 /* Stage 4 - delete old state(s) */
4583 if (nx_cur) {
4584 xfrm_states_put(x_cur, nx_cur);
4585 xfrm_states_delete(x_cur, nx_cur);
4586 }
4587
4588 /* Stage 5 - announce */
4589 km_migrate(sel, dir, type, m, num_migrate, k, encap);
4590
4591 xfrm_pol_put(pol);
4592
4593 return 0;
4594 out:
4595 return err;
4596
4597 restore_state:
4598 if (pol)
4599 xfrm_pol_put(pol);
4600 if (nx_cur)
4601 xfrm_states_put(x_cur, nx_cur);
4602 if (nx_new)
4603 xfrm_states_delete(x_new, nx_new);
4604
4605 return err;
4606 }
4607 EXPORT_SYMBOL(xfrm_migrate);
4608 #endif
4609