#ifndef _NET_XFRM_H
#define _NET_XFRM_H

#include <linux/compiler.h>
#include <linux/in.h>
#include <linux/xfrm.h>
#include <linux/spinlock.h>
#include <linux/list.h>
#include <linux/skbuff.h>
#include <linux/socket.h>
#include <linux/pfkeyv2.h>
#include <linux/ipsec.h>
#include <linux/in6.h>
#include <linux/mutex.h>

#include <net/sock.h>
#include <net/dst.h>
#include <net/route.h>
#include <net/ipv6.h>
#include <net/ip6_fib.h>

#define XFRM_ALIGN8(len)	(((len) + 7) & ~7)
#define MODULE_ALIAS_XFRM_MODE(family, encap) \
	MODULE_ALIAS("xfrm-mode-" __stringify(family) "-" __stringify(encap))

extern struct sock *xfrm_nl;
extern u32 sysctl_xfrm_aevent_etime;
extern u32 sysctl_xfrm_aevent_rseqth;

extern struct mutex xfrm_cfg_mutex;

/* Organization of SPD aka "XFRM rules"
   ------------------------------------

   Basic objects:
   - policy rule, struct xfrm_policy (=SPD entry)
   - bundle of transformations, struct dst_entry == struct xfrm_dst (=SA bundle)
   - instance of a transformer, struct xfrm_state (=SA)
   - template to clone xfrm_state, struct xfrm_tmpl

   The SPD is a plain linear list of xfrm_policy rules, ordered by priority.
   (To be compatible with existing pfkeyv2 implementations, many rules with
   a priority of 0x7fffffff are allowed to exist, and such rules are ordered
   in an unpredictable way, thanks to bsd folks.)

   Lookup is a plain linear search until the first match with the selector.

   If "action" is "block", the flow is prohibited; otherwise, if "xfrm_nr"
   is zero, the flow passes untransformed.  Otherwise, the policy entry has
   a list of up to XFRM_MAX_DEPTH transformations, described by xfrm_tmpl
   templates.  Each template is resolved to a complete xfrm_state (see
   below) and the bundle of transformations is packed into a dst_entry
   returned to the requestor.

   dst -. xfrm .-> xfrm_state #1
    |---. child .-> dst -. xfrm .-> xfrm_state #2
                     |---. child .-> dst -. xfrm .-> xfrm_state #3
                                      |---. child .-> NULL

   Bundles are cached in the xfrm_policy struct (field ->bundles).


   Resolution of xfrm_tmpl
   -----------------------
   A template contains:
   1. ->mode		Mode: transport or tunnel
   2. ->id.proto	Protocol: AH/ESP/IPCOMP
   3. ->id.daddr	Remote tunnel endpoint, ignored for transport mode.
			Q: allow to resolve security gateway?
   4. ->id.spi		If not zero, static SPI.
   5. ->saddr		Local tunnel endpoint, ignored for transport mode.
   6. ->algos		List of allowed algos. Plain bitmask now.
			Q: ealgos, aalgos, calgos. What a mess...
   7. ->share		Sharing mode.
			Q: how to implement private sharing mode? To add
			struct sock* to flow id?

   With this template we search the SAD for entries with the appropriate
   mode/proto/algo, permitted by the selector.  If no appropriate entry is
   found, one is requested from the key manager.

   PROBLEMS:
   Q: How to find all the bundles referring to a physical path for
      PMTU discovery? Seems, dst should contain list of all parents...
      and enter to infinite locking hierarchy disaster.
      No! It is easier, we will not search for them, let them find us.
      We add genid to each dst plus pointer to genid of raw IP route,
      pmtu disc will update pmtu on raw IP route and increase its genid.
      dst_check() will see this for top level and trigger resyncing
      metrics. Plus, it will be made via sk->sk_dst_cache. Solved.
 */
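/*
 * For example, a policy whose template list holds two entries (say IPcomp
 * plus ESP in tunnel mode) resolves into a bundle two levels deep: each
 * level's ->xfrm points at one resolved xfrm_state, and the levels are
 * chained through ->child exactly as in the diagram above.
 */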
/* Full description of the state of a transformer. */
struct xfrm_state
{
	/* Note: bydst is re-used during gc */
	struct hlist_node	bydst;
	struct hlist_node	bysrc;
	struct hlist_node	byspi;

	atomic_t		refcnt;
	spinlock_t		lock;

	struct xfrm_id		id;
	struct xfrm_selector	sel;

	u32			genid;

	/* Key manager bits */
	struct {
		u8		state;
		u8		dying;
		u32		seq;
	} km;

	/* Parameters of this state. */
	struct {
		u32		reqid;
		u8		mode;
		u8		replay_window;
		u8		aalgo, ealgo, calgo;
		u8		flags;
		u16		family;
		xfrm_address_t	saddr;
		int		header_len;
		int		trailer_len;
	} props;

	struct xfrm_lifetime_cfg lft;

	/* Data for transformer */
	struct xfrm_algo	*aalg;
	struct xfrm_algo	*ealg;
	struct xfrm_algo	*calg;

	/* Data for encapsulator */
	struct xfrm_encap_tmpl	*encap;

	/* Data for care-of address */
	xfrm_address_t		*coaddr;

	/* IPComp needs an IPIP tunnel for handling uncompressed packets */
	struct xfrm_state	*tunnel;

	/* If a tunnel, number of users + 1 */
	atomic_t		tunnel_users;

	/* State for replay detection */
	struct xfrm_replay_state replay;

	/* Replay detection state at the time we sent the last notification */
	struct xfrm_replay_state preplay;

	/* internal flag that only holds state for delayed aevent at the
	 * moment
	 */
	u32			xflags;

	/* Replay detection notification settings */
	u32			replay_maxage;
	u32			replay_maxdiff;

	/* Replay detection notification timer */
	struct timer_list	rtimer;

	/* Statistics */
	struct xfrm_stats	stats;

	struct xfrm_lifetime_cur curlft;
	struct timer_list	timer;

	/* Last used time */
	u64			lastused;

	/* Reference to data common to all the instances of this
	 * transformer. */
	struct xfrm_type	*type;
	struct xfrm_mode	*mode;

	/* Security context */
	struct xfrm_sec_ctx	*security;

	/* Private data of this transformer, format is opaque,
	 * interpreted by xfrm_type methods. */
	void			*data;
};
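/*
 * x->km.state above holds one of the XFRM_STATE_* lifecycle values declared
 * below: only XFRM_STATE_VALID states are usable for transformation, while
 * XFRM_STATE_ACQ entries are placeholders awaiting resolution by a key
 * manager.
 */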
/* xflags - make enum if more show up */
#define XFRM_TIME_DEFER	1

enum {
	XFRM_STATE_VOID,
	XFRM_STATE_ACQ,
	XFRM_STATE_VALID,
	XFRM_STATE_ERROR,
	XFRM_STATE_EXPIRED,
	XFRM_STATE_DEAD
};

/* callback structure passed from either netlink or pfkey */
struct km_event
{
	union {
		u32 hard;
		u32 proto;
		u32 byid;
		u32 aevent;
		u32 type;
	} data;

	u32	seq;
	u32	pid;
	u32	event;
};

struct xfrm_type;
struct xfrm_dst;
struct xfrm_policy_afinfo {
	unsigned short		family;
	struct xfrm_type	*type_map[IPPROTO_MAX];
	struct xfrm_mode	*mode_map[XFRM_MODE_MAX];
	struct dst_ops		*dst_ops;
	void			(*garbage_collect)(void);
	int			(*dst_lookup)(struct xfrm_dst **dst, struct flowi *fl);
	int			(*get_saddr)(xfrm_address_t *saddr, xfrm_address_t *daddr);
	struct dst_entry	*(*find_bundle)(struct flowi *fl, struct xfrm_policy *policy);
	int			(*bundle_create)(struct xfrm_policy *policy,
						 struct xfrm_state **xfrm,
						 int nx,
						 struct flowi *fl,
						 struct dst_entry **dst_p);
	void			(*decode_session)(struct sk_buff *skb,
						  struct flowi *fl);
};

extern int xfrm_policy_register_afinfo(struct xfrm_policy_afinfo *afinfo);
extern int xfrm_policy_unregister_afinfo(struct xfrm_policy_afinfo *afinfo);
extern void km_policy_notify(struct xfrm_policy *xp, int dir, struct km_event *c);
extern void km_state_notify(struct xfrm_state *x, struct km_event *c);

#define XFRM_ACQ_EXPIRES	30

struct xfrm_tmpl;
extern int km_query(struct xfrm_state *x, struct xfrm_tmpl *t, struct xfrm_policy *pol);
extern void km_state_expired(struct xfrm_state *x, int hard, u32 pid);
extern int __xfrm_state_delete(struct xfrm_state *x);

struct xfrm_state_afinfo {
	unsigned short		family;
	int			(*init_flags)(struct xfrm_state *x);
	void			(*init_tempsel)(struct xfrm_state *x, struct flowi *fl,
						struct xfrm_tmpl *tmpl,
						xfrm_address_t *daddr, xfrm_address_t *saddr);
	int			(*tmpl_sort)(struct xfrm_tmpl **dst, struct xfrm_tmpl **src, int n);
	int			(*state_sort)(struct xfrm_state **dst, struct xfrm_state **src, int n);
	int			(*output)(struct sk_buff *skb);
};

extern int xfrm_state_register_afinfo(struct xfrm_state_afinfo *afinfo);
extern int xfrm_state_unregister_afinfo(struct xfrm_state_afinfo *afinfo);
extern struct xfrm_state_afinfo *xfrm_state_get_afinfo(unsigned short family);
extern void xfrm_state_put_afinfo(struct xfrm_state_afinfo *afinfo);

extern void xfrm_state_delete_tunnel(struct xfrm_state *x);

struct xfrm_type
{
	char			*description;
	struct module		*owner;
	__u8			proto;
	__u8			flags;
#define XFRM_TYPE_NON_FRAGMENT	1

	int			(*init_state)(struct xfrm_state *x);
	void			(*destructor)(struct xfrm_state *);
	int			(*input)(struct xfrm_state *, struct sk_buff *skb);
	int			(*output)(struct xfrm_state *, struct sk_buff *pskb);
	int			(*reject)(struct xfrm_state *, struct sk_buff *, struct flowi *);
	int			(*hdr_offset)(struct xfrm_state *, struct sk_buff *, u8 **);
	xfrm_address_t		*(*local_addr)(struct xfrm_state *, xfrm_address_t *);
	xfrm_address_t		*(*remote_addr)(struct xfrm_state *, xfrm_address_t *);
	/* Estimate maximal size of result of transformation of a dgram */
	u32			(*get_mtu)(struct xfrm_state *, int size);
};
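/*
 * A protocol module (e.g. an ESP or AH implementation) fills in one
 * struct xfrm_type per address family and makes it known roughly like:
 *
 *	static struct xfrm_type my_type = {
 *		.description	= "MY-PROTO",
 *		.owner		= THIS_MODULE,
 *		.proto		= IPPROTO_ESP,
 *		.init_state	= my_init_state,
 *		.destructor	= my_destroy,
 *		.input		= my_input,
 *		.output		= my_output,
 *	};
 *
 *	xfrm_register_type(&my_type, AF_INET);
 *
 * The my_* names above are placeholders for illustration only; see
 * xfrm_register_type()/xfrm_unregister_type() declared below.
 */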
extern int xfrm_register_type(struct xfrm_type *type, unsigned short family);
extern int xfrm_unregister_type(struct xfrm_type *type, unsigned short family);
extern struct xfrm_type *xfrm_get_type(u8 proto, unsigned short family);
extern void xfrm_put_type(struct xfrm_type *type);

struct xfrm_mode {
	int (*input)(struct xfrm_state *x, struct sk_buff *skb);
	int (*output)(struct xfrm_state *x, struct sk_buff *skb);

	struct module *owner;
	unsigned int encap;
};

extern int xfrm_register_mode(struct xfrm_mode *mode, int family);
extern int xfrm_unregister_mode(struct xfrm_mode *mode, int family);
extern struct xfrm_mode *xfrm_get_mode(unsigned int encap, int family);
extern void xfrm_put_mode(struct xfrm_mode *mode);

struct xfrm_tmpl
{
/* id in template is interpreted as:
 *	daddr - destination of tunnel, may be zero for transport mode.
 *	spi   - zero to acquire spi. Not zero if spi is static, then
 *		daddr must be fixed too.
 *	proto - AH/ESP/IPCOMP
 */
	struct xfrm_id		id;

	/* Source address of tunnel. Ignored, if it is not a tunnel. */
	xfrm_address_t		saddr;

	unsigned short		encap_family;

	__u32			reqid;

	/* Mode: transport, tunnel etc. */
	__u8			mode;

	/* Sharing mode: unique, this session only, this user only etc. */
	__u8			share;

	/* May skip this transformation if no SA is found */
	__u8			optional;

	/* Bit mask of algos allowed for acquisition */
	__u32			aalgos;
	__u32			ealgos;
	__u32			calgos;
};

#define XFRM_MAX_DEPTH		6

struct xfrm_policy
{
	struct xfrm_policy	*next;
	struct hlist_node	bydst;
	struct hlist_node	byidx;

	/* This lock only affects elements except for entry. */
	rwlock_t		lock;
	atomic_t		refcnt;
	struct timer_list	timer;

	u32			priority;
	u32			index;
	struct xfrm_selector	selector;
	struct xfrm_lifetime_cfg lft;
	struct xfrm_lifetime_cur curlft;
	struct dst_entry	*bundles;
	u16			family;
	u8			type;
	u8			action;
	u8			flags;
	u8			dead;
	u8			xfrm_nr;
	/* XXX 1 byte hole, try to pack */
	struct xfrm_sec_ctx	*security;
	struct xfrm_tmpl	xfrm_vec[XFRM_MAX_DEPTH];
};

struct xfrm_migrate {
	xfrm_address_t		old_daddr;
	xfrm_address_t		old_saddr;
	xfrm_address_t		new_daddr;
	xfrm_address_t		new_saddr;
	u8			proto;
	u8			mode;
	u16			reserved;
	u32			reqid;
	u16			old_family;
	u16			new_family;
};

#define XFRM_KM_TIMEOUT		30
/* which seqno */
#define XFRM_REPLAY_SEQ		1
#define XFRM_REPLAY_OSEQ	2
#define XFRM_REPLAY_SEQ_MASK	3
/* what happened */
#define XFRM_REPLAY_UPDATE	XFRM_AE_CR
#define XFRM_REPLAY_TIMEOUT	XFRM_AE_CE

/* default aevent timeout in units of 100ms */
#define XFRM_AE_ETIME		10
/* Async Event timer multiplier */
#define XFRM_AE_ETH_M		10
/* default seq threshold size */
#define XFRM_AE_SEQT_SIZE	2

struct xfrm_mgr
{
	struct list_head	list;
	char			*id;
	int			(*notify)(struct xfrm_state *x, struct km_event *c);
	int			(*acquire)(struct xfrm_state *x, struct xfrm_tmpl *, struct xfrm_policy *xp, int dir);
	struct xfrm_policy	*(*compile_policy)(struct sock *sk, int opt, u8 *data, int len, int *dir);
	int			(*new_mapping)(struct xfrm_state *x, xfrm_address_t *ipaddr, __be16 sport);
	int			(*notify_policy)(struct xfrm_policy *x, int dir, struct km_event *c);
	int			(*report)(u8 proto, struct xfrm_selector *sel, xfrm_address_t *addr);
	int			(*migrate)(struct xfrm_selector *sel, u8 dir, u8 type, struct xfrm_migrate *m, int num_bundles);
};
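/*
 * A key manager (the PF_KEY socket layer or the xfrm netlink interface)
 * plugs into the stack by filling in a struct xfrm_mgr and registering it
 * with xfrm_register_km(); its callbacks are invoked through the km_*
 * helpers when states need keying material, expire, or change.
 */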
extern int xfrm_register_km(struct xfrm_mgr *km);
extern int xfrm_unregister_km(struct xfrm_mgr *km);

extern unsigned int xfrm_policy_count[XFRM_POLICY_MAX*2];

/* Audit Information */
struct xfrm_audit
{
	uid_t	loginuid;
	u32	secid;
};

#ifdef CONFIG_AUDITSYSCALL
extern void xfrm_audit_log(uid_t auid, u32 secid, int type, int result,
			   struct xfrm_policy *xp, struct xfrm_state *x);
#else
#define xfrm_audit_log(a,s,t,r,p,x) do { ; } while (0)
#endif /* CONFIG_AUDITSYSCALL */

static inline void xfrm_pol_hold(struct xfrm_policy *policy)
{
	if (likely(policy != NULL))
		atomic_inc(&policy->refcnt);
}

extern void __xfrm_policy_destroy(struct xfrm_policy *policy);

static inline void xfrm_pol_put(struct xfrm_policy *policy)
{
	if (atomic_dec_and_test(&policy->refcnt))
		__xfrm_policy_destroy(policy);
}

#ifdef CONFIG_XFRM_SUB_POLICY
static inline void xfrm_pols_put(struct xfrm_policy **pols, int npols)
{
	int i;
	for (i = npols - 1; i >= 0; --i)
		xfrm_pol_put(pols[i]);
}
#else
static inline void xfrm_pols_put(struct xfrm_policy **pols, int npols)
{
	xfrm_pol_put(pols[0]);
}
#endif

extern void __xfrm_state_destroy(struct xfrm_state *);

static inline void __xfrm_state_put(struct xfrm_state *x)
{
	atomic_dec(&x->refcnt);
}

static inline void xfrm_state_put(struct xfrm_state *x)
{
	if (atomic_dec_and_test(&x->refcnt))
		__xfrm_state_destroy(x);
}

static inline void xfrm_state_hold(struct xfrm_state *x)
{
	atomic_inc(&x->refcnt);
}

static __inline__ int addr_match(void *token1, void *token2, int prefixlen)
{
	__be32 *a1 = token1;
	__be32 *a2 = token2;
	int pdw;
	int pbi;

	pdw = prefixlen >> 5;	  /* num of whole __u32 in prefix */
	pbi = prefixlen &  0x1f;  /* num of bits in incomplete u32 in prefix */

	if (pdw)
		if (memcmp(a1, a2, pdw << 2))
			return 0;

	if (pbi) {
		__be32 mask;

		mask = htonl((0xffffffff) << (32 - pbi));

		if ((a1[pdw] ^ a2[pdw]) & mask)
			return 0;
	}

	return 1;
}

static __inline__
__be16 xfrm_flowi_sport(struct flowi *fl)
{
	__be16 port;
	switch(fl->proto) {
	case IPPROTO_TCP:
	case IPPROTO_UDP:
	case IPPROTO_UDPLITE:
	case IPPROTO_SCTP:
		port = fl->fl_ip_sport;
		break;
	case IPPROTO_ICMP:
	case IPPROTO_ICMPV6:
		port = htons(fl->fl_icmp_type);
		break;
#ifdef CONFIG_IPV6_MIP6
	case IPPROTO_MH:
		port = htons(fl->fl_mh_type);
		break;
#endif
	default:
		port = 0;	/*XXX*/
	}
	return port;
}

static __inline__
__be16 xfrm_flowi_dport(struct flowi *fl)
{
	__be16 port;
	switch(fl->proto) {
	case IPPROTO_TCP:
	case IPPROTO_UDP:
	case IPPROTO_UDPLITE:
	case IPPROTO_SCTP:
		port = fl->fl_ip_dport;
		break;
	case IPPROTO_ICMP:
	case IPPROTO_ICMPV6:
		port = htons(fl->fl_icmp_code);
		break;
	default:
		port = 0;	/*XXX*/
	}
	return port;
}

extern int xfrm_selector_match(struct xfrm_selector *sel, struct flowi *fl,
			       unsigned short family);
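/*
 * Note that ICMP/ICMPv6 (and MIPv6 MH) flows have no real ports:
 * xfrm_flowi_sport()/xfrm_flowi_dport() above fold the ICMP type and code
 * (or the MH type) into the port slots of the flow, so a selector can,
 * for example, match only echo requests by treating type/code as ports.
 */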
#ifdef CONFIG_SECURITY_NETWORK_XFRM
/*	If neither has a context --> match
 *	Otherwise, both must have a context and the sids, doi, alg must match
 */
static inline int xfrm_sec_ctx_match(struct xfrm_sec_ctx *s1, struct xfrm_sec_ctx *s2)
{
	return ((!s1 && !s2) ||
		(s1 && s2 &&
		 (s1->ctx_sid == s2->ctx_sid) &&
		 (s1->ctx_doi == s2->ctx_doi) &&
		 (s1->ctx_alg == s2->ctx_alg)));
}
#else
static inline int xfrm_sec_ctx_match(struct xfrm_sec_ctx *s1, struct xfrm_sec_ctx *s2)
{
	return 1;
}
#endif

/* A struct encoding a bundle of transformations to apply to some set of flows.
 *
 * dst->child points to the next element of the bundle.
 * dst->xfrm points to an instance of a transformer.
 *
 * Due to unfortunate limitations of the current routing cache, which we
 * have no time to fix, it mirrors struct rtable and is bound to the same
 * routing key, including saddr/daddr. However, we can have many bundles
 * differing by session id. All the bundles grow from a parent policy rule.
 */
struct xfrm_dst
{
	union {
		struct xfrm_dst		*next;
		struct dst_entry	dst;
		struct rtable		rt;
		struct rt6_info		rt6;
	} u;
	struct dst_entry *route;
#ifdef CONFIG_XFRM_SUB_POLICY
	struct flowi *origin;
	struct xfrm_selector *partner;
#endif
	u32 genid;
	u32 route_mtu_cached;
	u32 child_mtu_cached;
	u32 route_cookie;
	u32 path_cookie;
};

static inline void xfrm_dst_destroy(struct xfrm_dst *xdst)
{
	dst_release(xdst->route);
	if (likely(xdst->u.dst.xfrm))
		xfrm_state_put(xdst->u.dst.xfrm);
#ifdef CONFIG_XFRM_SUB_POLICY
	kfree(xdst->origin);
	xdst->origin = NULL;
	kfree(xdst->partner);
	xdst->partner = NULL;
#endif
}

extern void xfrm_dst_ifdown(struct dst_entry *dst, struct net_device *dev);

struct sec_path
{
	atomic_t		refcnt;
	int			len;
	struct xfrm_state	*xvec[XFRM_MAX_DEPTH];
};

static inline struct sec_path *
secpath_get(struct sec_path *sp)
{
	if (sp)
		atomic_inc(&sp->refcnt);
	return sp;
}

extern void __secpath_destroy(struct sec_path *sp);

static inline void
secpath_put(struct sec_path *sp)
{
	if (sp && atomic_dec_and_test(&sp->refcnt))
		__secpath_destroy(sp);
}

extern struct sec_path *secpath_dup(struct sec_path *src);

static inline void
secpath_reset(struct sk_buff *skb)
{
#ifdef CONFIG_XFRM
	secpath_put(skb->sp);
	skb->sp = NULL;
#endif
}

static inline int
xfrm_addr_any(xfrm_address_t *addr, unsigned short family)
{
	switch (family) {
	case AF_INET:
		return addr->a4 == 0;
	case AF_INET6:
		return ipv6_addr_any((struct in6_addr *)&addr->a6);
	}
	return 0;
}

static inline int
__xfrm4_state_addr_cmp(struct xfrm_tmpl *tmpl, struct xfrm_state *x)
{
	return	(tmpl->saddr.a4 &&
		 tmpl->saddr.a4 != x->props.saddr.a4);
}

static inline int
__xfrm6_state_addr_cmp(struct xfrm_tmpl *tmpl, struct xfrm_state *x)
{
	return	(!ipv6_addr_any((struct in6_addr*)&tmpl->saddr) &&
		 ipv6_addr_cmp((struct in6_addr *)&tmpl->saddr, (struct in6_addr*)&x->props.saddr));
}

static inline int
xfrm_state_addr_cmp(struct xfrm_tmpl *tmpl, struct xfrm_state *x, unsigned short family)
{
	switch (family) {
	case AF_INET:
		return __xfrm4_state_addr_cmp(tmpl, x);
	case AF_INET6:
		return __xfrm6_state_addr_cmp(tmpl, x);
	}
	return !0;
}
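/*
 * Note the polarity: xfrm_state_addr_cmp() and its helpers above return
 * nonzero on a mismatch (a template source address that is set and differs
 * from the state's), and 0 when the addresses agree or the template leaves
 * the source address as a wildcard.
 */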
#ifdef CONFIG_XFRM

extern int __xfrm_policy_check(struct sock *, int dir, struct sk_buff *skb, unsigned short family);

static inline int xfrm_policy_check(struct sock *sk, int dir, struct sk_buff *skb,
				    unsigned short family)
{
	if (sk && sk->sk_policy[XFRM_POLICY_IN])
		return __xfrm_policy_check(sk, dir, skb, family);

	return	(!xfrm_policy_count[dir] && !skb->sp) ||
		(skb->dst->flags & DST_NOPOLICY) ||
		__xfrm_policy_check(sk, dir, skb, family);
}

static inline int xfrm4_policy_check(struct sock *sk, int dir, struct sk_buff *skb)
{
	return xfrm_policy_check(sk, dir, skb, AF_INET);
}

static inline int xfrm6_policy_check(struct sock *sk, int dir, struct sk_buff *skb)
{
	return xfrm_policy_check(sk, dir, skb, AF_INET6);
}

extern int xfrm_decode_session(struct sk_buff *skb, struct flowi *fl, unsigned short family);
extern int __xfrm_route_forward(struct sk_buff *skb, unsigned short family);

static inline int xfrm_route_forward(struct sk_buff *skb, unsigned short family)
{
	return	!xfrm_policy_count[XFRM_POLICY_OUT] ||
		(skb->dst->flags & DST_NOXFRM) ||
		__xfrm_route_forward(skb, family);
}

static inline int xfrm4_route_forward(struct sk_buff *skb)
{
	return xfrm_route_forward(skb, AF_INET);
}

static inline int xfrm6_route_forward(struct sk_buff *skb)
{
	return xfrm_route_forward(skb, AF_INET6);
}

extern int __xfrm_sk_clone_policy(struct sock *sk);

static inline int xfrm_sk_clone_policy(struct sock *sk)
{
	if (unlikely(sk->sk_policy[0] || sk->sk_policy[1]))
		return __xfrm_sk_clone_policy(sk);
	return 0;
}

extern int xfrm_policy_delete(struct xfrm_policy *pol, int dir);

static inline void xfrm_sk_free_policy(struct sock *sk)
{
	if (unlikely(sk->sk_policy[0] != NULL)) {
		xfrm_policy_delete(sk->sk_policy[0], XFRM_POLICY_MAX);
		sk->sk_policy[0] = NULL;
	}
	if (unlikely(sk->sk_policy[1] != NULL)) {
		xfrm_policy_delete(sk->sk_policy[1], XFRM_POLICY_MAX+1);
		sk->sk_policy[1] = NULL;
	}
}

#else

static inline void xfrm_sk_free_policy(struct sock *sk) {}
static inline int xfrm_sk_clone_policy(struct sock *sk) { return 0; }
static inline int xfrm6_route_forward(struct sk_buff *skb) { return 1; }
static inline int xfrm4_route_forward(struct sk_buff *skb) { return 1; }
static inline int xfrm6_policy_check(struct sock *sk, int dir, struct sk_buff *skb)
{
	return 1;
}
static inline int xfrm4_policy_check(struct sock *sk, int dir, struct sk_buff *skb)
{
	return 1;
}
static inline int xfrm_policy_check(struct sock *sk, int dir, struct sk_buff *skb, unsigned short family)
{
	return 1;
}
#endif
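/*
 * With CONFIG_XFRM enabled, the wrappers above implement the common fast
 * path: when no policies are loaded for the given direction (and, on
 * input, the skb carries no sec_path), or the cached route is flagged
 * DST_NOPOLICY/DST_NOXFRM, the packet is accepted or forwarded without
 * entering the xfrm core; only otherwise do they fall back to
 * __xfrm_policy_check() and __xfrm_route_forward().
 */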
static __inline__
xfrm_address_t *xfrm_flowi_daddr(struct flowi *fl, unsigned short family)
{
	switch (family){
	case AF_INET:
		return (xfrm_address_t *)&fl->fl4_dst;
	case AF_INET6:
		return (xfrm_address_t *)&fl->fl6_dst;
	}
	return NULL;
}

static __inline__
xfrm_address_t *xfrm_flowi_saddr(struct flowi *fl, unsigned short family)
{
	switch (family){
	case AF_INET:
		return (xfrm_address_t *)&fl->fl4_src;
	case AF_INET6:
		return (xfrm_address_t *)&fl->fl6_src;
	}
	return NULL;
}

static __inline__ int
__xfrm4_state_addr_check(struct xfrm_state *x,
			 xfrm_address_t *daddr, xfrm_address_t *saddr)
{
	if (daddr->a4 == x->id.daddr.a4 &&
	    (saddr->a4 == x->props.saddr.a4 || !saddr->a4 || !x->props.saddr.a4))
		return 1;
	return 0;
}

static __inline__ int
__xfrm6_state_addr_check(struct xfrm_state *x,
			 xfrm_address_t *daddr, xfrm_address_t *saddr)
{
	if (!ipv6_addr_cmp((struct in6_addr *)daddr, (struct in6_addr *)&x->id.daddr) &&
	    (!ipv6_addr_cmp((struct in6_addr *)saddr, (struct in6_addr *)&x->props.saddr) ||
	     ipv6_addr_any((struct in6_addr *)saddr) ||
	     ipv6_addr_any((struct in6_addr *)&x->props.saddr)))
		return 1;
	return 0;
}

static __inline__ int
xfrm_state_addr_check(struct xfrm_state *x,
		      xfrm_address_t *daddr, xfrm_address_t *saddr,
		      unsigned short family)
{
	switch (family) {
	case AF_INET:
		return __xfrm4_state_addr_check(x, daddr, saddr);
	case AF_INET6:
		return __xfrm6_state_addr_check(x, daddr, saddr);
	}
	return 0;
}

static __inline__ int
xfrm_state_addr_flow_check(struct xfrm_state *x, struct flowi *fl,
			   unsigned short family)
{
	switch (family) {
	case AF_INET:
		return __xfrm4_state_addr_check(x,
						(xfrm_address_t *)&fl->fl4_dst,
						(xfrm_address_t *)&fl->fl4_src);
	case AF_INET6:
		return __xfrm6_state_addr_check(x,
						(xfrm_address_t *)&fl->fl6_dst,
						(xfrm_address_t *)&fl->fl6_src);
	}
	return 0;
}

static inline int xfrm_state_kern(struct xfrm_state *x)
{
	return atomic_read(&x->tunnel_users);
}

static inline int xfrm_id_proto_match(u8 proto, u8 userproto)
{
	return (!userproto || proto == userproto ||
		(userproto == IPSEC_PROTO_ANY && (proto == IPPROTO_AH ||
						  proto == IPPROTO_ESP ||
						  proto == IPPROTO_COMP)));
}

/*
 * xfrm algorithm information
 */
struct xfrm_algo_auth_info {
	u16 icv_truncbits;
	u16 icv_fullbits;
};

struct xfrm_algo_encr_info {
	u16 blockbits;
	u16 defkeybits;
};

struct xfrm_algo_comp_info {
	u16 threshold;
};

struct xfrm_algo_desc {
	char *name;
	char *compat;
	u8 available:1;
	union {
		struct xfrm_algo_auth_info auth;
		struct xfrm_algo_encr_info encr;
		struct xfrm_algo_comp_info comp;
	} uinfo;
	struct sadb_alg desc;
};
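/*
 * For an authentication algorithm, icv_fullbits is the full digest size
 * and icv_truncbits the portion actually carried in the packet's ICV;
 * HMAC-SHA1 in IPsec, for instance, produces a 160-bit digest truncated
 * to 96 bits on the wire.
 */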
/* XFRM tunnel handlers.  */
struct xfrm_tunnel {
	int (*handler)(struct sk_buff *skb);
	int (*err_handler)(struct sk_buff *skb, __u32 info);

	struct xfrm_tunnel *next;
	int priority;
};

struct xfrm6_tunnel {
	int (*handler)(struct sk_buff *skb);
	int (*err_handler)(struct sk_buff *skb, struct inet6_skb_parm *opt,
			   int type, int code, int offset, __be32 info);
	struct xfrm6_tunnel *next;
	int priority;
};

extern void xfrm_init(void);
extern void xfrm4_init(void);
extern void xfrm6_init(void);
extern void xfrm6_fini(void);
extern void xfrm_state_init(void);
extern void xfrm4_state_init(void);
extern void xfrm6_state_init(void);
extern void xfrm6_state_fini(void);

extern int xfrm_state_walk(u8 proto, int (*func)(struct xfrm_state *, int, void*), void *);
extern struct xfrm_state *xfrm_state_alloc(void);
extern struct xfrm_state *xfrm_state_find(xfrm_address_t *daddr, xfrm_address_t *saddr,
					   struct flowi *fl, struct xfrm_tmpl *tmpl,
					   struct xfrm_policy *pol, int *err,
					   unsigned short family);
extern int xfrm_state_check_expire(struct xfrm_state *x);
extern void xfrm_state_insert(struct xfrm_state *x);
extern int xfrm_state_add(struct xfrm_state *x);
extern int xfrm_state_update(struct xfrm_state *x);
extern struct xfrm_state *xfrm_state_lookup(xfrm_address_t *daddr, __be32 spi, u8 proto, unsigned short family);
extern struct xfrm_state *xfrm_state_lookup_byaddr(xfrm_address_t *daddr, xfrm_address_t *saddr, u8 proto, unsigned short family);
#ifdef CONFIG_XFRM_SUB_POLICY
extern int xfrm_tmpl_sort(struct xfrm_tmpl **dst, struct xfrm_tmpl **src,
			  int n, unsigned short family);
extern int xfrm_state_sort(struct xfrm_state **dst, struct xfrm_state **src,
			   int n, unsigned short family);
#else
static inline int xfrm_tmpl_sort(struct xfrm_tmpl **dst, struct xfrm_tmpl **src,
				 int n, unsigned short family)
{
	return -ENOSYS;
}

static inline int xfrm_state_sort(struct xfrm_state **dst, struct xfrm_state **src,
				  int n, unsigned short family)
{
	return -ENOSYS;
}
#endif

struct xfrmk_sadinfo {
	u32 sadhcnt; /* current hash bkts */
	u32 sadhmcnt; /* max allowed hash bkts */
	u32 sadcnt; /* current running count */
};

struct xfrmk_spdinfo {
	u32 incnt;
	u32 outcnt;
	u32 fwdcnt;
	u32 inscnt;
	u32 outscnt;
	u32 fwdscnt;
	u32 spdhcnt;
	u32 spdhmcnt;
};
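/*
 * Snapshots of SAD/SPD accounting: current entry counts, per-direction
 * policy counts (with their socket-policy counterparts), and hash table
 * sizes.  They are filled in by xfrm_sad_getinfo()/xfrm_spd_getinfo(),
 * declared below, for reporting to userspace.
 */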
extern struct xfrm_state *xfrm_find_acq_byseq(u32 seq);
extern int xfrm_state_delete(struct xfrm_state *x);
extern void xfrm_state_flush(u8 proto, struct xfrm_audit *audit_info);
extern void xfrm_sad_getinfo(struct xfrmk_sadinfo *si);
extern void xfrm_spd_getinfo(struct xfrmk_spdinfo *si);
extern int xfrm_replay_check(struct xfrm_state *x, __be32 seq);
extern void xfrm_replay_advance(struct xfrm_state *x, __be32 seq);
extern void xfrm_replay_notify(struct xfrm_state *x, int event);
extern int xfrm_state_check(struct xfrm_state *x, struct sk_buff *skb);
extern int xfrm_state_mtu(struct xfrm_state *x, int mtu);
extern int xfrm_init_state(struct xfrm_state *x);
extern int xfrm4_rcv(struct sk_buff *skb);
extern int xfrm4_output(struct sk_buff *skb);
extern int xfrm4_tunnel_register(struct xfrm_tunnel *handler, unsigned short family);
extern int xfrm4_tunnel_deregister(struct xfrm_tunnel *handler, unsigned short family);
extern int xfrm6_rcv_spi(struct sk_buff *skb, __be32 spi);
extern int xfrm6_rcv(struct sk_buff **pskb);
extern int xfrm6_input_addr(struct sk_buff *skb, xfrm_address_t *daddr,
			    xfrm_address_t *saddr, u8 proto);
extern int xfrm6_tunnel_register(struct xfrm6_tunnel *handler, unsigned short family);
extern int xfrm6_tunnel_deregister(struct xfrm6_tunnel *handler, unsigned short family);
extern __be32 xfrm6_tunnel_alloc_spi(xfrm_address_t *saddr);
extern void xfrm6_tunnel_free_spi(xfrm_address_t *saddr);
extern __be32 xfrm6_tunnel_spi_lookup(xfrm_address_t *saddr);
extern int xfrm6_output(struct sk_buff *skb);
extern int xfrm6_find_1stfragopt(struct xfrm_state *x, struct sk_buff *skb,
				 u8 **prevhdr);

#ifdef CONFIG_XFRM
extern int xfrm4_rcv_encap(struct sk_buff *skb, __u16 encap_type);
extern int xfrm_user_policy(struct sock *sk, int optname, u8 __user *optval, int optlen);
extern int xfrm_dst_lookup(struct xfrm_dst **dst, struct flowi *fl, unsigned short family);
#else
static inline int xfrm_user_policy(struct sock *sk, int optname, u8 __user *optval, int optlen)
{
	return -ENOPROTOOPT;
}

static inline int xfrm4_rcv_encap(struct sk_buff *skb, __u16 encap_type)
{
	/* should not happen */
	kfree_skb(skb);
	return 0;
}

static inline int xfrm_dst_lookup(struct xfrm_dst **dst, struct flowi *fl, unsigned short family)
{
	return -EINVAL;
}
#endif

struct xfrm_policy *xfrm_policy_alloc(gfp_t gfp);
extern int xfrm_policy_walk(u8 type, int (*func)(struct xfrm_policy *, int, int, void*), void *);
int xfrm_policy_insert(int dir, struct xfrm_policy *policy, int excl);
struct xfrm_policy *xfrm_policy_bysel_ctx(u8 type, int dir,
					  struct xfrm_selector *sel,
					  struct xfrm_sec_ctx *ctx, int delete,
					  int *err);
struct xfrm_policy *xfrm_policy_byid(u8, int dir, u32 id, int delete, int *err);
void xfrm_policy_flush(u8 type, struct xfrm_audit *audit_info);
u32 xfrm_get_acqseq(void);
void xfrm_alloc_spi(struct xfrm_state *x, __be32 minspi, __be32 maxspi);
struct xfrm_state *xfrm_find_acq(u8 mode, u32 reqid, u8 proto,
				 xfrm_address_t *daddr, xfrm_address_t *saddr,
				 int create, unsigned short family);
extern int xfrm_sk_policy_insert(struct sock *sk, int dir, struct xfrm_policy *pol);
extern int xfrm_bundle_ok(struct xfrm_policy *pol, struct xfrm_dst *xdst,
			  struct flowi *fl, int family, int strict);
extern void xfrm_init_pmtu(struct dst_entry *dst);

#ifdef CONFIG_XFRM_MIGRATE
extern int km_migrate(struct xfrm_selector *sel, u8 dir, u8 type,
		      struct xfrm_migrate *m, int num_bundles);
extern struct xfrm_state *xfrm_migrate_state_find(struct xfrm_migrate *m);
extern struct xfrm_state *xfrm_state_migrate(struct xfrm_state *x,
					     struct xfrm_migrate *m);
extern int xfrm_migrate(struct xfrm_selector *sel, u8 dir, u8 type,
			struct xfrm_migrate *m, int num_bundles);
#endif

extern wait_queue_head_t km_waitq;
extern int km_new_mapping(struct xfrm_state *x, xfrm_address_t *ipaddr, __be16 sport);
extern void km_policy_expired(struct xfrm_policy *pol, int dir, int hard, u32 pid);
extern int km_report(u8 proto, struct xfrm_selector *sel, xfrm_address_t *addr);

extern void xfrm_input_init(void);
extern int xfrm_parse_spi(struct sk_buff *skb, u8 nexthdr, __be32 *spi, __be32 *seq);
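/*
 * xfrm_parse_spi() pulls the SPI (and, where present, the sequence number)
 * out of the packet according to nexthdr: AH and ESP carry a real SPI,
 * while for IPcomp the 16-bit CPI is widened into SPI form, so the caller
 * can then look up the state with xfrm_state_lookup().
 */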
extern void xfrm_probe_algs(void);
extern int xfrm_count_auth_supported(void);
extern int xfrm_count_enc_supported(void);
extern struct xfrm_algo_desc *xfrm_aalg_get_byidx(unsigned int idx);
extern struct xfrm_algo_desc *xfrm_ealg_get_byidx(unsigned int idx);
extern struct xfrm_algo_desc *xfrm_aalg_get_byid(int alg_id);
extern struct xfrm_algo_desc *xfrm_ealg_get_byid(int alg_id);
extern struct xfrm_algo_desc *xfrm_calg_get_byid(int alg_id);
extern struct xfrm_algo_desc *xfrm_aalg_get_byname(char *name, int probe);
extern struct xfrm_algo_desc *xfrm_ealg_get_byname(char *name, int probe);
extern struct xfrm_algo_desc *xfrm_calg_get_byname(char *name, int probe);

struct hash_desc;
struct scatterlist;
typedef int (icv_update_fn_t)(struct hash_desc *, struct scatterlist *,
			      unsigned int);

extern int skb_icv_walk(const struct sk_buff *skb, struct hash_desc *tfm,
			int offset, int len, icv_update_fn_t icv_update);

static inline int xfrm_addr_cmp(xfrm_address_t *a, xfrm_address_t *b,
				int family)
{
	switch (family) {
	default:
	case AF_INET:
		return (__force __u32)a->a4 - (__force __u32)b->a4;
	case AF_INET6:
		return ipv6_addr_cmp((struct in6_addr *)a,
				     (struct in6_addr *)b);
	}
}

static inline int xfrm_policy_id2dir(u32 index)
{
	return index & 7;
}

static inline int xfrm_aevent_is_on(void)
{
	struct sock *nlsk;
	int ret = 0;

	rcu_read_lock();
	nlsk = rcu_dereference(xfrm_nl);
	if (nlsk)
		ret = netlink_has_listeners(nlsk, XFRMNLGRP_AEVENTS);
	rcu_read_unlock();
	return ret;
}

static inline void xfrm_aevent_doreplay(struct xfrm_state *x)
{
	if (xfrm_aevent_is_on())
		xfrm_replay_notify(x, XFRM_REPLAY_UPDATE);
}

#ifdef CONFIG_XFRM_MIGRATE
static inline struct xfrm_algo *xfrm_algo_clone(struct xfrm_algo *orig)
{
	return kmemdup(orig, sizeof(*orig) + orig->alg_key_len, GFP_KERNEL);
}

static inline void xfrm_states_put(struct xfrm_state **states, int n)
{
	int i;
	for (i = 0; i < n; i++)
		xfrm_state_put(*(states + i));
}

static inline void xfrm_states_delete(struct xfrm_state **states, int n)
{
	int i;
	for (i = 0; i < n; i++)
		xfrm_state_delete(*(states + i));
}
#endif

#endif /* _NET_XFRM_H */