#define pr_fmt(fmt) "IPsec: " fmt

#include <crypto/hash.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <net/ip.h>
#include <net/xfrm.h>
#include <net/ah.h>
#include <linux/crypto.h>
#include <linux/pfkeyv2.h>
#include <linux/scatterlist.h>
#include <net/icmp.h>
#include <net/protocol.h>

struct ah_skb_cb {
	struct xfrm_skb_cb xfrm;
	void *tmp;
};

#define AH_SKB_CB(__skb) ((struct ah_skb_cb *)&((__skb)->cb[0]))

static void *ah_alloc_tmp(struct crypto_ahash *ahash, int nfrags,
			  unsigned int size)
{
	unsigned int len;

	len = size + crypto_ahash_digestsize(ahash) +
	      (crypto_ahash_alignmask(ahash) &
	       ~(crypto_tfm_ctx_alignment() - 1));

	len = ALIGN(len, crypto_tfm_ctx_alignment());

	len += sizeof(struct ahash_request) + crypto_ahash_reqsize(ahash);
	len = ALIGN(len, __alignof__(struct scatterlist));

	len += sizeof(struct scatterlist) * nfrags;

	return kmalloc(len, GFP_ATOMIC);
}

static inline u8 *ah_tmp_auth(void *tmp, unsigned int offset)
{
	return tmp + offset;
}

static inline u8 *ah_tmp_icv(struct crypto_ahash *ahash, void *tmp,
			     unsigned int offset)
{
	return PTR_ALIGN((u8 *)tmp + offset, crypto_ahash_alignmask(ahash) + 1);
}

static inline struct ahash_request *ah_tmp_req(struct crypto_ahash *ahash,
					       u8 *icv)
{
	struct ahash_request *req;

	req = (void *)PTR_ALIGN(icv + crypto_ahash_digestsize(ahash),
				crypto_tfm_ctx_alignment());

	ahash_request_set_tfm(req, ahash);

	return req;
}

static inline struct scatterlist *ah_req_sg(struct crypto_ahash *ahash,
					    struct ahash_request *req)
{
	return (void *)ALIGN((unsigned long)(req + 1) +
			     crypto_ahash_reqsize(ahash),
			     __alignof__(struct scatterlist));
}
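/*
 * Illustrative layout of the scratch buffer returned by ah_alloc_tmp()
 * (exact offsets depend on the transform's alignment requirements):
 *
 *	+-------------------+
 *	| caller data       |  "size" bytes (saved IP header, seqhi, ...)
 *	| ICV               |  digest-aligned, crypto_ahash_digestsize() bytes
 *	| ahash_request     |  ctx-aligned, plus crypto_ahash_reqsize() bytes
 *	| scatterlist[]     |  nfrags (+1 for the ESN seqhi) entries
 *	+-------------------+
 *
 * The ah_tmp_auth/ah_tmp_icv/ah_tmp_req/ah_req_sg helpers above recover
 * the individual regions from this single allocation.
 */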
/* Clear mutable options and find final destination to substitute
 * into IP header for icv calculation. Options are already checked
 * for validity, so paranoia is not required. */

static int ip_clear_mutable_options(const struct iphdr *iph, __be32 *daddr)
{
	unsigned char *optptr = (unsigned char *)(iph+1);
	int l = iph->ihl*4 - sizeof(struct iphdr);
	int optlen;

	while (l > 0) {
		switch (*optptr) {
		case IPOPT_END:
			return 0;
		case IPOPT_NOOP:
			l--;
			optptr++;
			continue;
		}
		optlen = optptr[1];
		if (optlen < 2 || optlen > l)
			return -EINVAL;
		switch (*optptr) {
		case IPOPT_SEC:
		case 0x85:	/* Some "Extended Security" crap. */
		case IPOPT_CIPSO:
		case IPOPT_RA:
		case 0x80|21:	/* RFC1770 */
			break;
		case IPOPT_LSRR:
		case IPOPT_SSRR:
			if (optlen < 6)
				return -EINVAL;
			memcpy(daddr, optptr+optlen-4, 4);
			/* Fall through */
		default:
			memset(optptr, 0, optlen);
		}
		l -= optlen;
		optptr += optlen;
	}
	return 0;
}

static void ah_output_done(struct crypto_async_request *base, int err)
{
	u8 *icv;
	struct iphdr *iph;
	struct sk_buff *skb = base->data;
	struct xfrm_state *x = skb_dst(skb)->xfrm;
	struct ah_data *ahp = x->data;
	struct iphdr *top_iph = ip_hdr(skb);
	struct ip_auth_hdr *ah = ip_auth_hdr(skb);
	int ihl = ip_hdrlen(skb);

	iph = AH_SKB_CB(skb)->tmp;
	icv = ah_tmp_icv(ahp->ahash, iph, ihl);
	memcpy(ah->auth_data, icv, ahp->icv_trunc_len);

	top_iph->tos = iph->tos;
	top_iph->ttl = iph->ttl;
	top_iph->frag_off = iph->frag_off;
	if (top_iph->ihl != 5) {
		top_iph->daddr = iph->daddr;
		memcpy(top_iph+1, iph+1, top_iph->ihl*4 - sizeof(struct iphdr));
	}

	kfree(AH_SKB_CB(skb)->tmp);
	xfrm_output_resume(skb, err);
}

static int ah_output(struct xfrm_state *x, struct sk_buff *skb)
{
	int err;
	int nfrags;
	int ihl;
	u8 *icv;
	struct sk_buff *trailer;
	struct crypto_ahash *ahash;
	struct ahash_request *req;
	struct scatterlist *sg;
	struct iphdr *iph, *top_iph;
	struct ip_auth_hdr *ah;
	struct ah_data *ahp;
	int seqhi_len = 0;
	__be32 *seqhi;
	int sglists = 0;
	struct scatterlist *seqhisg;

	ahp = x->data;
	ahash = ahp->ahash;

	err = skb_cow_data(skb, 0, &trailer);
	if (err < 0)
		goto out;
	nfrags = err;

	skb_push(skb, -skb_network_offset(skb));
	ah = ip_auth_hdr(skb);
	ihl = ip_hdrlen(skb);

	if (x->props.flags & XFRM_STATE_ESN) {
		sglists = 1;
		seqhi_len = sizeof(*seqhi);
	}
	err = -ENOMEM;
	iph = ah_alloc_tmp(ahash, nfrags + sglists, ihl + seqhi_len);
	if (!iph)
		goto out;
	seqhi = (__be32 *)((char *)iph + ihl);
	icv = ah_tmp_icv(ahash, seqhi, seqhi_len);
	req = ah_tmp_req(ahash, icv);
	sg = ah_req_sg(ahash, req);
	seqhisg = sg + nfrags;

	memset(ah->auth_data, 0, ahp->icv_trunc_len);

	top_iph = ip_hdr(skb);

	/* Save the mutable header fields so they can be restored
	 * once the ICV has been computed. */
	iph->tos = top_iph->tos;
	iph->ttl = top_iph->ttl;
	iph->frag_off = top_iph->frag_off;

	if (top_iph->ihl != 5) {
		iph->daddr = top_iph->daddr;
		memcpy(iph+1, top_iph+1, top_iph->ihl*4 - sizeof(struct iphdr));
		err = ip_clear_mutable_options(top_iph, &top_iph->daddr);
		if (err)
			goto out_free;
	}

	ah->nexthdr = *skb_mac_header(skb);
	*skb_mac_header(skb) = IPPROTO_AH;

	/* Mutable fields must be zero while the ICV is computed (RFC 4302). */
	top_iph->tos = 0;
	top_iph->tot_len = htons(skb->len);
	top_iph->frag_off = 0;
	top_iph->ttl = 0;
	top_iph->check = 0;

	if (x->props.flags & XFRM_STATE_ALIGN4)
		ah->hdrlen = (XFRM_ALIGN4(sizeof(*ah) + ahp->icv_trunc_len) >> 2) - 2;
	else
		ah->hdrlen = (XFRM_ALIGN8(sizeof(*ah) + ahp->icv_trunc_len) >> 2) - 2;

	ah->reserved = 0;
	ah->spi = x->id.spi;
	ah->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output.low);

	sg_init_table(sg, nfrags + sglists);
	skb_to_sgvec_nomark(skb, sg, 0, skb->len);

	if (x->props.flags & XFRM_STATE_ESN) {
		/* Attach seqhi sg right after packet payload */
		*seqhi = htonl(XFRM_SKB_CB(skb)->seq.output.hi);
		sg_set_buf(seqhisg, seqhi, seqhi_len);
	}
	ahash_request_set_crypt(req, sg, icv, skb->len + seqhi_len);
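	/*
	 * The digest may complete asynchronously; crypto_ahash_digest()
	 * then returns -EINPROGRESS and ah_output_done() later copies the
	 * ICV into the packet and restores the saved header fields.
	 */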
	ahash_request_set_callback(req, 0, ah_output_done, skb);

	AH_SKB_CB(skb)->tmp = iph;

	err = crypto_ahash_digest(req);
	if (err) {
		if (err == -EINPROGRESS)
			goto out;

		if (err == -EBUSY)
			err = NET_XMIT_DROP;
		goto out_free;
	}

	memcpy(ah->auth_data, icv, ahp->icv_trunc_len);

	top_iph->tos = iph->tos;
	top_iph->ttl = iph->ttl;
	top_iph->frag_off = iph->frag_off;
	if (top_iph->ihl != 5) {
		top_iph->daddr = iph->daddr;
		memcpy(top_iph+1, iph+1, top_iph->ihl*4 - sizeof(struct iphdr));
	}

out_free:
	kfree(iph);
out:
	return err;
}

static void ah_input_done(struct crypto_async_request *base, int err)
{
	u8 *auth_data;
	u8 *icv;
	struct iphdr *work_iph;
	struct sk_buff *skb = base->data;
	struct xfrm_state *x = xfrm_input_state(skb);
	struct ah_data *ahp = x->data;
	struct ip_auth_hdr *ah = ip_auth_hdr(skb);
	int ihl = ip_hdrlen(skb);
	int ah_hlen = (ah->hdrlen + 2) << 2;

	work_iph = AH_SKB_CB(skb)->tmp;
	auth_data = ah_tmp_auth(work_iph, ihl);
	icv = ah_tmp_icv(ahp->ahash, auth_data, ahp->icv_trunc_len);

	err = memcmp(icv, auth_data, ahp->icv_trunc_len) ? -EBADMSG : 0;
	if (err)
		goto out;

	err = ah->nexthdr;

	skb->network_header += ah_hlen;
	memcpy(skb_network_header(skb), work_iph, ihl);
	__skb_pull(skb, ah_hlen + ihl);

	if (x->props.mode == XFRM_MODE_TUNNEL)
		skb_reset_transport_header(skb);
	else
		skb_set_transport_header(skb, -ihl);
out:
	kfree(AH_SKB_CB(skb)->tmp);
	xfrm_input_resume(skb, err);
}
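/*
 * The receive path mirrors the sender: the received ICV and a copy of
 * the IP header are stashed in the scratch buffer, the mutable fields
 * are zeroed, the packet is re-hashed, and the result is compared
 * against the ICV that arrived on the wire.
 */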
static int ah_input(struct xfrm_state *x, struct sk_buff *skb)
{
	int ah_hlen;
	int ihl;
	int nexthdr;
	int nfrags;
	u8 *auth_data;
	u8 *icv;
	struct sk_buff *trailer;
	struct crypto_ahash *ahash;
	struct ahash_request *req;
	struct scatterlist *sg;
	struct iphdr *iph, *work_iph;
	struct ip_auth_hdr *ah;
	struct ah_data *ahp;
	int err = -ENOMEM;
	int seqhi_len = 0;
	__be32 *seqhi;
	int sglists = 0;
	struct scatterlist *seqhisg;

	if (!pskb_may_pull(skb, sizeof(*ah)))
		goto out;

	ah = (struct ip_auth_hdr *)skb->data;
	ahp = x->data;
	ahash = ahp->ahash;

	nexthdr = ah->nexthdr;
	/* hdrlen is the AH length in 32-bit words, minus 2 (RFC 4302). */
	ah_hlen = (ah->hdrlen + 2) << 2;

	if (x->props.flags & XFRM_STATE_ALIGN4) {
		if (ah_hlen != XFRM_ALIGN4(sizeof(*ah) + ahp->icv_full_len) &&
		    ah_hlen != XFRM_ALIGN4(sizeof(*ah) + ahp->icv_trunc_len))
			goto out;
	} else {
		if (ah_hlen != XFRM_ALIGN8(sizeof(*ah) + ahp->icv_full_len) &&
		    ah_hlen != XFRM_ALIGN8(sizeof(*ah) + ahp->icv_trunc_len))
			goto out;
	}

	if (!pskb_may_pull(skb, ah_hlen))
		goto out;

	/* We are going to _remove_ AH header to keep sockets happy,
	 * so... Later this can change. */
	if (skb_unclone(skb, GFP_ATOMIC))
		goto out;

	skb->ip_summed = CHECKSUM_NONE;

	err = skb_cow_data(skb, 0, &trailer);
	if (err < 0)
		goto out;
	nfrags = err;

	ah = (struct ip_auth_hdr *)skb->data;
	iph = ip_hdr(skb);
	ihl = ip_hdrlen(skb);

	if (x->props.flags & XFRM_STATE_ESN) {
		sglists = 1;
		seqhi_len = sizeof(*seqhi);
	}

	work_iph = ah_alloc_tmp(ahash, nfrags + sglists, ihl +
				ahp->icv_trunc_len + seqhi_len);
	if (!work_iph)
		goto out;

	seqhi = (__be32 *)((char *)work_iph + ihl);
	auth_data = ah_tmp_auth(seqhi, seqhi_len);
	icv = ah_tmp_icv(ahash, auth_data, ahp->icv_trunc_len);
	req = ah_tmp_req(ahash, icv);
	sg = ah_req_sg(ahash, req);
	seqhisg = sg + nfrags;

	memcpy(work_iph, iph, ihl);
	memcpy(auth_data, ah->auth_data, ahp->icv_trunc_len);
	memset(ah->auth_data, 0, ahp->icv_trunc_len);

	iph->ttl = 0;
	iph->tos = 0;
	iph->frag_off = 0;
	iph->check = 0;
	if (ihl > sizeof(*iph)) {
		__be32 dummy;
		err = ip_clear_mutable_options(iph, &dummy);
		if (err)
			goto out_free;
	}

	skb_push(skb, ihl);

	sg_init_table(sg, nfrags + sglists);
	skb_to_sgvec_nomark(skb, sg, 0, skb->len);

	if (x->props.flags & XFRM_STATE_ESN) {
		/* Attach seqhi sg right after packet payload */
		*seqhi = XFRM_SKB_CB(skb)->seq.input.hi;
		sg_set_buf(seqhisg, seqhi, seqhi_len);
	}
	ahash_request_set_crypt(req, sg, icv, skb->len + seqhi_len);
	ahash_request_set_callback(req, 0, ah_input_done, skb);

	AH_SKB_CB(skb)->tmp = work_iph;

	err = crypto_ahash_digest(req);
	if (err) {
		if (err == -EINPROGRESS)
			goto out;

		goto out_free;
	}
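	/*
	 * Synchronous completion: do the same ICV comparison and header
	 * restoration that ah_input_done() performs for the async case.
	 */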
	err = memcmp(icv, auth_data, ahp->icv_trunc_len) ? -EBADMSG : 0;
	if (err)
		goto out_free;

	skb->network_header += ah_hlen;
	memcpy(skb_network_header(skb), work_iph, ihl);
	__skb_pull(skb, ah_hlen + ihl);
	if (x->props.mode == XFRM_MODE_TUNNEL)
		skb_reset_transport_header(skb);
	else
		skb_set_transport_header(skb, -ihl);

	err = nexthdr;

out_free:
	kfree(work_iph);
out:
	return err;
}

static int ah4_err(struct sk_buff *skb, u32 info)
{
	struct net *net = dev_net(skb->dev);
	const struct iphdr *iph = (const struct iphdr *)skb->data;
	struct ip_auth_hdr *ah = (struct ip_auth_hdr *)(skb->data+(iph->ihl<<2));
	struct xfrm_state *x;

	switch (icmp_hdr(skb)->type) {
	case ICMP_DEST_UNREACH:
		if (icmp_hdr(skb)->code != ICMP_FRAG_NEEDED)
			return 0;
		/* fall through */
	case ICMP_REDIRECT:
		break;
	default:
		return 0;
	}

	x = xfrm_state_lookup(net, skb->mark, (const xfrm_address_t *)&iph->daddr,
			      ah->spi, IPPROTO_AH, AF_INET);
	if (!x)
		return 0;

	if (icmp_hdr(skb)->type == ICMP_DEST_UNREACH)
		ipv4_update_pmtu(skb, net, info, 0, 0, IPPROTO_AH, 0);
	else
		ipv4_redirect(skb, net, 0, 0, IPPROTO_AH, 0);
	xfrm_state_put(x);

	return 0;
}
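/*
 * State setup: allocate the hash transform named by userspace, key it,
 * and derive the full and truncated ICV lengths that the input and
 * output handlers above rely on.
 */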
static int ah_init_state(struct xfrm_state *x)
{
	struct ah_data *ahp = NULL;
	struct xfrm_algo_desc *aalg_desc;
	struct crypto_ahash *ahash;

	if (!x->aalg)
		goto error;

	if (x->encap)
		goto error;

	ahp = kzalloc(sizeof(*ahp), GFP_KERNEL);
	if (!ahp)
		return -ENOMEM;

	ahash = crypto_alloc_ahash(x->aalg->alg_name, 0, 0);
	if (IS_ERR(ahash))
		goto error;

	ahp->ahash = ahash;
	if (crypto_ahash_setkey(ahash, x->aalg->alg_key,
				(x->aalg->alg_key_len + 7) / 8))
		goto error;

	/*
	 * Lookup the algorithm description maintained by xfrm_algo,
	 * verify crypto transform properties, and store information
	 * we need for AH processing. This lookup cannot fail here
	 * after a successful crypto_alloc_ahash().
	 */
	aalg_desc = xfrm_aalg_get_byname(x->aalg->alg_name, 0);
	BUG_ON(!aalg_desc);

	if (aalg_desc->uinfo.auth.icv_fullbits/8 !=
	    crypto_ahash_digestsize(ahash)) {
		pr_info("%s: %s digestsize %u != %hu\n",
			__func__, x->aalg->alg_name,
			crypto_ahash_digestsize(ahash),
			aalg_desc->uinfo.auth.icv_fullbits / 8);
		goto error;
	}

	ahp->icv_full_len = aalg_desc->uinfo.auth.icv_fullbits/8;
	ahp->icv_trunc_len = x->aalg->alg_trunc_len/8;

	if (x->props.flags & XFRM_STATE_ALIGN4)
		x->props.header_len = XFRM_ALIGN4(sizeof(struct ip_auth_hdr) +
						  ahp->icv_trunc_len);
	else
		x->props.header_len = XFRM_ALIGN8(sizeof(struct ip_auth_hdr) +
						  ahp->icv_trunc_len);
	if (x->props.mode == XFRM_MODE_TUNNEL)
		x->props.header_len += sizeof(struct iphdr);
	x->data = ahp;

	return 0;

error:
	if (ahp) {
		crypto_free_ahash(ahp->ahash);
		kfree(ahp);
	}
	return -EINVAL;
}

static void ah_destroy(struct xfrm_state *x)
{
	struct ah_data *ahp = x->data;

	if (!ahp)
		return;

	crypto_free_ahash(ahp->ahash);
	kfree(ahp);
}

static int ah4_rcv_cb(struct sk_buff *skb, int err)
{
	return 0;
}

static const struct xfrm_type ah_type =
{
	.description	= "AH4",
	.owner		= THIS_MODULE,
	.proto		= IPPROTO_AH,
	.flags		= XFRM_TYPE_REPLAY_PROT,
	.init_state	= ah_init_state,
	.destructor	= ah_destroy,
	.input		= ah_input,
	.output		= ah_output
};

static struct xfrm4_protocol ah4_protocol = {
	.handler	= xfrm4_rcv,
	.input_handler	= xfrm_input,
	.cb_handler	= ah4_rcv_cb,
	.err_handler	= ah4_err,
	.priority	= 0,
};

static int __init ah4_init(void)
{
	if (xfrm_register_type(&ah_type, AF_INET) < 0) {
		pr_info("%s: can't add xfrm type\n", __func__);
		return -EAGAIN;
	}
	if (xfrm4_protocol_register(&ah4_protocol, IPPROTO_AH) < 0) {
		pr_info("%s: can't add protocol\n", __func__);
		xfrm_unregister_type(&ah_type, AF_INET);
		return -EAGAIN;
	}
	return 0;
}

static void __exit ah4_fini(void)
{
	if (xfrm4_protocol_deregister(&ah4_protocol, IPPROTO_AH) < 0)
		pr_info("%s: can't remove protocol\n", __func__);
	if (xfrm_unregister_type(&ah_type, AF_INET) < 0)
		pr_info("%s: can't remove xfrm type\n", __func__);
}

module_init(ah4_init);
module_exit(ah4_fini);
MODULE_LICENSE("GPL");
MODULE_ALIAS_XFRM_TYPE(AF_INET, XFRM_PROTO_AH);
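/*
 * The MODULE_ALIAS_XFRM_TYPE() entry lets the xfrm core demand-load
 * this module when the first AF_INET AH state is installed, e.g. via
 * "ip xfrm state add ... proto ah".
 */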