/*
 *  linux/net/sunrpc/gss_krb5_crypto.c
 *
 *  Copyright (c) 2000-2008 The Regents of the University of Michigan.
 *  All rights reserved.
 *
 *  Andy Adamson   <andros@umich.edu>
 *  Bruce Fields   <bfields@umich.edu>
 */

/*
 * Copyright (C) 1998 by the FundsXpress, INC.
 *
 * All rights reserved.
 *
 * Export of this software from the United States of America may require
 * a specific license from the United States Government.  It is the
 * responsibility of any person or organization contemplating export to
 * obtain such a license before exporting.
 *
 * WITHIN THAT CONSTRAINT, permission to use, copy, modify, and
 * distribute this software and its documentation for any purpose and
 * without fee is hereby granted, provided that the above copyright
 * notice appear in all copies and that both that copyright notice and
 * this permission notice appear in supporting documentation, and that
 * the name of FundsXpress not be used in advertising or publicity
 * pertaining to distribution of the software without specific, written
 * prior permission.  FundsXpress makes no representations about the
 * suitability of this software for any purpose.  It is provided "as is"
 * without express or implied warranty.
 *
 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
 */

#include <crypto/algapi.h>
#include <crypto/hash.h>
#include <crypto/skcipher.h>
#include <linux/err.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/random.h>
#include <linux/sunrpc/gss_krb5.h>
#include <linux/sunrpc/xdr.h>

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
# define RPCDBG_FACILITY        RPCDBG_AUTH
#endif

u32
krb5_encrypt(
        struct crypto_skcipher *tfm,
        void *iv,
        void *in,
        void *out,
        int length)
{
        u32 ret = -EINVAL;
        struct scatterlist sg[1];
        u8 local_iv[GSS_KRB5_MAX_BLOCKSIZE] = {0};
        SKCIPHER_REQUEST_ON_STACK(req, tfm);

        if (length % crypto_skcipher_blocksize(tfm) != 0)
                goto out;

        if (crypto_skcipher_ivsize(tfm) > GSS_KRB5_MAX_BLOCKSIZE) {
                dprintk("RPC: gss_k5encrypt: tfm iv size too large %d\n",
                        crypto_skcipher_ivsize(tfm));
                goto out;
        }

        if (iv)
                memcpy(local_iv, iv, crypto_skcipher_ivsize(tfm));

        /* Encrypt in place: copy the plaintext into the output buffer
         * and run the cipher over it there. */
        memcpy(out, in, length);
        sg_init_one(sg, out, length);

        skcipher_request_set_tfm(req, tfm);
        skcipher_request_set_callback(req, 0, NULL, NULL);
        skcipher_request_set_crypt(req, sg, sg, length, local_iv);

        ret = crypto_skcipher_encrypt(req);
        skcipher_request_zero(req);
out:
        dprintk("RPC: krb5_encrypt returns %d\n", ret);
        return ret;
}
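/*
 * A minimal caller sketch for the helper above (hypothetical, assuming
 * an already-allocated cbc(des) tfm and an 8-byte buffer):
 *
 *      u8 plain[8], cipher[8];
 *      u32 err = krb5_encrypt(tfm, NULL, plain, cipher, sizeof(plain));
 *
 * A NULL @iv selects an all-zero IV of the tfm's IV size; the
 * plaintext is copied into @out and encrypted there in place.
 */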
u32
krb5_decrypt(
        struct crypto_skcipher *tfm,
        void *iv,
        void *in,
        void *out,
        int length)
{
        u32 ret = -EINVAL;
        struct scatterlist sg[1];
        u8 local_iv[GSS_KRB5_MAX_BLOCKSIZE] = {0};
        SKCIPHER_REQUEST_ON_STACK(req, tfm);

        if (length % crypto_skcipher_blocksize(tfm) != 0)
                goto out;

        if (crypto_skcipher_ivsize(tfm) > GSS_KRB5_MAX_BLOCKSIZE) {
                dprintk("RPC: gss_k5decrypt: tfm iv size too large %d\n",
                        crypto_skcipher_ivsize(tfm));
                goto out;
        }
        if (iv)
                memcpy(local_iv, iv, crypto_skcipher_ivsize(tfm));

        memcpy(out, in, length);
        sg_init_one(sg, out, length);

        skcipher_request_set_tfm(req, tfm);
        skcipher_request_set_callback(req, 0, NULL, NULL);
        skcipher_request_set_crypt(req, sg, sg, length, local_iv);

        ret = crypto_skcipher_decrypt(req);
        skcipher_request_zero(req);
out:
        dprintk("RPC: gss_k5decrypt returns %d\n", ret);
        return ret;
}

/* xdr_process_buf() callback: feed each scatterlist fragment into the
 * ongoing ahash computation. */
static int
checksummer(struct scatterlist *sg, void *data)
{
        struct ahash_request *req = data;

        ahash_request_set_crypt(req, sg, NULL, sg->length);

        return crypto_ahash_update(req);
}

/* Map a GSS key usage to the corresponding MS usage number and lay it
 * out little-endian as the 4-byte arcfour-hmac salt. */
static int
arcfour_hmac_md5_usage_to_salt(unsigned int usage, u8 salt[4])
{
        unsigned int ms_usage;

        switch (usage) {
        case KG_USAGE_SIGN:
                ms_usage = 15;
                break;
        case KG_USAGE_SEAL:
                ms_usage = 13;
                break;
        default:
                return -EINVAL;
        }
        salt[0] = (ms_usage >> 0) & 0xff;
        salt[1] = (ms_usage >> 8) & 0xff;
        salt[2] = (ms_usage >> 16) & 0xff;
        salt[3] = (ms_usage >> 24) & 0xff;

        return 0;
}
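/*
 * For example, KG_USAGE_SIGN maps to usage 15, giving the salt bytes
 * { 0x0f, 0x00, 0x00, 0x00 }.
 */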
static u32
make_checksum_hmac_md5(struct krb5_ctx *kctx, char *header, int hdrlen,
                       struct xdr_buf *body, int body_offset, u8 *cksumkey,
                       unsigned int usage, struct xdr_netobj *cksumout)
{
        struct scatterlist sg[1];
        int err = -1;
        u8 *checksumdata;
        u8 *rc4salt;
        struct crypto_ahash *md5;
        struct crypto_ahash *hmac_md5;
        struct ahash_request *req;

        if (cksumkey == NULL)
                return GSS_S_FAILURE;

        if (cksumout->len < kctx->gk5e->cksumlength) {
                dprintk("%s: checksum buffer length, %u, too small for %s\n",
                        __func__, cksumout->len, kctx->gk5e->name);
                return GSS_S_FAILURE;
        }

        rc4salt = kmalloc_array(4, sizeof(*rc4salt), GFP_NOFS);
        if (!rc4salt)
                return GSS_S_FAILURE;

        if (arcfour_hmac_md5_usage_to_salt(usage, rc4salt)) {
                dprintk("%s: invalid usage value %u\n", __func__, usage);
                goto out_free_rc4salt;
        }

        checksumdata = kmalloc(GSS_KRB5_MAX_CKSUM_LEN, GFP_NOFS);
        if (!checksumdata)
                goto out_free_rc4salt;

        md5 = crypto_alloc_ahash("md5", 0, CRYPTO_ALG_ASYNC);
        if (IS_ERR(md5))
                goto out_free_cksum;

        hmac_md5 = crypto_alloc_ahash(kctx->gk5e->cksum_name, 0,
                                      CRYPTO_ALG_ASYNC);
        if (IS_ERR(hmac_md5))
                goto out_free_md5;

        req = ahash_request_alloc(md5, GFP_NOFS);
        if (!req)
                goto out_free_hmac_md5;

        ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP, NULL, NULL);

        /* First pass: plain MD5 over salt, token header, and body */
        err = crypto_ahash_init(req);
        if (err)
                goto out;
        sg_init_one(sg, rc4salt, 4);
        ahash_request_set_crypt(req, sg, NULL, 4);
        err = crypto_ahash_update(req);
        if (err)
                goto out;

        sg_init_one(sg, header, hdrlen);
        ahash_request_set_crypt(req, sg, NULL, hdrlen);
        err = crypto_ahash_update(req);
        if (err)
                goto out;
        err = xdr_process_buf(body, body_offset, body->len - body_offset,
                              checksummer, req);
        if (err)
                goto out;
        ahash_request_set_crypt(req, NULL, checksumdata, 0);
        err = crypto_ahash_final(req);
        if (err)
                goto out;

        /* Second pass: keyed HMAC-MD5 over the MD5 digest */
        ahash_request_free(req);
        req = ahash_request_alloc(hmac_md5, GFP_NOFS);
        if (!req) {
                /* err would otherwise still be 0 from the final above,
                 * and we would wrongly report success */
                err = -ENOMEM;
                goto out_free_hmac_md5;
        }

        ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP, NULL, NULL);

        err = crypto_ahash_setkey(hmac_md5, cksumkey, kctx->gk5e->keylength);
        if (err)
                goto out;

        sg_init_one(sg, checksumdata, crypto_ahash_digestsize(md5));
        ahash_request_set_crypt(req, sg, checksumdata,
                                crypto_ahash_digestsize(md5));
        err = crypto_ahash_digest(req);
        if (err)
                goto out;

        memcpy(cksumout->data, checksumdata, kctx->gk5e->cksumlength);
        cksumout->len = kctx->gk5e->cksumlength;
out:
        ahash_request_free(req);
out_free_hmac_md5:
        crypto_free_ahash(hmac_md5);
out_free_md5:
        crypto_free_ahash(md5);
out_free_cksum:
        kfree(checksumdata);
out_free_rc4salt:
        kfree(rc4salt);
        return err ? GSS_S_FAILURE : 0;
}
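/*
 * In outline, the arcfour-hmac checksum computed above is
 *
 *      HMAC-MD5(cksumkey, MD5(salt | header | body))
 *
 * truncated to cksumlength bytes; cf. RFC 4757.
 */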
/*
 * Checksum the plaintext data and hdrlen bytes of the token header.
 * The checksum is performed over the first 8 bytes of the
 * gss token header and then over the data body.
 */
u32
make_checksum(struct krb5_ctx *kctx, char *header, int hdrlen,
              struct xdr_buf *body, int body_offset, u8 *cksumkey,
              unsigned int usage, struct xdr_netobj *cksumout)
{
        struct crypto_ahash *tfm;
        struct ahash_request *req;
        struct scatterlist sg[1];
        int err = -1;
        u8 *checksumdata;
        unsigned int checksumlen;

        if (kctx->gk5e->ctype == CKSUMTYPE_HMAC_MD5_ARCFOUR)
                return make_checksum_hmac_md5(kctx, header, hdrlen,
                                              body, body_offset,
                                              cksumkey, usage, cksumout);

        if (cksumout->len < kctx->gk5e->cksumlength) {
                dprintk("%s: checksum buffer length, %u, too small for %s\n",
                        __func__, cksumout->len, kctx->gk5e->name);
                return GSS_S_FAILURE;
        }

        checksumdata = kmalloc(GSS_KRB5_MAX_CKSUM_LEN, GFP_NOFS);
        if (checksumdata == NULL)
                return GSS_S_FAILURE;

        tfm = crypto_alloc_ahash(kctx->gk5e->cksum_name, 0, CRYPTO_ALG_ASYNC);
        if (IS_ERR(tfm))
                goto out_free_cksum;

        req = ahash_request_alloc(tfm, GFP_NOFS);
        if (!req)
                goto out_free_ahash;

        ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP, NULL, NULL);

        checksumlen = crypto_ahash_digestsize(tfm);

        if (cksumkey != NULL) {
                err = crypto_ahash_setkey(tfm, cksumkey,
                                          kctx->gk5e->keylength);
                if (err)
                        goto out;
        }

        err = crypto_ahash_init(req);
        if (err)
                goto out;
        sg_init_one(sg, header, hdrlen);
        ahash_request_set_crypt(req, sg, NULL, hdrlen);
        err = crypto_ahash_update(req);
        if (err)
                goto out;
        err = xdr_process_buf(body, body_offset, body->len - body_offset,
                              checksummer, req);
        if (err)
                goto out;
        ahash_request_set_crypt(req, NULL, checksumdata, 0);
        err = crypto_ahash_final(req);
        if (err)
                goto out;

        switch (kctx->gk5e->ctype) {
        case CKSUMTYPE_RSA_MD5:
                err = kctx->gk5e->encrypt(kctx->seq, NULL, checksumdata,
                                          checksumdata, checksumlen);
                if (err)
                        goto out;
                memcpy(cksumout->data,
                       checksumdata + checksumlen - kctx->gk5e->cksumlength,
                       kctx->gk5e->cksumlength);
                break;
        case CKSUMTYPE_HMAC_SHA1_DES3:
                memcpy(cksumout->data, checksumdata, kctx->gk5e->cksumlength);
                break;
        default:
                BUG();
                break;
        }
        cksumout->len = kctx->gk5e->cksumlength;
out:
        ahash_request_free(req);
out_free_ahash:
        crypto_free_ahash(tfm);
out_free_cksum:
        kfree(checksumdata);
        return err ? GSS_S_FAILURE : 0;
}
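/*
 * For CKSUMTYPE_RSA_MD5, the 16-byte MD5 digest is encrypted with the
 * sequence-number cipher and the trailing cksumlength bytes (8 for the
 * des enctype) are kept; this is a reading of the memcpy arithmetic
 * above, not a normative statement.
 */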
/*
 * Checksum the plaintext data and hdrlen bytes of the token header.
 * Per rfc4121, sec. 4.2.4, the checksum is performed over the data
 * body then over the first 16 octets of the MIC token.
 * Inclusion of the header data in the calculation of the
 * checksum is optional.
 */
u32
make_checksum_v2(struct krb5_ctx *kctx, char *header, int hdrlen,
                 struct xdr_buf *body, int body_offset, u8 *cksumkey,
                 unsigned int usage, struct xdr_netobj *cksumout)
{
        struct crypto_ahash *tfm;
        struct ahash_request *req;
        struct scatterlist sg[1];
        int err = -1;
        u8 *checksumdata;

        if (kctx->gk5e->keyed_cksum == 0) {
                dprintk("%s: expected keyed hash for %s\n",
                        __func__, kctx->gk5e->name);
                return GSS_S_FAILURE;
        }
        if (cksumkey == NULL) {
                dprintk("%s: no key supplied for %s\n",
                        __func__, kctx->gk5e->name);
                return GSS_S_FAILURE;
        }

        checksumdata = kmalloc(GSS_KRB5_MAX_CKSUM_LEN, GFP_NOFS);
        if (!checksumdata)
                return GSS_S_FAILURE;

        tfm = crypto_alloc_ahash(kctx->gk5e->cksum_name, 0, CRYPTO_ALG_ASYNC);
        if (IS_ERR(tfm))
                goto out_free_cksum;

        req = ahash_request_alloc(tfm, GFP_NOFS);
        if (!req)
                goto out_free_ahash;

        ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP, NULL, NULL);

        err = crypto_ahash_setkey(tfm, cksumkey, kctx->gk5e->keylength);
        if (err)
                goto out;

        err = crypto_ahash_init(req);
        if (err)
                goto out;
        err = xdr_process_buf(body, body_offset, body->len - body_offset,
                              checksummer, req);
        if (err)
                goto out;
        if (header != NULL) {
                sg_init_one(sg, header, hdrlen);
                ahash_request_set_crypt(req, sg, NULL, hdrlen);
                err = crypto_ahash_update(req);
                if (err)
                        goto out;
        }
        ahash_request_set_crypt(req, NULL, checksumdata, 0);
        err = crypto_ahash_final(req);
        if (err)
                goto out;

        cksumout->len = kctx->gk5e->cksumlength;

        switch (kctx->gk5e->ctype) {
        case CKSUMTYPE_HMAC_SHA1_96_AES128:
        case CKSUMTYPE_HMAC_SHA1_96_AES256:
                /* note that this truncates the hash */
                memcpy(cksumout->data, checksumdata, kctx->gk5e->cksumlength);
                break;
        default:
                BUG();
                break;
        }
out:
        ahash_request_free(req);
out_free_ahash:
        crypto_free_ahash(tfm);
out_free_cksum:
        kfree(checksumdata);
        return err ? GSS_S_FAILURE : 0;
}

struct encryptor_desc {
        u8 iv[GSS_KRB5_MAX_BLOCKSIZE];
        struct skcipher_request *req;
        int pos;
        struct xdr_buf *outbuf;
        struct page **pages;
        struct scatterlist infrags[4];
        struct scatterlist outfrags[4];
        int fragno;
        int fraglen;
};
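/*
 * encryptor() below accumulates scatterlist fragments until it holds a
 * whole number of cipher blocks, encrypts that run, and carries any
 * sub-block remainder over into the next call.  desc->pages supplies
 * the cleartext source pages while desc->outbuf->pages supplies the
 * destination, so an input fragment may differ from its output
 * fragment only in the page it references.
 */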
static int
encryptor(struct scatterlist *sg, void *data)
{
        struct encryptor_desc *desc = data;
        struct xdr_buf *outbuf = desc->outbuf;
        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(desc->req);
        struct page *in_page;
        int thislen = desc->fraglen + sg->length;
        int fraglen, ret;
        int page_pos;

        /* Worst case is 4 fragments: head, end of page 1, start
         * of page 2, tail.  Anything more is a bug. */
        BUG_ON(desc->fragno > 3);

        page_pos = desc->pos - outbuf->head[0].iov_len;
        if (page_pos >= 0 && page_pos < outbuf->page_len) {
                /* pages are not in place: */
                int i = (page_pos + outbuf->page_base) >> PAGE_SHIFT;
                in_page = desc->pages[i];
        } else {
                in_page = sg_page(sg);
        }
        sg_set_page(&desc->infrags[desc->fragno], in_page, sg->length,
                    sg->offset);
        sg_set_page(&desc->outfrags[desc->fragno], sg_page(sg), sg->length,
                    sg->offset);
        desc->fragno++;
        desc->fraglen += sg->length;
        desc->pos += sg->length;

        /* Encrypt only whole blocks; save the remainder for later. */
        fraglen = thislen & (crypto_skcipher_blocksize(tfm) - 1);
        thislen -= fraglen;

        if (thislen == 0)
                return 0;

        sg_mark_end(&desc->infrags[desc->fragno - 1]);
        sg_mark_end(&desc->outfrags[desc->fragno - 1]);

        skcipher_request_set_crypt(desc->req, desc->infrags, desc->outfrags,
                                   thislen, desc->iv);

        ret = crypto_skcipher_encrypt(desc->req);
        if (ret)
                return ret;

        sg_init_table(desc->infrags, 4);
        sg_init_table(desc->outfrags, 4);

        if (fraglen) {
                sg_set_page(&desc->outfrags[0], sg_page(sg), fraglen,
                            sg->offset + sg->length - fraglen);
                desc->infrags[0] = desc->outfrags[0];
                sg_assign_page(&desc->infrags[0], in_page);
                desc->fragno = 1;
                desc->fraglen = fraglen;
        } else {
                desc->fragno = 0;
                desc->fraglen = 0;
        }
        return 0;
}

int
gss_encrypt_xdr_buf(struct crypto_skcipher *tfm, struct xdr_buf *buf,
                    int offset, struct page **pages)
{
        int ret;
        struct encryptor_desc desc;
        SKCIPHER_REQUEST_ON_STACK(req, tfm);

        BUG_ON((buf->len - offset) % crypto_skcipher_blocksize(tfm) != 0);

        skcipher_request_set_tfm(req, tfm);
        skcipher_request_set_callback(req, 0, NULL, NULL);

        memset(desc.iv, 0, sizeof(desc.iv));
        desc.req = req;
        desc.pos = offset;
        desc.outbuf = buf;
        desc.pages = pages;
        desc.fragno = 0;
        desc.fraglen = 0;

        sg_init_table(desc.infrags, 4);
        sg_init_table(desc.outfrags, 4);

        ret = xdr_process_buf(buf, offset, buf->len - offset, encryptor, &desc);
        skcipher_request_zero(req);
        return ret;
}
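/*
 * A hypothetical caller sketch, assuming (buf->len - offset) is already
 * padded to the cipher blocksize and @pages holds the cleartext page
 * vector described above:
 *
 *      err = gss_encrypt_xdr_buf(kctx->enc, buf, offset, pages);
 */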
struct decryptor_desc {
        u8 iv[GSS_KRB5_MAX_BLOCKSIZE];
        struct skcipher_request *req;
        struct scatterlist frags[4];
        int fragno;
        int fraglen;
};

static int
decryptor(struct scatterlist *sg, void *data)
{
        struct decryptor_desc *desc = data;
        int thislen = desc->fraglen + sg->length;
        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(desc->req);
        int fraglen, ret;

        /* Worst case is 4 fragments: head, end of page 1, start
         * of page 2, tail.  Anything more is a bug. */
        BUG_ON(desc->fragno > 3);
        sg_set_page(&desc->frags[desc->fragno], sg_page(sg), sg->length,
                    sg->offset);
        desc->fragno++;
        desc->fraglen += sg->length;

        /* Decrypt only whole blocks; save the remainder for later. */
        fraglen = thislen & (crypto_skcipher_blocksize(tfm) - 1);
        thislen -= fraglen;

        if (thislen == 0)
                return 0;

        sg_mark_end(&desc->frags[desc->fragno - 1]);

        skcipher_request_set_crypt(desc->req, desc->frags, desc->frags,
                                   thislen, desc->iv);

        ret = crypto_skcipher_decrypt(desc->req);
        if (ret)
                return ret;

        sg_init_table(desc->frags, 4);

        if (fraglen) {
                sg_set_page(&desc->frags[0], sg_page(sg), fraglen,
                            sg->offset + sg->length - fraglen);
                desc->fragno = 1;
                desc->fraglen = fraglen;
        } else {
                desc->fragno = 0;
                desc->fraglen = 0;
        }
        return 0;
}

int
gss_decrypt_xdr_buf(struct crypto_skcipher *tfm, struct xdr_buf *buf,
                    int offset)
{
        int ret;
        struct decryptor_desc desc;
        SKCIPHER_REQUEST_ON_STACK(req, tfm);

        /* XXXJBF: */
        BUG_ON((buf->len - offset) % crypto_skcipher_blocksize(tfm) != 0);

        skcipher_request_set_tfm(req, tfm);
        skcipher_request_set_callback(req, 0, NULL, NULL);

        memset(desc.iv, 0, sizeof(desc.iv));
        desc.req = req;
        desc.fragno = 0;
        desc.fraglen = 0;

        sg_init_table(desc.frags, 4);

        ret = xdr_process_buf(buf, offset, buf->len - offset, decryptor, &desc);
        skcipher_request_zero(req);
        return ret;
}

/*
 * This function makes the assumption that it was ultimately called
 * from gss_wrap().
 *
 * The client auth_gss code moves any existing tail data into a
 * separate page before calling gss_wrap.
 * The server svcauth_gss code ensures that both the head and the
 * tail have slack space of RPC_MAX_AUTH_SIZE before calling gss_wrap.
 *
 * Even with that guarantee, this function may be called more than
 * once in the processing of gss_wrap().  The best we can do is
 * verify at compile-time (see the BUILD_BUG_ON below) that the
 * largest expected shift will fit within RPC_MAX_AUTH_SIZE.
 * At run-time we can verify that a single invocation of this
 * function doesn't attempt to use more than RPC_MAX_AUTH_SIZE.
 */

int
xdr_extend_head(struct xdr_buf *buf, unsigned int base, unsigned int shiftlen)
{
        u8 *p;

        if (shiftlen == 0)
                return 0;

        BUILD_BUG_ON(GSS_KRB5_MAX_SLACK_NEEDED > RPC_MAX_AUTH_SIZE);
        BUG_ON(shiftlen > RPC_MAX_AUTH_SIZE);

        p = buf->head[0].iov_base + base;

        memmove(p + shiftlen, p, buf->head[0].iov_len - base);

        buf->head[0].iov_len += shiftlen;
        buf->len += shiftlen;

        return 0;
}
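/*
 * Example: gss_krb5_aes_encrypt() below calls
 * xdr_extend_head(buf, offset, kctx->gk5e->conflen) with @offset
 * already advanced past the token header, opening a conflen-byte gap
 * in the head for the confounder.
 */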
static u32
gss_krb5_cts_crypt(struct crypto_skcipher *cipher, struct xdr_buf *buf,
                   u32 offset, u8 *iv, struct page **pages, int encrypt)
{
        u32 ret;
        struct scatterlist sg[1];
        SKCIPHER_REQUEST_ON_STACK(req, cipher);
        u8 *data;
        struct page **save_pages;
        u32 len = buf->len - offset;

        if (len > GSS_KRB5_MAX_BLOCKSIZE * 2) {
                /* WARN_ON(0) never fired; this helper only handles the
                 * final two blocks */
                WARN_ON(1);
                return -ENOMEM;
        }
        data = kmalloc(GSS_KRB5_MAX_BLOCKSIZE * 2, GFP_NOFS);
        if (!data)
                return -ENOMEM;

        /*
         * For encryption, we want to read from the cleartext
         * page cache pages, and write the encrypted data to
         * the supplied xdr_buf pages.
         */
        save_pages = buf->pages;
        if (encrypt)
                buf->pages = pages;

        ret = read_bytes_from_xdr_buf(buf, offset, data, len);
        buf->pages = save_pages;
        if (ret)
                goto out;

        sg_init_one(sg, data, len);

        skcipher_request_set_tfm(req, cipher);
        skcipher_request_set_callback(req, 0, NULL, NULL);
        skcipher_request_set_crypt(req, sg, sg, len, iv);

        if (encrypt)
                ret = crypto_skcipher_encrypt(req);
        else
                ret = crypto_skcipher_decrypt(req);

        skcipher_request_zero(req);

        if (ret)
                goto out;

        ret = write_bytes_to_xdr_buf(buf, offset, data, len);

out:
        kfree(data);
        return ret;
}
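/*
 * Note: @cipher here is expected to be the cts(cbc(aes)) tfm, so this
 * helper only processes the final ciphertext-stealing region (at most
 * two cipher blocks); the callers below push the bulk of the data
 * through the plain cbc(aes) aux cipher first.
 */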
u32
gss_krb5_aes_encrypt(struct krb5_ctx *kctx, u32 offset,
                     struct xdr_buf *buf, struct page **pages)
{
        u32 err;
        struct xdr_netobj hmac;
        u8 *cksumkey;
        u8 *ecptr;
        struct crypto_skcipher *cipher, *aux_cipher;
        int blocksize;
        struct page **save_pages;
        int nblocks, nbytes;
        struct encryptor_desc desc;
        u32 cbcbytes;
        unsigned int usage;

        if (kctx->initiate) {
                cipher = kctx->initiator_enc;
                aux_cipher = kctx->initiator_enc_aux;
                cksumkey = kctx->initiator_integ;
                usage = KG_USAGE_INITIATOR_SEAL;
        } else {
                cipher = kctx->acceptor_enc;
                aux_cipher = kctx->acceptor_enc_aux;
                cksumkey = kctx->acceptor_integ;
                usage = KG_USAGE_ACCEPTOR_SEAL;
        }
        blocksize = crypto_skcipher_blocksize(cipher);

        /* hide the gss token header and insert the confounder */
        offset += GSS_KRB5_TOK_HDR_LEN;
        if (xdr_extend_head(buf, offset, kctx->gk5e->conflen))
                return GSS_S_FAILURE;
        gss_krb5_make_confounder(buf->head[0].iov_base + offset,
                                 kctx->gk5e->conflen);
        offset -= GSS_KRB5_TOK_HDR_LEN;

        if (buf->tail[0].iov_base != NULL) {
                ecptr = buf->tail[0].iov_base + buf->tail[0].iov_len;
        } else {
                buf->tail[0].iov_base = buf->head[0].iov_base
                        + buf->head[0].iov_len;
                buf->tail[0].iov_len = 0;
                ecptr = buf->tail[0].iov_base;
        }

        /* copy plaintext gss token header after filler (if any) */
        memcpy(ecptr, buf->head[0].iov_base + offset, GSS_KRB5_TOK_HDR_LEN);
        buf->tail[0].iov_len += GSS_KRB5_TOK_HDR_LEN;
        buf->len += GSS_KRB5_TOK_HDR_LEN;

        /* Do the HMAC */
        hmac.len = GSS_KRB5_MAX_CKSUM_LEN;
        hmac.data = buf->tail[0].iov_base + buf->tail[0].iov_len;

        /*
         * When we are called, pages points to the real page cache
         * data -- which we can't go and encrypt!  buf->pages points
         * to scratch pages which we are going to send off to the
         * client/server.  Swap in the plaintext pages to calculate
         * the hmac.
         */
        save_pages = buf->pages;
        buf->pages = pages;

        err = make_checksum_v2(kctx, NULL, 0, buf,
                               offset + GSS_KRB5_TOK_HDR_LEN,
                               cksumkey, usage, &hmac);
        buf->pages = save_pages;
        if (err)
                return GSS_S_FAILURE;

        /* All but the last two blocks go through plain CBC... */
        nbytes = buf->len - offset - GSS_KRB5_TOK_HDR_LEN;
        nblocks = (nbytes + blocksize - 1) / blocksize;
        cbcbytes = 0;
        if (nblocks > 2)
                cbcbytes = (nblocks - 2) * blocksize;

        memset(desc.iv, 0, sizeof(desc.iv));

        if (cbcbytes) {
                SKCIPHER_REQUEST_ON_STACK(req, aux_cipher);

                desc.pos = offset + GSS_KRB5_TOK_HDR_LEN;
                desc.fragno = 0;
                desc.fraglen = 0;
                desc.pages = pages;
                desc.outbuf = buf;
                desc.req = req;

                skcipher_request_set_tfm(req, aux_cipher);
                skcipher_request_set_callback(req, 0, NULL, NULL);

                sg_init_table(desc.infrags, 4);
                sg_init_table(desc.outfrags, 4);

                err = xdr_process_buf(buf, offset + GSS_KRB5_TOK_HDR_LEN,
                                      cbcbytes, encryptor, &desc);
                skcipher_request_zero(req);
                if (err)
                        goto out_err;
        }

        /* ...and the remainder goes through CTS, with the IV carried
         * forward from any CBC results. */
        err = gss_krb5_cts_crypt(cipher, buf,
                                 offset + GSS_KRB5_TOK_HDR_LEN + cbcbytes,
                                 desc.iv, pages, 1);
        if (err) {
                err = GSS_S_FAILURE;
                goto out_err;
        }

        /* Now update buf to account for HMAC */
        buf->tail[0].iov_len += kctx->gk5e->cksumlength;
        buf->len += kctx->gk5e->cksumlength;

out_err:
        if (err)
                err = GSS_S_FAILURE;
        return err;
}
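/*
 * After the steps above, the region following the token header holds
 * E(confounder | plaintext | copy of token header) followed by the
 * HMAC over the plaintext; gss_krb5_aes_decrypt() below undoes this,
 * returning headskip = conflen and tailskip = cksumlength so the
 * caller can strip the confounder and checksum.
 */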
u32
gss_krb5_aes_decrypt(struct krb5_ctx *kctx, u32 offset, struct xdr_buf *buf,
                     u32 *headskip, u32 *tailskip)
{
        struct xdr_buf subbuf;
        u32 ret = 0;
        u8 *cksum_key;
        struct crypto_skcipher *cipher, *aux_cipher;
        struct xdr_netobj our_hmac_obj;
        u8 our_hmac[GSS_KRB5_MAX_CKSUM_LEN];
        u8 pkt_hmac[GSS_KRB5_MAX_CKSUM_LEN];
        int nblocks, blocksize, cbcbytes;
        struct decryptor_desc desc;
        unsigned int usage;

        if (kctx->initiate) {
                cipher = kctx->acceptor_enc;
                aux_cipher = kctx->acceptor_enc_aux;
                cksum_key = kctx->acceptor_integ;
                usage = KG_USAGE_ACCEPTOR_SEAL;
        } else {
                cipher = kctx->initiator_enc;
                aux_cipher = kctx->initiator_enc_aux;
                cksum_key = kctx->initiator_integ;
                usage = KG_USAGE_INITIATOR_SEAL;
        }
        blocksize = crypto_skcipher_blocksize(cipher);

        /* create a segment skipping the header and leaving out the checksum */
        xdr_buf_subsegment(buf, &subbuf, offset + GSS_KRB5_TOK_HDR_LEN,
                           (buf->len - offset - GSS_KRB5_TOK_HDR_LEN -
                            kctx->gk5e->cksumlength));

        nblocks = (subbuf.len + blocksize - 1) / blocksize;

        cbcbytes = 0;
        if (nblocks > 2)
                cbcbytes = (nblocks - 2) * blocksize;

        memset(desc.iv, 0, sizeof(desc.iv));

        if (cbcbytes) {
                SKCIPHER_REQUEST_ON_STACK(req, aux_cipher);

                desc.fragno = 0;
                desc.fraglen = 0;
                desc.req = req;

                skcipher_request_set_tfm(req, aux_cipher);
                skcipher_request_set_callback(req, 0, NULL, NULL);

                sg_init_table(desc.frags, 4);

                ret = xdr_process_buf(&subbuf, 0, cbcbytes, decryptor, &desc);
                skcipher_request_zero(req);
                if (ret)
                        goto out_err;
        }

        /* Make sure IV carries forward from any CBC results. */
        ret = gss_krb5_cts_crypt(cipher, &subbuf, cbcbytes, desc.iv, NULL, 0);
        if (ret)
                goto out_err;

        /* Calculate our hmac over the plaintext data */
        our_hmac_obj.len = sizeof(our_hmac);
        our_hmac_obj.data = our_hmac;

        ret = make_checksum_v2(kctx, NULL, 0, &subbuf, 0,
                               cksum_key, usage, &our_hmac_obj);
        if (ret)
                goto out_err;

        /* Get the packet's hmac value */
        ret = read_bytes_from_xdr_buf(buf, buf->len - kctx->gk5e->cksumlength,
                                      pkt_hmac, kctx->gk5e->cksumlength);
        if (ret)
                goto out_err;

        /* Compare in constant time to avoid leaking the HMAC. */
        if (crypto_memneq(pkt_hmac, our_hmac, kctx->gk5e->cksumlength) != 0) {
                ret = GSS_S_BAD_SIG;
                goto out_err;
        }
        *headskip = kctx->gk5e->conflen;
        *tailskip = kctx->gk5e->cksumlength;
out_err:
        if (ret && ret != GSS_S_BAD_SIG)
                ret = GSS_S_FAILURE;
        return ret;
}

/*
 * Compute Kseq given the initial session key and the checksum.
 * Set the key of the given cipher.
 */
int
krb5_rc4_setup_seq_key(struct krb5_ctx *kctx, struct crypto_skcipher *cipher,
                       unsigned char *cksum)
{
        struct crypto_shash *hmac;
        struct shash_desc *desc;
        u8 Kseq[GSS_KRB5_MAX_KEYLEN];
        u32 zeroconstant = 0;
        int err;

        dprintk("%s: entered\n", __func__);

        hmac = crypto_alloc_shash(kctx->gk5e->cksum_name, 0, 0);
        if (IS_ERR(hmac)) {
                dprintk("%s: error %ld, allocating hash '%s'\n",
                        __func__, PTR_ERR(hmac), kctx->gk5e->cksum_name);
                return PTR_ERR(hmac);
        }

        desc = kmalloc(sizeof(*desc) + crypto_shash_descsize(hmac),
                       GFP_NOFS);
        if (!desc) {
                dprintk("%s: failed to allocate shash descriptor for '%s'\n",
                        __func__, kctx->gk5e->cksum_name);
                crypto_free_shash(hmac);
                return -ENOMEM;
        }

        desc->tfm = hmac;
        desc->flags = 0;

        /* Compute intermediate Kseq from session key */
        err = crypto_shash_setkey(hmac, kctx->Ksess, kctx->gk5e->keylength);
        if (err)
                goto out_err;

        err = crypto_shash_digest(desc, (u8 *)&zeroconstant, 4, Kseq);
        if (err)
                goto out_err;

        /* Compute final Kseq from the checksum and intermediate Kseq */
        err = crypto_shash_setkey(hmac, Kseq, kctx->gk5e->keylength);
        if (err)
                goto out_err;

        err = crypto_shash_digest(desc, cksum, 8, Kseq);
        if (err)
                goto out_err;

        err = crypto_skcipher_setkey(cipher, Kseq, kctx->gk5e->keylength);
        if (err)
                goto out_err;

        err = 0;

out_err:
        kzfree(desc);
        crypto_free_shash(hmac);
        dprintk("%s: returning %d\n", __func__, err);
        return err;
}

/*
 * Compute Kcrypt given the initial session key and the plaintext seqnum.
 * Set the key of cipher kctx->enc.
 */
int
krb5_rc4_setup_enc_key(struct krb5_ctx *kctx, struct crypto_skcipher *cipher,
                       s32 seqnum)
{
        struct crypto_shash *hmac;
        struct shash_desc *desc;
        u8 Kcrypt[GSS_KRB5_MAX_KEYLEN];
        u8 zeroconstant[4] = {0};
        u8 seqnumarray[4];
        int err, i;

        dprintk("%s: entered, seqnum %d\n", __func__, seqnum);

        hmac = crypto_alloc_shash(kctx->gk5e->cksum_name, 0, 0);
        if (IS_ERR(hmac)) {
                dprintk("%s: error %ld, allocating hash '%s'\n",
                        __func__, PTR_ERR(hmac), kctx->gk5e->cksum_name);
                return PTR_ERR(hmac);
        }

        desc = kmalloc(sizeof(*desc) + crypto_shash_descsize(hmac),
                       GFP_NOFS);
        if (!desc) {
                dprintk("%s: failed to allocate shash descriptor for '%s'\n",
                        __func__, kctx->gk5e->cksum_name);
                crypto_free_shash(hmac);
                return -ENOMEM;
        }

        desc->tfm = hmac;
        desc->flags = 0;

        /* Compute intermediate Kcrypt from session key */
        for (i = 0; i < kctx->gk5e->keylength; i++)
                Kcrypt[i] = kctx->Ksess[i] ^ 0xf0;

        err = crypto_shash_setkey(hmac, Kcrypt, kctx->gk5e->keylength);
        if (err)
                goto out_err;

        err = crypto_shash_digest(desc, zeroconstant, 4, Kcrypt);
        if (err)
                goto out_err;

        /* Compute final Kcrypt from the seqnum and intermediate Kcrypt */
        err = crypto_shash_setkey(hmac, Kcrypt, kctx->gk5e->keylength);
        if (err)
                goto out_err;

        seqnumarray[0] = (unsigned char) ((seqnum >> 24) & 0xff);
        seqnumarray[1] = (unsigned char) ((seqnum >> 16) & 0xff);
        seqnumarray[2] = (unsigned char) ((seqnum >> 8) & 0xff);
        seqnumarray[3] = (unsigned char) ((seqnum >> 0) & 0xff);

        err = crypto_shash_digest(desc, seqnumarray, 4, Kcrypt);
        if (err)
                goto out_err;

        err = crypto_skcipher_setkey(cipher, Kcrypt, kctx->gk5e->keylength);
        if (err)
                goto out_err;

        err = 0;

out_err:
        kzfree(desc);
        crypto_free_shash(hmac);
        dprintk("%s: returning %d\n", __func__, err);
        return err;
}
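/*
 * Summary of the two derivations above, as implemented (cf. RFC 4757):
 *
 *      Kseq   = HMAC(HMAC(Ksess, 0x00000000), checksum[0..7])
 *      Kcrypt = HMAC(HMAC(Ksess ^ 0xf0f0..., 0x00000000), seqnum_be32)
 *
 * where seqnum_be32 is the sequence number laid out big-endian.
 */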