/*
 * linux/net/sunrpc/gss_krb5_crypto.c
 *
 * Copyright (c) 2000-2008 The Regents of the University of Michigan.
 * All rights reserved.
 *
 * Andy Adamson <andros@umich.edu>
 * Bruce Fields <bfields@umich.edu>
 */

/*
 * Copyright (C) 1998 by the FundsXpress, INC.
 *
 * All rights reserved.
 *
 * Export of this software from the United States of America may require
 * a specific license from the United States Government.  It is the
 * responsibility of any person or organization contemplating export to
 * obtain such a license before exporting.
 *
 * WITHIN THAT CONSTRAINT, permission to use, copy, modify, and
 * distribute this software and its documentation for any purpose and
 * without fee is hereby granted, provided that the above copyright
 * notice appear in all copies and that both that copyright notice and
 * this permission notice appear in supporting documentation, and that
 * the name of FundsXpress not be used in advertising or publicity
 * pertaining to distribution of the software without specific, written
 * prior permission.  FundsXpress makes no representations about the
 * suitability of this software for any purpose.  It is provided "as is"
 * without express or implied warranty.
 *
 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
 */

#include <crypto/hash.h>
#include <crypto/skcipher.h>
#include <linux/err.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/random.h>
#include <linux/sunrpc/gss_krb5.h>
#include <linux/sunrpc/xdr.h>

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
# define RPCDBG_FACILITY RPCDBG_AUTH
#endif

u32
krb5_encrypt(
        struct crypto_skcipher *tfm,
        void *iv,
        void *in,
        void *out,
        int length)
{
        u32 ret = -EINVAL;
        struct scatterlist sg[1];
        u8 local_iv[GSS_KRB5_MAX_BLOCKSIZE] = {0};
        SKCIPHER_REQUEST_ON_STACK(req, tfm);

        if (length % crypto_skcipher_blocksize(tfm) != 0)
                goto out;

        if (crypto_skcipher_ivsize(tfm) > GSS_KRB5_MAX_BLOCKSIZE) {
                dprintk("RPC: gss_k5encrypt: tfm iv size too large %d\n",
                        crypto_skcipher_ivsize(tfm));
                goto out;
        }

        if (iv)
                memcpy(local_iv, iv, crypto_skcipher_ivsize(tfm));

        memcpy(out, in, length);
        sg_init_one(sg, out, length);

        skcipher_request_set_tfm(req, tfm);
        skcipher_request_set_callback(req, 0, NULL, NULL);
        skcipher_request_set_crypt(req, sg, sg, length, local_iv);

        ret = crypto_skcipher_encrypt(req);
        skcipher_request_zero(req);
out:
        dprintk("RPC: krb5_encrypt returns %d\n", ret);
        return ret;
}

u32
krb5_decrypt(
        struct crypto_skcipher *tfm,
        void *iv,
        void *in,
        void *out,
        int length)
{
        u32 ret = -EINVAL;
        struct scatterlist sg[1];
        u8 local_iv[GSS_KRB5_MAX_BLOCKSIZE] = {0};
        SKCIPHER_REQUEST_ON_STACK(req, tfm);

        if (length % crypto_skcipher_blocksize(tfm) != 0)
                goto out;

        if (crypto_skcipher_ivsize(tfm) > GSS_KRB5_MAX_BLOCKSIZE) {
                dprintk("RPC: gss_k5decrypt: tfm iv size too large %d\n",
                        crypto_skcipher_ivsize(tfm));
                goto out;
        }
        if (iv)
                memcpy(local_iv, iv, crypto_skcipher_ivsize(tfm));

        memcpy(out, in, length);
        sg_init_one(sg, out, length);

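        /*
         * As in krb5_encrypt() above, the input was copied to "out"
         * first, so the one-shot decrypt below runs in place on the
         * caller's output buffer using the local IV copy.
         */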
        skcipher_request_set_tfm(req, tfm);
        skcipher_request_set_callback(req, 0, NULL, NULL);
        skcipher_request_set_crypt(req, sg, sg, length, local_iv);

        ret = crypto_skcipher_decrypt(req);
        skcipher_request_zero(req);
out:
        dprintk("RPC: gss_k5decrypt returns %d\n", ret);
        return ret;
}

static int
checksummer(struct scatterlist *sg, void *data)
{
        struct ahash_request *req = data;

        ahash_request_set_crypt(req, sg, NULL, sg->length);

        return crypto_ahash_update(req);
}

static int
arcfour_hmac_md5_usage_to_salt(unsigned int usage, u8 salt[4])
{
        unsigned int ms_usage;

        switch (usage) {
        case KG_USAGE_SIGN:
                ms_usage = 15;
                break;
        case KG_USAGE_SEAL:
                ms_usage = 13;
                break;
        default:
                return -EINVAL;
        }
        salt[0] = (ms_usage >> 0) & 0xff;
        salt[1] = (ms_usage >> 8) & 0xff;
        salt[2] = (ms_usage >> 16) & 0xff;
        salt[3] = (ms_usage >> 24) & 0xff;

        return 0;
}

static u32
make_checksum_hmac_md5(struct krb5_ctx *kctx, char *header, int hdrlen,
                       struct xdr_buf *body, int body_offset, u8 *cksumkey,
                       unsigned int usage, struct xdr_netobj *cksumout)
{
        struct scatterlist sg[1];
        int err = -1;
        u8 *checksumdata;
        u8 rc4salt[4];
        struct crypto_ahash *md5;
        struct crypto_ahash *hmac_md5;
        struct ahash_request *req;

        if (cksumkey == NULL)
                return GSS_S_FAILURE;

        if (cksumout->len < kctx->gk5e->cksumlength) {
                dprintk("%s: checksum buffer length, %u, too small for %s\n",
                        __func__, cksumout->len, kctx->gk5e->name);
                return GSS_S_FAILURE;
        }

        if (arcfour_hmac_md5_usage_to_salt(usage, rc4salt)) {
                dprintk("%s: invalid usage value %u\n", __func__, usage);
                return GSS_S_FAILURE;
        }

        checksumdata = kmalloc(GSS_KRB5_MAX_CKSUM_LEN, GFP_NOFS);
        if (!checksumdata)
                return GSS_S_FAILURE;

        md5 = crypto_alloc_ahash("md5", 0, CRYPTO_ALG_ASYNC);
        if (IS_ERR(md5))
                goto out_free_cksum;

        hmac_md5 = crypto_alloc_ahash(kctx->gk5e->cksum_name, 0,
                                      CRYPTO_ALG_ASYNC);
        if (IS_ERR(hmac_md5))
                goto out_free_md5;

        req = ahash_request_alloc(md5, GFP_NOFS);
        if (!req)
                goto out_free_hmac_md5;

        ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP, NULL, NULL);

        err = crypto_ahash_init(req);
        if (err)
                goto out;
        sg_init_one(sg, rc4salt, 4);
        ahash_request_set_crypt(req, sg, NULL, 4);
        err = crypto_ahash_update(req);
        if (err)
                goto out;

        sg_init_one(sg, header, hdrlen);
        ahash_request_set_crypt(req, sg, NULL, hdrlen);
        err = crypto_ahash_update(req);
        if (err)
                goto out;
        err = xdr_process_buf(body, body_offset, body->len - body_offset,
                              checksummer, req);
        if (err)
                goto out;
        ahash_request_set_crypt(req, NULL, checksumdata, 0);
        err = crypto_ahash_final(req);
        if (err)
                goto out;

        ahash_request_free(req);
        req = ahash_request_alloc(hmac_md5, GFP_NOFS);
        if (!req)
                goto out_free_hmac_md5;

        ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP, NULL, NULL);

        err = crypto_ahash_init(req);
        if (err)
                goto out;
        err = crypto_ahash_setkey(hmac_md5, cksumkey, kctx->gk5e->keylength);
        if (err)
                goto out;

        sg_init_one(sg, checksumdata, crypto_ahash_digestsize(md5));
        ahash_request_set_crypt(req, sg, checksumdata,
                                crypto_ahash_digestsize(md5));
        err = crypto_ahash_digest(req);
        if (err)
                goto out;

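        /*
         * Note: only the leading cksumlength octets of the HMAC-MD5
         * result are kept, truncating the 16-octet MD5 digest to the
         * enctype's advertised checksum length.
         */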
        memcpy(cksumout->data, checksumdata, kctx->gk5e->cksumlength);
        cksumout->len = kctx->gk5e->cksumlength;
out:
        ahash_request_free(req);
out_free_hmac_md5:
        crypto_free_ahash(hmac_md5);
out_free_md5:
        crypto_free_ahash(md5);
out_free_cksum:
        kfree(checksumdata);
        return err ? GSS_S_FAILURE : 0;
}

/*
 * checksum the plaintext data and hdrlen bytes of the token header
 * The checksum is performed over the first 8 bytes of the
 * gss token header and then over the data body
 */
u32
make_checksum(struct krb5_ctx *kctx, char *header, int hdrlen,
              struct xdr_buf *body, int body_offset, u8 *cksumkey,
              unsigned int usage, struct xdr_netobj *cksumout)
{
        struct crypto_ahash *tfm;
        struct ahash_request *req;
        struct scatterlist sg[1];
        int err = -1;
        u8 *checksumdata;
        unsigned int checksumlen;

        if (kctx->gk5e->ctype == CKSUMTYPE_HMAC_MD5_ARCFOUR)
                return make_checksum_hmac_md5(kctx, header, hdrlen,
                                              body, body_offset,
                                              cksumkey, usage, cksumout);

        if (cksumout->len < kctx->gk5e->cksumlength) {
                dprintk("%s: checksum buffer length, %u, too small for %s\n",
                        __func__, cksumout->len, kctx->gk5e->name);
                return GSS_S_FAILURE;
        }

        checksumdata = kmalloc(GSS_KRB5_MAX_CKSUM_LEN, GFP_NOFS);
        if (checksumdata == NULL)
                return GSS_S_FAILURE;

        tfm = crypto_alloc_ahash(kctx->gk5e->cksum_name, 0, CRYPTO_ALG_ASYNC);
        if (IS_ERR(tfm))
                goto out_free_cksum;

        req = ahash_request_alloc(tfm, GFP_NOFS);
        if (!req)
                goto out_free_ahash;

        ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP, NULL, NULL);

        checksumlen = crypto_ahash_digestsize(tfm);

        if (cksumkey != NULL) {
                err = crypto_ahash_setkey(tfm, cksumkey,
                                          kctx->gk5e->keylength);
                if (err)
                        goto out;
        }

        err = crypto_ahash_init(req);
        if (err)
                goto out;
        sg_init_one(sg, header, hdrlen);
        ahash_request_set_crypt(req, sg, NULL, hdrlen);
        err = crypto_ahash_update(req);
        if (err)
                goto out;
        err = xdr_process_buf(body, body_offset, body->len - body_offset,
                              checksummer, req);
        if (err)
                goto out;
        ahash_request_set_crypt(req, NULL, checksumdata, 0);
        err = crypto_ahash_final(req);
        if (err)
                goto out;

        switch (kctx->gk5e->ctype) {
        case CKSUMTYPE_RSA_MD5:
                err = kctx->gk5e->encrypt(kctx->seq, NULL, checksumdata,
                                          checksumdata, checksumlen);
                if (err)
                        goto out;
                memcpy(cksumout->data,
                       checksumdata + checksumlen - kctx->gk5e->cksumlength,
                       kctx->gk5e->cksumlength);
                break;
        case CKSUMTYPE_HMAC_SHA1_DES3:
                memcpy(cksumout->data, checksumdata, kctx->gk5e->cksumlength);
                break;
        default:
                BUG();
                break;
        }
        cksumout->len = kctx->gk5e->cksumlength;
out:
        ahash_request_free(req);
out_free_ahash:
        crypto_free_ahash(tfm);
out_free_cksum:
        kfree(checksumdata);
        return err ? GSS_S_FAILURE : 0;
}

/*
 * checksum the plaintext data and hdrlen bytes of the token header
 * Per rfc4121, sec. 4.2.4, the checksum is performed over the data
 * body then over the first 16 octets of the MIC token
 * Inclusion of the header data in the calculation of the
 * checksum is optional.
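 *
 * The resulting HMAC is truncated to the enctype's cksumlength; for
 * the aes128-cts and aes256-cts profiles this is the 96-bit
 * truncation specified by RFC 3962.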
 */
u32
make_checksum_v2(struct krb5_ctx *kctx, char *header, int hdrlen,
                 struct xdr_buf *body, int body_offset, u8 *cksumkey,
                 unsigned int usage, struct xdr_netobj *cksumout)
{
        struct crypto_ahash *tfm;
        struct ahash_request *req;
        struct scatterlist sg[1];
        int err = -1;
        u8 *checksumdata;
        unsigned int checksumlen;

        if (kctx->gk5e->keyed_cksum == 0) {
                dprintk("%s: expected keyed hash for %s\n",
                        __func__, kctx->gk5e->name);
                return GSS_S_FAILURE;
        }
        if (cksumkey == NULL) {
                dprintk("%s: no key supplied for %s\n",
                        __func__, kctx->gk5e->name);
                return GSS_S_FAILURE;
        }

        checksumdata = kmalloc(GSS_KRB5_MAX_CKSUM_LEN, GFP_NOFS);
        if (!checksumdata)
                return GSS_S_FAILURE;

        tfm = crypto_alloc_ahash(kctx->gk5e->cksum_name, 0, CRYPTO_ALG_ASYNC);
        if (IS_ERR(tfm))
                goto out_free_cksum;
        checksumlen = crypto_ahash_digestsize(tfm);

        req = ahash_request_alloc(tfm, GFP_NOFS);
        if (!req)
                goto out_free_ahash;

        ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP, NULL, NULL);

        err = crypto_ahash_setkey(tfm, cksumkey, kctx->gk5e->keylength);
        if (err)
                goto out;

        err = crypto_ahash_init(req);
        if (err)
                goto out;
        err = xdr_process_buf(body, body_offset, body->len - body_offset,
                              checksummer, req);
        if (err)
                goto out;
        if (header != NULL) {
                sg_init_one(sg, header, hdrlen);
                ahash_request_set_crypt(req, sg, NULL, hdrlen);
                err = crypto_ahash_update(req);
                if (err)
                        goto out;
        }
        ahash_request_set_crypt(req, NULL, checksumdata, 0);
        err = crypto_ahash_final(req);
        if (err)
                goto out;

        cksumout->len = kctx->gk5e->cksumlength;

        switch (kctx->gk5e->ctype) {
        case CKSUMTYPE_HMAC_SHA1_96_AES128:
        case CKSUMTYPE_HMAC_SHA1_96_AES256:
                /* note that this truncates the hash */
                memcpy(cksumout->data, checksumdata, kctx->gk5e->cksumlength);
                break;
        default:
                BUG();
                break;
        }
out:
        ahash_request_free(req);
out_free_ahash:
        crypto_free_ahash(tfm);
out_free_cksum:
        kfree(checksumdata);
        return err ? GSS_S_FAILURE : 0;
}

struct encryptor_desc {
        u8 iv[GSS_KRB5_MAX_BLOCKSIZE];
        struct skcipher_request *req;
        int pos;
        struct xdr_buf *outbuf;
        struct page **pages;
        struct scatterlist infrags[4];
        struct scatterlist outfrags[4];
        int fragno;
        int fraglen;
};

static int
encryptor(struct scatterlist *sg, void *data)
{
        struct encryptor_desc *desc = data;
        struct xdr_buf *outbuf = desc->outbuf;
        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(desc->req);
        struct page *in_page;
        int thislen = desc->fraglen + sg->length;
        int fraglen, ret;
        int page_pos;

        /* Worst case is 4 fragments: head, end of page 1, start
         * of page 2, tail. Anything more is a bug.
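         *
         * A trailing partial cipher block, if any, is carried over in
         * infrags[0]/outfrags[0] below so that the CBC cipher only
         * ever sees whole blocks.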
         */
        BUG_ON(desc->fragno > 3);

        page_pos = desc->pos - outbuf->head[0].iov_len;
        if (page_pos >= 0 && page_pos < outbuf->page_len) {
                /* pages are not in place: */
                int i = (page_pos + outbuf->page_base) >> PAGE_SHIFT;
                in_page = desc->pages[i];
        } else {
                in_page = sg_page(sg);
        }
        sg_set_page(&desc->infrags[desc->fragno], in_page, sg->length,
                    sg->offset);
        sg_set_page(&desc->outfrags[desc->fragno], sg_page(sg), sg->length,
                    sg->offset);
        desc->fragno++;
        desc->fraglen += sg->length;
        desc->pos += sg->length;

        fraglen = thislen & (crypto_skcipher_blocksize(tfm) - 1);
        thislen -= fraglen;

        if (thislen == 0)
                return 0;

        sg_mark_end(&desc->infrags[desc->fragno - 1]);
        sg_mark_end(&desc->outfrags[desc->fragno - 1]);

        skcipher_request_set_crypt(desc->req, desc->infrags, desc->outfrags,
                                   thislen, desc->iv);

        ret = crypto_skcipher_encrypt(desc->req);
        if (ret)
                return ret;

        sg_init_table(desc->infrags, 4);
        sg_init_table(desc->outfrags, 4);

        if (fraglen) {
                sg_set_page(&desc->outfrags[0], sg_page(sg), fraglen,
                            sg->offset + sg->length - fraglen);
                desc->infrags[0] = desc->outfrags[0];
                sg_assign_page(&desc->infrags[0], in_page);
                desc->fragno = 1;
                desc->fraglen = fraglen;
        } else {
                desc->fragno = 0;
                desc->fraglen = 0;
        }
        return 0;
}

int
gss_encrypt_xdr_buf(struct crypto_skcipher *tfm, struct xdr_buf *buf,
                    int offset, struct page **pages)
{
        int ret;
        struct encryptor_desc desc;
        SKCIPHER_REQUEST_ON_STACK(req, tfm);

        BUG_ON((buf->len - offset) % crypto_skcipher_blocksize(tfm) != 0);

        skcipher_request_set_tfm(req, tfm);
        skcipher_request_set_callback(req, 0, NULL, NULL);

        memset(desc.iv, 0, sizeof(desc.iv));
        desc.req = req;
        desc.pos = offset;
        desc.outbuf = buf;
        desc.pages = pages;
        desc.fragno = 0;
        desc.fraglen = 0;

        sg_init_table(desc.infrags, 4);
        sg_init_table(desc.outfrags, 4);

        ret = xdr_process_buf(buf, offset, buf->len - offset, encryptor, &desc);
        skcipher_request_zero(req);
        return ret;
}

struct decryptor_desc {
        u8 iv[GSS_KRB5_MAX_BLOCKSIZE];
        struct skcipher_request *req;
        struct scatterlist frags[4];
        int fragno;
        int fraglen;
};

static int
decryptor(struct scatterlist *sg, void *data)
{
        struct decryptor_desc *desc = data;
        int thislen = desc->fraglen + sg->length;
        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(desc->req);
        int fraglen, ret;

        /* Worst case is 4 fragments: head, end of page 1, start
         * of page 2, tail. Anything more is a bug.
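         *
         * Unlike encryptor(), decryption runs in place, so a single
         * fragment list serves as both source and destination.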
         */
        BUG_ON(desc->fragno > 3);
        sg_set_page(&desc->frags[desc->fragno], sg_page(sg), sg->length,
                    sg->offset);
        desc->fragno++;
        desc->fraglen += sg->length;

        fraglen = thislen & (crypto_skcipher_blocksize(tfm) - 1);
        thislen -= fraglen;

        if (thislen == 0)
                return 0;

        sg_mark_end(&desc->frags[desc->fragno - 1]);

        skcipher_request_set_crypt(desc->req, desc->frags, desc->frags,
                                   thislen, desc->iv);

        ret = crypto_skcipher_decrypt(desc->req);
        if (ret)
                return ret;

        sg_init_table(desc->frags, 4);

        if (fraglen) {
                sg_set_page(&desc->frags[0], sg_page(sg), fraglen,
                            sg->offset + sg->length - fraglen);
                desc->fragno = 1;
                desc->fraglen = fraglen;
        } else {
                desc->fragno = 0;
                desc->fraglen = 0;
        }
        return 0;
}

int
gss_decrypt_xdr_buf(struct crypto_skcipher *tfm, struct xdr_buf *buf,
                    int offset)
{
        int ret;
        struct decryptor_desc desc;
        SKCIPHER_REQUEST_ON_STACK(req, tfm);

        /* XXXJBF: */
        BUG_ON((buf->len - offset) % crypto_skcipher_blocksize(tfm) != 0);

        skcipher_request_set_tfm(req, tfm);
        skcipher_request_set_callback(req, 0, NULL, NULL);

        memset(desc.iv, 0, sizeof(desc.iv));
        desc.req = req;
        desc.fragno = 0;
        desc.fraglen = 0;

        sg_init_table(desc.frags, 4);

        ret = xdr_process_buf(buf, offset, buf->len - offset, decryptor, &desc);
        skcipher_request_zero(req);
        return ret;
}

/*
 * This function makes the assumption that it was ultimately called
 * from gss_wrap().
 *
 * The client auth_gss code moves any existing tail data into a
 * separate page before calling gss_wrap.
 * The server svcauth_gss code ensures that both the head and the
 * tail have slack space of RPC_MAX_AUTH_SIZE before calling gss_wrap.
 *
 * Even with that guarantee, this function may be called more than
 * once in the processing of gss_wrap(). The best we can do is
 * verify at compile-time (see GSS_KRB5_SLACK_CHECK) that the
 * largest expected shift will fit within RPC_MAX_AUTH_SIZE.
 * At run-time we can verify that a single invocation of this
 * function doesn't attempt to use more than RPC_MAX_AUTH_SIZE.
 */

int
xdr_extend_head(struct xdr_buf *buf, unsigned int base, unsigned int shiftlen)
{
        u8 *p;

        if (shiftlen == 0)
                return 0;

        BUILD_BUG_ON(GSS_KRB5_MAX_SLACK_NEEDED > RPC_MAX_AUTH_SIZE);
        BUG_ON(shiftlen > RPC_MAX_AUTH_SIZE);

        p = buf->head[0].iov_base + base;

        memmove(p + shiftlen, p, buf->head[0].iov_len - base);

        buf->head[0].iov_len += shiftlen;
        buf->len += shiftlen;

        return 0;
}

static u32
gss_krb5_cts_crypt(struct crypto_skcipher *cipher, struct xdr_buf *buf,
                   u32 offset, u8 *iv, struct page **pages, int encrypt)
{
        u32 ret;
        struct scatterlist sg[1];
        SKCIPHER_REQUEST_ON_STACK(req, cipher);
        u8 *data;
        struct page **save_pages;
        u32 len = buf->len - offset;

        if (len > GSS_KRB5_MAX_BLOCKSIZE * 2) {
                WARN_ON(1);
                return -ENOMEM;
        }
        data = kmalloc(GSS_KRB5_MAX_BLOCKSIZE * 2, GFP_NOFS);
        if (!data)
                return -ENOMEM;

        /*
         * For encryption, we want to read from the cleartext
         * page cache pages, and write the encrypted data to
         * the supplied xdr_buf pages.
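         *
         * Note that this helper only ever handles the trailing one or
         * two cipher blocks of the buffer: len was bounded against
         * 2 * GSS_KRB5_MAX_BLOCKSIZE above, and the callers pass in
         * only what remains after the bulk CBC pass.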
         */
        save_pages = buf->pages;
        if (encrypt)
                buf->pages = pages;

        ret = read_bytes_from_xdr_buf(buf, offset, data, len);
        buf->pages = save_pages;
        if (ret)
                goto out;

        sg_init_one(sg, data, len);

        skcipher_request_set_tfm(req, cipher);
        skcipher_request_set_callback(req, 0, NULL, NULL);
        skcipher_request_set_crypt(req, sg, sg, len, iv);

        if (encrypt)
                ret = crypto_skcipher_encrypt(req);
        else
                ret = crypto_skcipher_decrypt(req);

        skcipher_request_zero(req);

        if (ret)
                goto out;

        ret = write_bytes_to_xdr_buf(buf, offset, data, len);

out:
        kfree(data);
        return ret;
}

u32
gss_krb5_aes_encrypt(struct krb5_ctx *kctx, u32 offset,
                     struct xdr_buf *buf, struct page **pages)
{
        u32 err;
        struct xdr_netobj hmac;
        u8 *cksumkey;
        u8 *ecptr;
        struct crypto_skcipher *cipher, *aux_cipher;
        int blocksize;
        struct page **save_pages;
        int nblocks, nbytes;
        struct encryptor_desc desc;
        u32 cbcbytes;
        unsigned int usage;

        if (kctx->initiate) {
                cipher = kctx->initiator_enc;
                aux_cipher = kctx->initiator_enc_aux;
                cksumkey = kctx->initiator_integ;
                usage = KG_USAGE_INITIATOR_SEAL;
        } else {
                cipher = kctx->acceptor_enc;
                aux_cipher = kctx->acceptor_enc_aux;
                cksumkey = kctx->acceptor_integ;
                usage = KG_USAGE_ACCEPTOR_SEAL;
        }
        blocksize = crypto_skcipher_blocksize(cipher);

        /* hide the gss token header and insert the confounder */
        offset += GSS_KRB5_TOK_HDR_LEN;
        if (xdr_extend_head(buf, offset, kctx->gk5e->conflen))
                return GSS_S_FAILURE;
        gss_krb5_make_confounder(buf->head[0].iov_base + offset, kctx->gk5e->conflen);
        offset -= GSS_KRB5_TOK_HDR_LEN;

        if (buf->tail[0].iov_base != NULL) {
                ecptr = buf->tail[0].iov_base + buf->tail[0].iov_len;
        } else {
                buf->tail[0].iov_base = buf->head[0].iov_base
                        + buf->head[0].iov_len;
                buf->tail[0].iov_len = 0;
                ecptr = buf->tail[0].iov_base;
        }

        /* copy plaintext gss token header after filler (if any) */
        memcpy(ecptr, buf->head[0].iov_base + offset, GSS_KRB5_TOK_HDR_LEN);
        buf->tail[0].iov_len += GSS_KRB5_TOK_HDR_LEN;
        buf->len += GSS_KRB5_TOK_HDR_LEN;

        /* Do the HMAC */
        hmac.len = GSS_KRB5_MAX_CKSUM_LEN;
        hmac.data = buf->tail[0].iov_base + buf->tail[0].iov_len;

        /*
         * When we are called, pages points to the real page cache
         * data -- which we can't go and encrypt! buf->pages points
         * to scratch pages which we are going to send off to the
         * client/server. Swap in the plaintext pages to calculate
         * the hmac.
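         *
         * The swap is undone as soon as the checksum has been
         * computed, so buf->pages refers to the scratch pages again
         * before any encryption takes place.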
         */
        save_pages = buf->pages;
        buf->pages = pages;

        err = make_checksum_v2(kctx, NULL, 0, buf,
                               offset + GSS_KRB5_TOK_HDR_LEN,
                               cksumkey, usage, &hmac);
        buf->pages = save_pages;
        if (err)
                return GSS_S_FAILURE;

        nbytes = buf->len - offset - GSS_KRB5_TOK_HDR_LEN;
        nblocks = (nbytes + blocksize - 1) / blocksize;
        cbcbytes = 0;
        if (nblocks > 2)
                cbcbytes = (nblocks - 2) * blocksize;

        memset(desc.iv, 0, sizeof(desc.iv));

        if (cbcbytes) {
                SKCIPHER_REQUEST_ON_STACK(req, aux_cipher);

                desc.pos = offset + GSS_KRB5_TOK_HDR_LEN;
                desc.fragno = 0;
                desc.fraglen = 0;
                desc.pages = pages;
                desc.outbuf = buf;
                desc.req = req;

                skcipher_request_set_tfm(req, aux_cipher);
                skcipher_request_set_callback(req, 0, NULL, NULL);

                sg_init_table(desc.infrags, 4);
                sg_init_table(desc.outfrags, 4);

                err = xdr_process_buf(buf, offset + GSS_KRB5_TOK_HDR_LEN,
                                      cbcbytes, encryptor, &desc);
                skcipher_request_zero(req);
                if (err)
                        goto out_err;
        }

        /* Make sure IV carries forward from any CBC results. */
        err = gss_krb5_cts_crypt(cipher, buf,
                                 offset + GSS_KRB5_TOK_HDR_LEN + cbcbytes,
                                 desc.iv, pages, 1);
        if (err) {
                err = GSS_S_FAILURE;
                goto out_err;
        }

        /* Now update buf to account for HMAC */
        buf->tail[0].iov_len += kctx->gk5e->cksumlength;
        buf->len += kctx->gk5e->cksumlength;

out_err:
        if (err)
                err = GSS_S_FAILURE;
        return err;
}

u32
gss_krb5_aes_decrypt(struct krb5_ctx *kctx, u32 offset, struct xdr_buf *buf,
                     u32 *headskip, u32 *tailskip)
{
        struct xdr_buf subbuf;
        u32 ret = 0;
        u8 *cksum_key;
        struct crypto_skcipher *cipher, *aux_cipher;
        struct xdr_netobj our_hmac_obj;
        u8 our_hmac[GSS_KRB5_MAX_CKSUM_LEN];
        u8 pkt_hmac[GSS_KRB5_MAX_CKSUM_LEN];
        int nblocks, blocksize, cbcbytes;
        struct decryptor_desc desc;
        unsigned int usage;

        if (kctx->initiate) {
                cipher = kctx->acceptor_enc;
                aux_cipher = kctx->acceptor_enc_aux;
                cksum_key = kctx->acceptor_integ;
                usage = KG_USAGE_ACCEPTOR_SEAL;
        } else {
                cipher = kctx->initiator_enc;
                aux_cipher = kctx->initiator_enc_aux;
                cksum_key = kctx->initiator_integ;
                usage = KG_USAGE_INITIATOR_SEAL;
        }
        blocksize = crypto_skcipher_blocksize(cipher);

        /* create a segment skipping the header and leaving out the checksum */
        xdr_buf_subsegment(buf, &subbuf, offset + GSS_KRB5_TOK_HDR_LEN,
                           (buf->len - offset - GSS_KRB5_TOK_HDR_LEN -
                            kctx->gk5e->cksumlength));

        nblocks = (subbuf.len + blocksize - 1) / blocksize;

        cbcbytes = 0;
        if (nblocks > 2)
                cbcbytes = (nblocks - 2) * blocksize;

        memset(desc.iv, 0, sizeof(desc.iv));

        if (cbcbytes) {
                SKCIPHER_REQUEST_ON_STACK(req, aux_cipher);

                desc.fragno = 0;
                desc.fraglen = 0;
                desc.req = req;

                skcipher_request_set_tfm(req, aux_cipher);
                skcipher_request_set_callback(req, 0, NULL, NULL);

                sg_init_table(desc.frags, 4);

                ret = xdr_process_buf(&subbuf, 0, cbcbytes, decryptor, &desc);
                skcipher_request_zero(req);
                if (ret)
                        goto out_err;
        }

        /* Make sure IV carries forward from any CBC results.
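         * At this point desc.iv holds the last CBC ciphertext block
         * (or all zeroes if the buffer was too short for a CBC pass),
         * which is the chaining value the CTS step expects.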
         */
        ret = gss_krb5_cts_crypt(cipher, &subbuf, cbcbytes, desc.iv, NULL, 0);
        if (ret)
                goto out_err;

        /* Calculate our hmac over the plaintext data */
        our_hmac_obj.len = sizeof(our_hmac);
        our_hmac_obj.data = our_hmac;

        ret = make_checksum_v2(kctx, NULL, 0, &subbuf, 0,
                               cksum_key, usage, &our_hmac_obj);
        if (ret)
                goto out_err;

        /* Get the packet's hmac value */
        ret = read_bytes_from_xdr_buf(buf, buf->len - kctx->gk5e->cksumlength,
                                      pkt_hmac, kctx->gk5e->cksumlength);
        if (ret)
                goto out_err;

        if (memcmp(pkt_hmac, our_hmac, kctx->gk5e->cksumlength) != 0) {
                ret = GSS_S_BAD_SIG;
                goto out_err;
        }
        *headskip = kctx->gk5e->conflen;
        *tailskip = kctx->gk5e->cksumlength;
out_err:
        if (ret && ret != GSS_S_BAD_SIG)
                ret = GSS_S_FAILURE;
        return ret;
}

/*
 * Compute Kseq given the initial session key and the checksum.
 * Set the key of the given cipher.
 */
int
krb5_rc4_setup_seq_key(struct krb5_ctx *kctx, struct crypto_skcipher *cipher,
                       unsigned char *cksum)
{
        struct crypto_shash *hmac;
        struct shash_desc *desc;
        u8 Kseq[GSS_KRB5_MAX_KEYLEN];
        u32 zeroconstant = 0;
        int err;

        dprintk("%s: entered\n", __func__);

        hmac = crypto_alloc_shash(kctx->gk5e->cksum_name, 0, 0);
        if (IS_ERR(hmac)) {
                dprintk("%s: error %ld, allocating hash '%s'\n",
                        __func__, PTR_ERR(hmac), kctx->gk5e->cksum_name);
                return PTR_ERR(hmac);
        }

        desc = kmalloc(sizeof(*desc) + crypto_shash_descsize(hmac),
                       GFP_NOFS);
        if (!desc) {
                dprintk("%s: failed to allocate shash descriptor for '%s'\n",
                        __func__, kctx->gk5e->cksum_name);
                crypto_free_shash(hmac);
                return -ENOMEM;
        }

        desc->tfm = hmac;
        desc->flags = 0;

        /* Compute intermediate Kseq from session key */
        err = crypto_shash_setkey(hmac, kctx->Ksess, kctx->gk5e->keylength);
        if (err)
                goto out_err;

        err = crypto_shash_digest(desc, (u8 *)&zeroconstant, 4, Kseq);
        if (err)
                goto out_err;

        /* Compute final Kseq from the checksum and intermediate Kseq */
        err = crypto_shash_setkey(hmac, Kseq, kctx->gk5e->keylength);
        if (err)
                goto out_err;

        err = crypto_shash_digest(desc, cksum, 8, Kseq);
        if (err)
                goto out_err;

        err = crypto_skcipher_setkey(cipher, Kseq, kctx->gk5e->keylength);
        if (err)
                goto out_err;

        err = 0;

out_err:
        kzfree(desc);
        crypto_free_shash(hmac);
        dprintk("%s: returning %d\n", __func__, err);
        return err;
}

/*
 * Compute Kcrypt given the initial session key and the plaintext seqnum.
 * Set the key of cipher kctx->enc.
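 *
 * As with Kseq above, this is two HMAC passes: one over a 4-octet
 * zero constant keyed with (Ksess XOR 0xF0), then one over the
 * big-endian sequence number keyed with that intermediate value.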
 */
int
krb5_rc4_setup_enc_key(struct krb5_ctx *kctx, struct crypto_skcipher *cipher,
                       s32 seqnum)
{
        struct crypto_shash *hmac;
        struct shash_desc *desc;
        u8 Kcrypt[GSS_KRB5_MAX_KEYLEN];
        u8 zeroconstant[4] = {0};
        u8 seqnumarray[4];
        int err, i;

        dprintk("%s: entered, seqnum %d\n", __func__, seqnum);

        hmac = crypto_alloc_shash(kctx->gk5e->cksum_name, 0, 0);
        if (IS_ERR(hmac)) {
                dprintk("%s: error %ld, allocating hash '%s'\n",
                        __func__, PTR_ERR(hmac), kctx->gk5e->cksum_name);
                return PTR_ERR(hmac);
        }

        desc = kmalloc(sizeof(*desc) + crypto_shash_descsize(hmac),
                       GFP_NOFS);
        if (!desc) {
                dprintk("%s: failed to allocate shash descriptor for '%s'\n",
                        __func__, kctx->gk5e->cksum_name);
                crypto_free_shash(hmac);
                return -ENOMEM;
        }

        desc->tfm = hmac;
        desc->flags = 0;

        /* Compute intermediate Kcrypt from session key */
        for (i = 0; i < kctx->gk5e->keylength; i++)
                Kcrypt[i] = kctx->Ksess[i] ^ 0xf0;

        err = crypto_shash_setkey(hmac, Kcrypt, kctx->gk5e->keylength);
        if (err)
                goto out_err;

        err = crypto_shash_digest(desc, zeroconstant, 4, Kcrypt);
        if (err)
                goto out_err;

        /* Compute final Kcrypt from the seqnum and intermediate Kcrypt */
        err = crypto_shash_setkey(hmac, Kcrypt, kctx->gk5e->keylength);
        if (err)
                goto out_err;

        seqnumarray[0] = (unsigned char) ((seqnum >> 24) & 0xff);
        seqnumarray[1] = (unsigned char) ((seqnum >> 16) & 0xff);
        seqnumarray[2] = (unsigned char) ((seqnum >> 8) & 0xff);
        seqnumarray[3] = (unsigned char) ((seqnum >> 0) & 0xff);

        err = crypto_shash_digest(desc, seqnumarray, 4, Kcrypt);
        if (err)
                goto out_err;

        err = crypto_skcipher_setkey(cipher, Kcrypt, kctx->gk5e->keylength);
        if (err)
                goto out_err;

        err = 0;

out_err:
        kzfree(desc);
        crypto_free_shash(hmac);
        dprintk("%s: returning %d\n", __func__, err);
        return err;
}