/*
 *  linux/net/sunrpc/gss_krb5_crypto.c
 *
 *  Copyright (c) 2000-2008 The Regents of the University of Michigan.
 *  All rights reserved.
 *
 *  Andy Adamson   <andros@umich.edu>
 *  Bruce Fields   <bfields@umich.edu>
 */

/*
 * Copyright (C) 1998 by the FundsXpress, INC.
 *
 * All rights reserved.
 *
 * Export of this software from the United States of America may require
 * a specific license from the United States Government.  It is the
 * responsibility of any person or organization contemplating export to
 * obtain such a license before exporting.
 *
 * WITHIN THAT CONSTRAINT, permission to use, copy, modify, and
 * distribute this software and its documentation for any purpose and
 * without fee is hereby granted, provided that the above copyright
 * notice appear in all copies and that both that copyright notice and
 * this permission notice appear in supporting documentation, and that
 * the name of FundsXpress not be used in advertising or publicity
 * pertaining to distribution of the software without specific, written
 * prior permission.  FundsXpress makes no representations about the
 * suitability of this software for any purpose.  It is provided "as is"
 * without express or implied warranty.
 *
 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
 */

#include <crypto/algapi.h>
#include <crypto/hash.h>
#include <crypto/skcipher.h>
#include <linux/err.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/random.h>
#include <linux/sunrpc/gss_krb5.h>
#include <linux/sunrpc/xdr.h>

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
# define RPCDBG_FACILITY	RPCDBG_AUTH
#endif

u32
krb5_encrypt(
	struct crypto_skcipher *tfm,
	void *iv,
	void *in,
	void *out,
	int length)
{
	u32 ret = -EINVAL;
	struct scatterlist sg[1];
	u8 local_iv[GSS_KRB5_MAX_BLOCKSIZE] = {0};
	SKCIPHER_REQUEST_ON_STACK(req, tfm);

	if (length % crypto_skcipher_blocksize(tfm) != 0)
		goto out;

	if (crypto_skcipher_ivsize(tfm) > GSS_KRB5_MAX_BLOCKSIZE) {
		dprintk("RPC: gss_k5encrypt: tfm iv size too large %d\n",
			crypto_skcipher_ivsize(tfm));
		goto out;
	}

	if (iv)
		memcpy(local_iv, iv, crypto_skcipher_ivsize(tfm));

	memcpy(out, in, length);
	sg_init_one(sg, out, length);

	skcipher_request_set_tfm(req, tfm);
	skcipher_request_set_callback(req, 0, NULL, NULL);
	skcipher_request_set_crypt(req, sg, sg, length, local_iv);

	ret = crypto_skcipher_encrypt(req);
	skcipher_request_zero(req);
out:
	dprintk("RPC: krb5_encrypt returns %d\n", ret);
	return ret;
}
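
/*
 * Illustrative sketch only (not part of the original code): a caller
 * holding an already-allocated and keyed CBC skcipher could encrypt a
 * single block like this; the length must be a multiple of the cipher
 * block size, or krb5_encrypt() fails with -EINVAL:
 *
 *	u8 iv[GSS_KRB5_MAX_BLOCKSIZE] = {0};
 *	u8 inblk[8], outblk[8];
 *	u32 err = krb5_encrypt(tfm, iv, inblk, outblk, sizeof(inblk));
 *
 * krb5_decrypt() below is the exact mirror image.
 */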
u32
krb5_decrypt(
	struct crypto_skcipher *tfm,
	void *iv,
	void *in,
	void *out,
	int length)
{
	u32 ret = -EINVAL;
	struct scatterlist sg[1];
	u8 local_iv[GSS_KRB5_MAX_BLOCKSIZE] = {0};
	SKCIPHER_REQUEST_ON_STACK(req, tfm);

	if (length % crypto_skcipher_blocksize(tfm) != 0)
		goto out;

	if (crypto_skcipher_ivsize(tfm) > GSS_KRB5_MAX_BLOCKSIZE) {
		dprintk("RPC: gss_k5decrypt: tfm iv size too large %d\n",
			crypto_skcipher_ivsize(tfm));
		goto out;
	}
	if (iv)
		memcpy(local_iv, iv, crypto_skcipher_ivsize(tfm));

	memcpy(out, in, length);
	sg_init_one(sg, out, length);

	skcipher_request_set_tfm(req, tfm);
	skcipher_request_set_callback(req, 0, NULL, NULL);
	skcipher_request_set_crypt(req, sg, sg, length, local_iv);

	ret = crypto_skcipher_decrypt(req);
	skcipher_request_zero(req);
out:
	dprintk("RPC: gss_k5decrypt returns %d\n", ret);
	return ret;
}

static int
checksummer(struct scatterlist *sg, void *data)
{
	struct ahash_request *req = data;

	ahash_request_set_crypt(req, sg, NULL, sg->length);

	return crypto_ahash_update(req);
}

static int
arcfour_hmac_md5_usage_to_salt(unsigned int usage, u8 salt[4])
{
	unsigned int ms_usage;

	switch (usage) {
	case KG_USAGE_SIGN:
		ms_usage = 15;
		break;
	case KG_USAGE_SEAL:
		ms_usage = 13;
		break;
	default:
		return -EINVAL;
	}
	salt[0] = (ms_usage >> 0) & 0xff;
	salt[1] = (ms_usage >> 8) & 0xff;
	salt[2] = (ms_usage >> 16) & 0xff;
	salt[3] = (ms_usage >> 24) & 0xff;

	return 0;
}
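
/*
 * The ARCFOUR (RC4-HMAC, RFC 4757) checksum is computed in two stages,
 * which is what make_checksum_hmac_md5() below implements: an unkeyed
 * MD5 over (salt | header | body), followed by an HMAC-MD5 of that
 * digest under the checksum key.  The 4-byte salt is the Microsoft
 * usage number in least-significant-byte-first order, as produced by
 * arcfour_hmac_md5_usage_to_salt() above.
 */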
static u32
make_checksum_hmac_md5(struct krb5_ctx *kctx, char *header, int hdrlen,
		       struct xdr_buf *body, int body_offset, u8 *cksumkey,
		       unsigned int usage, struct xdr_netobj *cksumout)
{
	struct scatterlist sg[1];
	int err = -1;
	u8 *checksumdata;
	u8 rc4salt[4];
	struct crypto_ahash *md5;
	struct crypto_ahash *hmac_md5;
	struct ahash_request *req;

	if (cksumkey == NULL)
		return GSS_S_FAILURE;

	if (cksumout->len < kctx->gk5e->cksumlength) {
		dprintk("%s: checksum buffer length, %u, too small for %s\n",
			__func__, cksumout->len, kctx->gk5e->name);
		return GSS_S_FAILURE;
	}

	if (arcfour_hmac_md5_usage_to_salt(usage, rc4salt)) {
		dprintk("%s: invalid usage value %u\n", __func__, usage);
		return GSS_S_FAILURE;
	}

	checksumdata = kmalloc(GSS_KRB5_MAX_CKSUM_LEN, GFP_NOFS);
	if (!checksumdata)
		return GSS_S_FAILURE;

	md5 = crypto_alloc_ahash("md5", 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(md5))
		goto out_free_cksum;

	hmac_md5 = crypto_alloc_ahash(kctx->gk5e->cksum_name, 0,
				      CRYPTO_ALG_ASYNC);
	if (IS_ERR(hmac_md5))
		goto out_free_md5;

	req = ahash_request_alloc(md5, GFP_NOFS);
	if (!req)
		goto out_free_hmac_md5;

	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP, NULL, NULL);

	err = crypto_ahash_init(req);
	if (err)
		goto out;
	sg_init_one(sg, rc4salt, 4);
	ahash_request_set_crypt(req, sg, NULL, 4);
	err = crypto_ahash_update(req);
	if (err)
		goto out;

	sg_init_one(sg, header, hdrlen);
	ahash_request_set_crypt(req, sg, NULL, hdrlen);
	err = crypto_ahash_update(req);
	if (err)
		goto out;
	err = xdr_process_buf(body, body_offset, body->len - body_offset,
			      checksummer, req);
	if (err)
		goto out;
	ahash_request_set_crypt(req, NULL, checksumdata, 0);
	err = crypto_ahash_final(req);
	if (err)
		goto out;

	ahash_request_free(req);
	err = -ENOMEM;	/* don't report success if the realloc below fails */
	req = ahash_request_alloc(hmac_md5, GFP_NOFS);
	if (!req)
		goto out_free_hmac_md5;

	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP, NULL, NULL);

	err = crypto_ahash_setkey(hmac_md5, cksumkey, kctx->gk5e->keylength);
	if (err)
		goto out;

	sg_init_one(sg, checksumdata, crypto_ahash_digestsize(md5));
	ahash_request_set_crypt(req, sg, checksumdata,
				crypto_ahash_digestsize(md5));
	err = crypto_ahash_digest(req);
	if (err)
		goto out;

	memcpy(cksumout->data, checksumdata, kctx->gk5e->cksumlength);
	cksumout->len = kctx->gk5e->cksumlength;
out:
	ahash_request_free(req);
out_free_hmac_md5:
	crypto_free_ahash(hmac_md5);
out_free_md5:
	crypto_free_ahash(md5);
out_free_cksum:
	kfree(checksumdata);
	return err ? GSS_S_FAILURE : 0;
}
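
/*
 * make_checksum() below is the common entry point for the v1-era
 * checksum types: it hands CKSUMTYPE_HMAC_MD5_ARCFOUR off to
 * make_checksum_hmac_md5() above and computes the rest itself.
 */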
/*
 * checksum the plaintext data and hdrlen bytes of the token header
 * The checksum is performed over the first 8 bytes of the
 * gss token header and then over the data body
 */
u32
make_checksum(struct krb5_ctx *kctx, char *header, int hdrlen,
	      struct xdr_buf *body, int body_offset, u8 *cksumkey,
	      unsigned int usage, struct xdr_netobj *cksumout)
{
	struct crypto_ahash *tfm;
	struct ahash_request *req;
	struct scatterlist sg[1];
	int err = -1;
	u8 *checksumdata;
	unsigned int checksumlen;

	if (kctx->gk5e->ctype == CKSUMTYPE_HMAC_MD5_ARCFOUR)
		return make_checksum_hmac_md5(kctx, header, hdrlen,
					      body, body_offset,
					      cksumkey, usage, cksumout);

	if (cksumout->len < kctx->gk5e->cksumlength) {
		dprintk("%s: checksum buffer length, %u, too small for %s\n",
			__func__, cksumout->len, kctx->gk5e->name);
		return GSS_S_FAILURE;
	}

	checksumdata = kmalloc(GSS_KRB5_MAX_CKSUM_LEN, GFP_NOFS);
	if (checksumdata == NULL)
		return GSS_S_FAILURE;

	tfm = crypto_alloc_ahash(kctx->gk5e->cksum_name, 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(tfm))
		goto out_free_cksum;

	req = ahash_request_alloc(tfm, GFP_NOFS);
	if (!req)
		goto out_free_ahash;

	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP, NULL, NULL);

	checksumlen = crypto_ahash_digestsize(tfm);

	if (cksumkey != NULL) {
		err = crypto_ahash_setkey(tfm, cksumkey,
					  kctx->gk5e->keylength);
		if (err)
			goto out;
	}

	err = crypto_ahash_init(req);
	if (err)
		goto out;
	sg_init_one(sg, header, hdrlen);
	ahash_request_set_crypt(req, sg, NULL, hdrlen);
	err = crypto_ahash_update(req);
	if (err)
		goto out;
	err = xdr_process_buf(body, body_offset, body->len - body_offset,
			      checksummer, req);
	if (err)
		goto out;
	ahash_request_set_crypt(req, NULL, checksumdata, 0);
	err = crypto_ahash_final(req);
	if (err)
		goto out;

	switch (kctx->gk5e->ctype) {
	case CKSUMTYPE_RSA_MD5:
		err = kctx->gk5e->encrypt(kctx->seq, NULL, checksumdata,
					  checksumdata, checksumlen);
		if (err)
			goto out;
		memcpy(cksumout->data,
		       checksumdata + checksumlen - kctx->gk5e->cksumlength,
		       kctx->gk5e->cksumlength);
		break;
	case CKSUMTYPE_HMAC_SHA1_DES3:
		memcpy(cksumout->data, checksumdata, kctx->gk5e->cksumlength);
		break;
	default:
		BUG();
		break;
	}
	cksumout->len = kctx->gk5e->cksumlength;
out:
	ahash_request_free(req);
out_free_ahash:
	crypto_free_ahash(tfm);
out_free_cksum:
	kfree(checksumdata);
	return err ? GSS_S_FAILURE : 0;
}

/*
 * checksum the plaintext data and hdrlen bytes of the token header
 * Per rfc4121, sec. 4.2.4, the checksum is performed over the data
 * body then over the first 16 octets of the MIC token
 * Inclusion of the header data in the calculation of the
 * checksum is optional.
 */
u32
make_checksum_v2(struct krb5_ctx *kctx, char *header, int hdrlen,
		 struct xdr_buf *body, int body_offset, u8 *cksumkey,
		 unsigned int usage, struct xdr_netobj *cksumout)
{
	struct crypto_ahash *tfm;
	struct ahash_request *req;
	struct scatterlist sg[1];
	int err = -1;
	u8 *checksumdata;
	unsigned int checksumlen;

	if (kctx->gk5e->keyed_cksum == 0) {
		dprintk("%s: expected keyed hash for %s\n",
			__func__, kctx->gk5e->name);
		return GSS_S_FAILURE;
	}
	if (cksumkey == NULL) {
		dprintk("%s: no key supplied for %s\n",
			__func__, kctx->gk5e->name);
		return GSS_S_FAILURE;
	}

	checksumdata = kmalloc(GSS_KRB5_MAX_CKSUM_LEN, GFP_NOFS);
	if (!checksumdata)
		return GSS_S_FAILURE;

	tfm = crypto_alloc_ahash(kctx->gk5e->cksum_name, 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(tfm))
		goto out_free_cksum;
	checksumlen = crypto_ahash_digestsize(tfm);

	req = ahash_request_alloc(tfm, GFP_NOFS);
	if (!req)
		goto out_free_ahash;

	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP, NULL, NULL);

	err = crypto_ahash_setkey(tfm, cksumkey, kctx->gk5e->keylength);
	if (err)
		goto out;

	err = crypto_ahash_init(req);
	if (err)
		goto out;
	err = xdr_process_buf(body, body_offset, body->len - body_offset,
			      checksummer, req);
	if (err)
		goto out;
	if (header != NULL) {
		sg_init_one(sg, header, hdrlen);
		ahash_request_set_crypt(req, sg, NULL, hdrlen);
		err = crypto_ahash_update(req);
		if (err)
			goto out;
	}
	ahash_request_set_crypt(req, NULL, checksumdata, 0);
	err = crypto_ahash_final(req);
	if (err)
		goto out;

	cksumout->len = kctx->gk5e->cksumlength;

	switch (kctx->gk5e->ctype) {
	case CKSUMTYPE_HMAC_SHA1_96_AES128:
	case CKSUMTYPE_HMAC_SHA1_96_AES256:
		/* note that this truncates the hash */
		memcpy(cksumout->data, checksumdata, kctx->gk5e->cksumlength);
		break;
	default:
		BUG();
		break;
	}
out:
	ahash_request_free(req);
out_free_ahash:
	crypto_free_ahash(tfm);
out_free_cksum:
	kfree(checksumdata);
	return err ? GSS_S_FAILURE : 0;
}
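
/*
 * State carried across encryptor() calls while walking an xdr_buf.
 * xdr_process_buf() presents the buffer as a series of scatterlist
 * entries; encryptor() batches them (at most four: head, the tail end
 * of one page, the start of the next, and tail) and CBC-encrypts each
 * batch in place, with desc->iv carrying the chaining value forward
 * between batches.  Any partial block left over (fraglen) is pushed
 * into the next batch so the cipher only ever sees whole blocks.
 */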
struct encryptor_desc {
	u8 iv[GSS_KRB5_MAX_BLOCKSIZE];
	struct skcipher_request *req;
	int pos;
	struct xdr_buf *outbuf;
	struct page **pages;
	struct scatterlist infrags[4];
	struct scatterlist outfrags[4];
	int fragno;
	int fraglen;
};

static int
encryptor(struct scatterlist *sg, void *data)
{
	struct encryptor_desc *desc = data;
	struct xdr_buf *outbuf = desc->outbuf;
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(desc->req);
	struct page *in_page;
	int thislen = desc->fraglen + sg->length;
	int fraglen, ret;
	int page_pos;

	/* Worst case is 4 fragments: head, end of page 1, start
	 * of page 2, tail.  Anything more is a bug. */
	BUG_ON(desc->fragno > 3);

	page_pos = desc->pos - outbuf->head[0].iov_len;
	if (page_pos >= 0 && page_pos < outbuf->page_len) {
		/* pages are not in place: */
		int i = (page_pos + outbuf->page_base) >> PAGE_SHIFT;
		in_page = desc->pages[i];
	} else {
		in_page = sg_page(sg);
	}
	sg_set_page(&desc->infrags[desc->fragno], in_page, sg->length,
		    sg->offset);
	sg_set_page(&desc->outfrags[desc->fragno], sg_page(sg), sg->length,
		    sg->offset);
	desc->fragno++;
	desc->fraglen += sg->length;
	desc->pos += sg->length;

	fraglen = thislen & (crypto_skcipher_blocksize(tfm) - 1);
	thislen -= fraglen;

	if (thislen == 0)
		return 0;

	sg_mark_end(&desc->infrags[desc->fragno - 1]);
	sg_mark_end(&desc->outfrags[desc->fragno - 1]);

	skcipher_request_set_crypt(desc->req, desc->infrags, desc->outfrags,
				   thislen, desc->iv);

	ret = crypto_skcipher_encrypt(desc->req);
	if (ret)
		return ret;

	sg_init_table(desc->infrags, 4);
	sg_init_table(desc->outfrags, 4);

	if (fraglen) {
		sg_set_page(&desc->outfrags[0], sg_page(sg), fraglen,
			    sg->offset + sg->length - fraglen);
		desc->infrags[0] = desc->outfrags[0];
		sg_assign_page(&desc->infrags[0], in_page);
		desc->fragno = 1;
		desc->fraglen = fraglen;
	} else {
		desc->fragno = 0;
		desc->fraglen = 0;
	}
	return 0;
}

int
gss_encrypt_xdr_buf(struct crypto_skcipher *tfm, struct xdr_buf *buf,
		    int offset, struct page **pages)
{
	int ret;
	struct encryptor_desc desc;
	SKCIPHER_REQUEST_ON_STACK(req, tfm);

	BUG_ON((buf->len - offset) % crypto_skcipher_blocksize(tfm) != 0);

	skcipher_request_set_tfm(req, tfm);
	skcipher_request_set_callback(req, 0, NULL, NULL);

	memset(desc.iv, 0, sizeof(desc.iv));
	desc.req = req;
	desc.pos = offset;
	desc.outbuf = buf;
	desc.pages = pages;
	desc.fragno = 0;
	desc.fraglen = 0;

	sg_init_table(desc.infrags, 4);
	sg_init_table(desc.outfrags, 4);

	ret = xdr_process_buf(buf, offset, buf->len - offset, encryptor, &desc);
	skcipher_request_zero(req);
	return ret;
}
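
/*
 * Illustrative sketch only (not part of the original code): in-place
 * encryption of everything past "offset" in an xdr_buf, assuming "tfm"
 * is a keyed CBC skcipher and "pages" holds the cleartext page-cache
 * pages corresponding to buf->pages:
 *
 *	int err = gss_encrypt_xdr_buf(tfm, buf, offset, pages);
 *
 * The data past offset must already be padded to a whole number of
 * cipher blocks, or the BUG_ON() above fires.
 */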
struct decryptor_desc {
	u8 iv[GSS_KRB5_MAX_BLOCKSIZE];
	struct skcipher_request *req;
	struct scatterlist frags[4];
	int fragno;
	int fraglen;
};

static int
decryptor(struct scatterlist *sg, void *data)
{
	struct decryptor_desc *desc = data;
	int thislen = desc->fraglen + sg->length;
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(desc->req);
	int fraglen, ret;

	/* Worst case is 4 fragments: head, end of page 1, start
	 * of page 2, tail.  Anything more is a bug. */
	BUG_ON(desc->fragno > 3);
	sg_set_page(&desc->frags[desc->fragno], sg_page(sg), sg->length,
		    sg->offset);
	desc->fragno++;
	desc->fraglen += sg->length;

	fraglen = thislen & (crypto_skcipher_blocksize(tfm) - 1);
	thislen -= fraglen;

	if (thislen == 0)
		return 0;

	sg_mark_end(&desc->frags[desc->fragno - 1]);

	skcipher_request_set_crypt(desc->req, desc->frags, desc->frags,
				   thislen, desc->iv);

	ret = crypto_skcipher_decrypt(desc->req);
	if (ret)
		return ret;

	sg_init_table(desc->frags, 4);

	if (fraglen) {
		sg_set_page(&desc->frags[0], sg_page(sg), fraglen,
			    sg->offset + sg->length - fraglen);
		desc->fragno = 1;
		desc->fraglen = fraglen;
	} else {
		desc->fragno = 0;
		desc->fraglen = 0;
	}
	return 0;
}

int
gss_decrypt_xdr_buf(struct crypto_skcipher *tfm, struct xdr_buf *buf,
		    int offset)
{
	int ret;
	struct decryptor_desc desc;
	SKCIPHER_REQUEST_ON_STACK(req, tfm);

	/* XXXJBF: */
	BUG_ON((buf->len - offset) % crypto_skcipher_blocksize(tfm) != 0);

	skcipher_request_set_tfm(req, tfm);
	skcipher_request_set_callback(req, 0, NULL, NULL);

	memset(desc.iv, 0, sizeof(desc.iv));
	desc.req = req;
	desc.fragno = 0;
	desc.fraglen = 0;

	sg_init_table(desc.frags, 4);

	ret = xdr_process_buf(buf, offset, buf->len - offset, decryptor, &desc);
	skcipher_request_zero(req);
	return ret;
}

/*
 * This function makes the assumption that it was ultimately called
 * from gss_wrap().
 *
 * The client auth_gss code moves any existing tail data into a
 * separate page before calling gss_wrap.
 * The server svcauth_gss code ensures that both the head and the
 * tail have slack space of RPC_MAX_AUTH_SIZE before calling gss_wrap.
 *
 * Even with that guarantee, this function may be called more than
 * once in the processing of gss_wrap().  The best we can do is
 * verify at compile-time (see GSS_KRB5_SLACK_CHECK) that the
 * largest expected shift will fit within RPC_MAX_AUTH_SIZE.
 * At run-time we can verify that a single invocation of this
 * function doesn't attempt to use more than RPC_MAX_AUTH_SIZE.
 */

int
xdr_extend_head(struct xdr_buf *buf, unsigned int base, unsigned int shiftlen)
{
	u8 *p;

	if (shiftlen == 0)
		return 0;

	BUILD_BUG_ON(GSS_KRB5_MAX_SLACK_NEEDED > RPC_MAX_AUTH_SIZE);
	BUG_ON(shiftlen > RPC_MAX_AUTH_SIZE);

	p = buf->head[0].iov_base + base;

	memmove(p + shiftlen, p, buf->head[0].iov_len - base);

	buf->head[0].iov_len += shiftlen;
	buf->len += shiftlen;

	return 0;
}
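
/*
 * gss_krb5_cts_crypt() handles only the final one or two blocks of a
 * message: the bulk of the data has already been run through plain CBC
 * (the "aux" cipher), and the cipher passed in here is the
 * ciphertext-stealing variant (e.g. "cts(cbc(aes))") that deals with a
 * trailing partial block as required by RFC 3962.  That is why at most
 * 2 * GSS_KRB5_MAX_BLOCKSIZE bytes are ever expected here.
 */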
static u32
gss_krb5_cts_crypt(struct crypto_skcipher *cipher, struct xdr_buf *buf,
		   u32 offset, u8 *iv, struct page **pages, int encrypt)
{
	u32 ret;
	struct scatterlist sg[1];
	SKCIPHER_REQUEST_ON_STACK(req, cipher);
	u8 *data;
	struct page **save_pages;
	u32 len = buf->len - offset;

	if (len > GSS_KRB5_MAX_BLOCKSIZE * 2) {
		WARN_ON(0);
		return -ENOMEM;
	}
	data = kmalloc(GSS_KRB5_MAX_BLOCKSIZE * 2, GFP_NOFS);
	if (!data)
		return -ENOMEM;

	/*
	 * For encryption, we want to read from the cleartext
	 * page cache pages, and write the encrypted data to
	 * the supplied xdr_buf pages.
	 */
	save_pages = buf->pages;
	if (encrypt)
		buf->pages = pages;

	ret = read_bytes_from_xdr_buf(buf, offset, data, len);
	buf->pages = save_pages;
	if (ret)
		goto out;

	sg_init_one(sg, data, len);

	skcipher_request_set_tfm(req, cipher);
	skcipher_request_set_callback(req, 0, NULL, NULL);
	skcipher_request_set_crypt(req, sg, sg, len, iv);

	if (encrypt)
		ret = crypto_skcipher_encrypt(req);
	else
		ret = crypto_skcipher_decrypt(req);

	skcipher_request_zero(req);

	if (ret)
		goto out;

	ret = write_bytes_to_xdr_buf(buf, offset, data, len);

out:
	kfree(data);
	return ret;
}
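
/*
 * Wire layout produced by gss_krb5_aes_encrypt() below for the
 * RFC 4121 Wrap token with confidentiality:
 *
 *	| 16-byte token header | E(confounder | plaintext | header copy) | HMAC |
 *
 * The plaintext token header is copied behind the payload before
 * encryption, and the HMAC is computed over the plaintext confounder,
 * data, and header copy, then appended to the tail.
 */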
u32
gss_krb5_aes_encrypt(struct krb5_ctx *kctx, u32 offset,
		     struct xdr_buf *buf, struct page **pages)
{
	u32 err;
	struct xdr_netobj hmac;
	u8 *cksumkey;
	u8 *ecptr;
	struct crypto_skcipher *cipher, *aux_cipher;
	int blocksize;
	struct page **save_pages;
	int nblocks, nbytes;
	struct encryptor_desc desc;
	u32 cbcbytes;
	unsigned int usage;

	if (kctx->initiate) {
		cipher = kctx->initiator_enc;
		aux_cipher = kctx->initiator_enc_aux;
		cksumkey = kctx->initiator_integ;
		usage = KG_USAGE_INITIATOR_SEAL;
	} else {
		cipher = kctx->acceptor_enc;
		aux_cipher = kctx->acceptor_enc_aux;
		cksumkey = kctx->acceptor_integ;
		usage = KG_USAGE_ACCEPTOR_SEAL;
	}
	blocksize = crypto_skcipher_blocksize(cipher);

	/* hide the gss token header and insert the confounder */
	offset += GSS_KRB5_TOK_HDR_LEN;
	if (xdr_extend_head(buf, offset, kctx->gk5e->conflen))
		return GSS_S_FAILURE;
	gss_krb5_make_confounder(buf->head[0].iov_base + offset, kctx->gk5e->conflen);
	offset -= GSS_KRB5_TOK_HDR_LEN;

	if (buf->tail[0].iov_base != NULL) {
		ecptr = buf->tail[0].iov_base + buf->tail[0].iov_len;
	} else {
		buf->tail[0].iov_base = buf->head[0].iov_base
			+ buf->head[0].iov_len;
		buf->tail[0].iov_len = 0;
		ecptr = buf->tail[0].iov_base;
	}

	/* copy plaintext gss token header after filler (if any) */
	memcpy(ecptr, buf->head[0].iov_base + offset, GSS_KRB5_TOK_HDR_LEN);
	buf->tail[0].iov_len += GSS_KRB5_TOK_HDR_LEN;
	buf->len += GSS_KRB5_TOK_HDR_LEN;

	/* Do the HMAC */
	hmac.len = GSS_KRB5_MAX_CKSUM_LEN;
	hmac.data = buf->tail[0].iov_base + buf->tail[0].iov_len;

	/*
	 * When we are called, pages points to the real page cache
	 * data -- which we can't go and encrypt!  buf->pages points
	 * to scratch pages which we are going to send off to the
	 * client/server.  Swap in the plaintext pages to calculate
	 * the hmac.
	 */
	save_pages = buf->pages;
	buf->pages = pages;

	err = make_checksum_v2(kctx, NULL, 0, buf,
			       offset + GSS_KRB5_TOK_HDR_LEN,
			       cksumkey, usage, &hmac);
	buf->pages = save_pages;
	if (err)
		return GSS_S_FAILURE;

	nbytes = buf->len - offset - GSS_KRB5_TOK_HDR_LEN;
	nblocks = (nbytes + blocksize - 1) / blocksize;
	cbcbytes = 0;
	if (nblocks > 2)
		cbcbytes = (nblocks - 2) * blocksize;

	memset(desc.iv, 0, sizeof(desc.iv));

	if (cbcbytes) {
		SKCIPHER_REQUEST_ON_STACK(req, aux_cipher);

		desc.pos = offset + GSS_KRB5_TOK_HDR_LEN;
		desc.fragno = 0;
		desc.fraglen = 0;
		desc.pages = pages;
		desc.outbuf = buf;
		desc.req = req;

		skcipher_request_set_tfm(req, aux_cipher);
		skcipher_request_set_callback(req, 0, NULL, NULL);

		sg_init_table(desc.infrags, 4);
		sg_init_table(desc.outfrags, 4);

		err = xdr_process_buf(buf, offset + GSS_KRB5_TOK_HDR_LEN,
				      cbcbytes, encryptor, &desc);
		skcipher_request_zero(req);
		if (err)
			goto out_err;
	}

	/* Make sure IV carries forward from any CBC results. */
	err = gss_krb5_cts_crypt(cipher, buf,
				 offset + GSS_KRB5_TOK_HDR_LEN + cbcbytes,
				 desc.iv, pages, 1);
	if (err) {
		err = GSS_S_FAILURE;
		goto out_err;
	}

	/* Now update buf to account for HMAC */
	buf->tail[0].iov_len += kctx->gk5e->cksumlength;
	buf->len += kctx->gk5e->cksumlength;

out_err:
	if (err)
		err = GSS_S_FAILURE;
	return err;
}
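
/*
 * The decrypt side mirrors gss_krb5_aes_encrypt(): carve out a
 * subsegment that skips the token header and trailing checksum,
 * CBC-decrypt all but the last two blocks with the aux cipher, finish
 * with CTS, then recompute the HMAC over the recovered plaintext and
 * compare it (in constant time, via crypto_memneq()) against the value
 * carried in the packet.
 */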
u32
gss_krb5_aes_decrypt(struct krb5_ctx *kctx, u32 offset, struct xdr_buf *buf,
		     u32 *headskip, u32 *tailskip)
{
	struct xdr_buf subbuf;
	u32 ret = 0;
	u8 *cksum_key;
	struct crypto_skcipher *cipher, *aux_cipher;
	struct xdr_netobj our_hmac_obj;
	u8 our_hmac[GSS_KRB5_MAX_CKSUM_LEN];
	u8 pkt_hmac[GSS_KRB5_MAX_CKSUM_LEN];
	int nblocks, blocksize, cbcbytes;
	struct decryptor_desc desc;
	unsigned int usage;

	if (kctx->initiate) {
		cipher = kctx->acceptor_enc;
		aux_cipher = kctx->acceptor_enc_aux;
		cksum_key = kctx->acceptor_integ;
		usage = KG_USAGE_ACCEPTOR_SEAL;
	} else {
		cipher = kctx->initiator_enc;
		aux_cipher = kctx->initiator_enc_aux;
		cksum_key = kctx->initiator_integ;
		usage = KG_USAGE_INITIATOR_SEAL;
	}
	blocksize = crypto_skcipher_blocksize(cipher);

	/* create a segment skipping the header and leaving out the checksum */
	xdr_buf_subsegment(buf, &subbuf, offset + GSS_KRB5_TOK_HDR_LEN,
			   (buf->len - offset - GSS_KRB5_TOK_HDR_LEN -
			    kctx->gk5e->cksumlength));

	nblocks = (subbuf.len + blocksize - 1) / blocksize;

	cbcbytes = 0;
	if (nblocks > 2)
		cbcbytes = (nblocks - 2) * blocksize;

	memset(desc.iv, 0, sizeof(desc.iv));

	if (cbcbytes) {
		SKCIPHER_REQUEST_ON_STACK(req, aux_cipher);

		desc.fragno = 0;
		desc.fraglen = 0;
		desc.req = req;

		skcipher_request_set_tfm(req, aux_cipher);
		skcipher_request_set_callback(req, 0, NULL, NULL);

		sg_init_table(desc.frags, 4);

		ret = xdr_process_buf(&subbuf, 0, cbcbytes, decryptor, &desc);
		skcipher_request_zero(req);
		if (ret)
			goto out_err;
	}

	/* Make sure IV carries forward from any CBC results. */
	ret = gss_krb5_cts_crypt(cipher, &subbuf, cbcbytes, desc.iv, NULL, 0);
	if (ret)
		goto out_err;

	/* Calculate our hmac over the plaintext data */
	our_hmac_obj.len = sizeof(our_hmac);
	our_hmac_obj.data = our_hmac;

	ret = make_checksum_v2(kctx, NULL, 0, &subbuf, 0,
			       cksum_key, usage, &our_hmac_obj);
	if (ret)
		goto out_err;

	/* Get the packet's hmac value */
	ret = read_bytes_from_xdr_buf(buf, buf->len - kctx->gk5e->cksumlength,
				      pkt_hmac, kctx->gk5e->cksumlength);
	if (ret)
		goto out_err;

	if (crypto_memneq(pkt_hmac, our_hmac, kctx->gk5e->cksumlength) != 0) {
		ret = GSS_S_BAD_SIG;
		goto out_err;
	}
	*headskip = kctx->gk5e->conflen;
	*tailskip = kctx->gk5e->cksumlength;
out_err:
	if (ret && ret != GSS_S_BAD_SIG)
		ret = GSS_S_FAILURE;
	return ret;
}

/*
 * Compute Kseq given the initial session key and the checksum.
 * Set the key of the given cipher.
 */
int
krb5_rc4_setup_seq_key(struct krb5_ctx *kctx, struct crypto_skcipher *cipher,
		       unsigned char *cksum)
{
	struct crypto_shash *hmac;
	struct shash_desc *desc;
	u8 Kseq[GSS_KRB5_MAX_KEYLEN];
	u32 zeroconstant = 0;
	int err;

	dprintk("%s: entered\n", __func__);

	hmac = crypto_alloc_shash(kctx->gk5e->cksum_name, 0, 0);
	if (IS_ERR(hmac)) {
		dprintk("%s: error %ld, allocating hash '%s'\n",
			__func__, PTR_ERR(hmac), kctx->gk5e->cksum_name);
		return PTR_ERR(hmac);
	}

	desc = kmalloc(sizeof(*desc) + crypto_shash_descsize(hmac),
		       GFP_NOFS);
	if (!desc) {
		dprintk("%s: failed to allocate shash descriptor for '%s'\n",
			__func__, kctx->gk5e->cksum_name);
		crypto_free_shash(hmac);
		return -ENOMEM;
	}

	desc->tfm = hmac;
	desc->flags = 0;

	/* Compute intermediate Kseq from session key */
	err = crypto_shash_setkey(hmac, kctx->Ksess, kctx->gk5e->keylength);
	if (err)
		goto out_err;

	err = crypto_shash_digest(desc, (u8 *)&zeroconstant, 4, Kseq);
	if (err)
		goto out_err;

	/* Compute final Kseq from the checksum and intermediate Kseq */
	err = crypto_shash_setkey(hmac, Kseq, kctx->gk5e->keylength);
	if (err)
		goto out_err;

	err = crypto_shash_digest(desc, cksum, 8, Kseq);
	if (err)
		goto out_err;

	err = crypto_skcipher_setkey(cipher, Kseq, kctx->gk5e->keylength);
	if (err)
		goto out_err;

	err = 0;

out_err:
	kzfree(desc);
	crypto_free_shash(hmac);
	dprintk("%s: returning %d\n", __func__, err);
	return err;
}
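
/*
 * Both RC4 key derivations follow the same two-step HMAC pattern used
 * by RC4-HMAC (cf. RFC 4757).  In pseudo-code:
 *
 *	Kseq   = HMAC(HMAC(Ksess, 0x00000000), checksum[0..7])
 *	Kcrypt = HMAC(HMAC(Ksess ^ 0xf0 (each byte), 0x00000000), seqnum)
 *
 * where seqnum is the sequence number as four big-endian bytes.
 */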
/*
 * Compute Kcrypt given the initial session key and the plaintext seqnum.
 * Set the key of cipher kctx->enc.
 */
int
krb5_rc4_setup_enc_key(struct krb5_ctx *kctx, struct crypto_skcipher *cipher,
		       s32 seqnum)
{
	struct crypto_shash *hmac;
	struct shash_desc *desc;
	u8 Kcrypt[GSS_KRB5_MAX_KEYLEN];
	u8 zeroconstant[4] = {0};
	u8 seqnumarray[4];
	int err, i;

	dprintk("%s: entered, seqnum %u\n", __func__, seqnum);

	hmac = crypto_alloc_shash(kctx->gk5e->cksum_name, 0, 0);
	if (IS_ERR(hmac)) {
		dprintk("%s: error %ld, allocating hash '%s'\n",
			__func__, PTR_ERR(hmac), kctx->gk5e->cksum_name);
		return PTR_ERR(hmac);
	}

	desc = kmalloc(sizeof(*desc) + crypto_shash_descsize(hmac),
		       GFP_NOFS);
	if (!desc) {
		dprintk("%s: failed to allocate shash descriptor for '%s'\n",
			__func__, kctx->gk5e->cksum_name);
		crypto_free_shash(hmac);
		return -ENOMEM;
	}

	desc->tfm = hmac;
	desc->flags = 0;

	/* Compute intermediate Kcrypt from session key */
	for (i = 0; i < kctx->gk5e->keylength; i++)
		Kcrypt[i] = kctx->Ksess[i] ^ 0xf0;

	err = crypto_shash_setkey(hmac, Kcrypt, kctx->gk5e->keylength);
	if (err)
		goto out_err;

	err = crypto_shash_digest(desc, zeroconstant, 4, Kcrypt);
	if (err)
		goto out_err;

	/* Compute final Kcrypt from the seqnum and intermediate Kcrypt */
	err = crypto_shash_setkey(hmac, Kcrypt, kctx->gk5e->keylength);
	if (err)
		goto out_err;

	seqnumarray[0] = (unsigned char) ((seqnum >> 24) & 0xff);
	seqnumarray[1] = (unsigned char) ((seqnum >> 16) & 0xff);
	seqnumarray[2] = (unsigned char) ((seqnum >> 8) & 0xff);
	seqnumarray[3] = (unsigned char) ((seqnum >> 0) & 0xff);

	err = crypto_shash_digest(desc, seqnumarray, 4, Kcrypt);
	if (err)
		goto out_err;

	err = crypto_skcipher_setkey(cipher, Kcrypt, kctx->gk5e->keylength);
	if (err)
		goto out_err;

	err = 0;

out_err:
	kzfree(desc);
	crypto_free_shash(hmac);
	dprintk("%s: returning %d\n", __func__, err);
	return err;
}