1 /* 2 * linux/net/sunrpc/gss_krb5_crypto.c 3 * 4 * Copyright (c) 2000-2008 The Regents of the University of Michigan. 5 * All rights reserved. 6 * 7 * Andy Adamson <andros@umich.edu> 8 * Bruce Fields <bfields@umich.edu> 9 */ 10 11 /* 12 * Copyright (C) 1998 by the FundsXpress, INC. 13 * 14 * All rights reserved. 15 * 16 * Export of this software from the United States of America may require 17 * a specific license from the United States Government. It is the 18 * responsibility of any person or organization contemplating export to 19 * obtain such a license before exporting. 20 * 21 * WITHIN THAT CONSTRAINT, permission to use, copy, modify, and 22 * distribute this software and its documentation for any purpose and 23 * without fee is hereby granted, provided that the above copyright 24 * notice appear in all copies and that both that copyright notice and 25 * this permission notice appear in supporting documentation, and that 26 * the name of FundsXpress. not be used in advertising or publicity pertaining 27 * to distribution of the software without specific, written prior 28 * permission. FundsXpress makes no representations about the suitability of 29 * this software for any purpose. It is provided "as is" without express 30 * or implied warranty. 31 * 32 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR 33 * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED 34 * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. 35 */ 36 37 #include <crypto/hash.h> 38 #include <crypto/skcipher.h> 39 #include <linux/err.h> 40 #include <linux/types.h> 41 #include <linux/mm.h> 42 #include <linux/scatterlist.h> 43 #include <linux/highmem.h> 44 #include <linux/pagemap.h> 45 #include <linux/random.h> 46 #include <linux/sunrpc/gss_krb5.h> 47 #include <linux/sunrpc/xdr.h> 48 49 #if IS_ENABLED(CONFIG_SUNRPC_DEBUG) 50 # define RPCDBG_FACILITY RPCDBG_AUTH 51 #endif 52 53 u32 54 krb5_encrypt( 55 struct crypto_skcipher *tfm, 56 void * iv, 57 void * in, 58 void * out, 59 int length) 60 { 61 u32 ret = -EINVAL; 62 struct scatterlist sg[1]; 63 u8 local_iv[GSS_KRB5_MAX_BLOCKSIZE] = {0}; 64 SKCIPHER_REQUEST_ON_STACK(req, tfm); 65 66 if (length % crypto_skcipher_blocksize(tfm) != 0) 67 goto out; 68 69 if (crypto_skcipher_ivsize(tfm) > GSS_KRB5_MAX_BLOCKSIZE) { 70 dprintk("RPC: gss_k5encrypt: tfm iv size too large %d\n", 71 crypto_skcipher_ivsize(tfm)); 72 goto out; 73 } 74 75 if (iv) 76 memcpy(local_iv, iv, crypto_skcipher_ivsize(tfm)); 77 78 memcpy(out, in, length); 79 sg_init_one(sg, out, length); 80 81 skcipher_request_set_callback(req, 0, NULL, NULL); 82 skcipher_request_set_crypt(req, sg, sg, length, local_iv); 83 84 ret = crypto_skcipher_encrypt(req); 85 skcipher_request_zero(req); 86 out: 87 dprintk("RPC: krb5_encrypt returns %d\n", ret); 88 return ret; 89 } 90 91 u32 92 krb5_decrypt( 93 struct crypto_skcipher *tfm, 94 void * iv, 95 void * in, 96 void * out, 97 int length) 98 { 99 u32 ret = -EINVAL; 100 struct scatterlist sg[1]; 101 u8 local_iv[GSS_KRB5_MAX_BLOCKSIZE] = {0}; 102 SKCIPHER_REQUEST_ON_STACK(req, tfm); 103 104 if (length % crypto_skcipher_blocksize(tfm) != 0) 105 goto out; 106 107 if (crypto_skcipher_ivsize(tfm) > GSS_KRB5_MAX_BLOCKSIZE) { 108 dprintk("RPC: gss_k5decrypt: tfm iv size too large %d\n", 109 crypto_skcipher_ivsize(tfm)); 110 goto out; 111 } 112 if (iv) 113 memcpy(local_iv,iv, crypto_skcipher_ivsize(tfm)); 114 115 memcpy(out, in, length); 116 sg_init_one(sg, out, length); 117 118 skcipher_request_set_callback(req, 0, NULL, NULL); 119 skcipher_request_set_crypt(req, sg, sg, length, local_iv); 120 121 ret = crypto_skcipher_decrypt(req); 122 skcipher_request_zero(req); 123 out: 124 dprintk("RPC: gss_k5decrypt returns %d\n",ret); 125 return ret; 126 } 127 128 static int 129 checksummer(struct scatterlist *sg, void *data) 130 { 131 struct ahash_request *req = data; 132 133 ahash_request_set_crypt(req, sg, NULL, sg->length); 134 135 return crypto_ahash_update(req); 136 } 137 138 static int 139 arcfour_hmac_md5_usage_to_salt(unsigned int usage, u8 salt[4]) 140 { 141 unsigned int ms_usage; 142 143 switch (usage) { 144 case KG_USAGE_SIGN: 145 ms_usage = 15; 146 break; 147 case KG_USAGE_SEAL: 148 ms_usage = 13; 149 break; 150 default: 151 return -EINVAL; 152 } 153 salt[0] = (ms_usage >> 0) & 0xff; 154 salt[1] = (ms_usage >> 8) & 0xff; 155 salt[2] = (ms_usage >> 16) & 0xff; 156 salt[3] = (ms_usage >> 24) & 0xff; 157 158 return 0; 159 } 160 161 static u32 162 make_checksum_hmac_md5(struct krb5_ctx *kctx, char *header, int hdrlen, 163 struct xdr_buf *body, int body_offset, u8 *cksumkey, 164 unsigned int usage, struct xdr_netobj *cksumout) 165 { 166 struct scatterlist sg[1]; 167 int err; 168 u8 checksumdata[GSS_KRB5_MAX_CKSUM_LEN]; 169 u8 rc4salt[4]; 170 struct crypto_ahash *md5; 171 struct crypto_ahash *hmac_md5; 172 struct ahash_request *req; 173 174 if (cksumkey == NULL) 175 return GSS_S_FAILURE; 176 177 if (cksumout->len < kctx->gk5e->cksumlength) { 178 dprintk("%s: checksum buffer length, %u, too small for %s\n", 179 __func__, cksumout->len, kctx->gk5e->name); 180 return GSS_S_FAILURE; 181 } 182 183 if (arcfour_hmac_md5_usage_to_salt(usage, rc4salt)) { 184 dprintk("%s: invalid usage value %u\n", __func__, usage); 185 return GSS_S_FAILURE; 186 } 187 188 md5 = crypto_alloc_ahash("md5", 0, CRYPTO_ALG_ASYNC); 189 if (IS_ERR(md5)) 190 return GSS_S_FAILURE; 191 192 hmac_md5 = crypto_alloc_ahash(kctx->gk5e->cksum_name, 0, 193 CRYPTO_ALG_ASYNC); 194 if (IS_ERR(hmac_md5)) { 195 crypto_free_ahash(md5); 196 return GSS_S_FAILURE; 197 } 198 199 req = ahash_request_alloc(md5, GFP_KERNEL); 200 if (!req) { 201 crypto_free_ahash(hmac_md5); 202 crypto_free_ahash(md5); 203 return GSS_S_FAILURE; 204 } 205 206 ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP, NULL, NULL); 207 208 err = crypto_ahash_init(req); 209 if (err) 210 goto out; 211 sg_init_one(sg, rc4salt, 4); 212 ahash_request_set_crypt(req, sg, NULL, 4); 213 err = crypto_ahash_update(req); 214 if (err) 215 goto out; 216 217 sg_init_one(sg, header, hdrlen); 218 ahash_request_set_crypt(req, sg, NULL, hdrlen); 219 err = crypto_ahash_update(req); 220 if (err) 221 goto out; 222 err = xdr_process_buf(body, body_offset, body->len - body_offset, 223 checksummer, req); 224 if (err) 225 goto out; 226 ahash_request_set_crypt(req, NULL, checksumdata, 0); 227 err = crypto_ahash_final(req); 228 if (err) 229 goto out; 230 231 ahash_request_free(req); 232 req = ahash_request_alloc(hmac_md5, GFP_KERNEL); 233 if (!req) { 234 crypto_free_ahash(hmac_md5); 235 crypto_free_ahash(md5); 236 return GSS_S_FAILURE; 237 } 238 239 ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP, NULL, NULL); 240 241 err = crypto_ahash_init(req); 242 if (err) 243 goto out; 244 err = crypto_ahash_setkey(hmac_md5, cksumkey, kctx->gk5e->keylength); 245 if (err) 246 goto out; 247 248 sg_init_one(sg, checksumdata, crypto_ahash_digestsize(md5)); 249 ahash_request_set_crypt(req, sg, checksumdata, 250 crypto_ahash_digestsize(md5)); 251 err = crypto_ahash_digest(req); 252 if (err) 253 goto out; 254 255 memcpy(cksumout->data, checksumdata, kctx->gk5e->cksumlength); 256 cksumout->len = kctx->gk5e->cksumlength; 257 out: 258 ahash_request_free(req); 259 crypto_free_ahash(md5); 260 crypto_free_ahash(hmac_md5); 261 return err ? GSS_S_FAILURE : 0; 262 } 263 264 /* 265 * checksum the plaintext data and hdrlen bytes of the token header 266 * The checksum is performed over the first 8 bytes of the 267 * gss token header and then over the data body 268 */ 269 u32 270 make_checksum(struct krb5_ctx *kctx, char *header, int hdrlen, 271 struct xdr_buf *body, int body_offset, u8 *cksumkey, 272 unsigned int usage, struct xdr_netobj *cksumout) 273 { 274 struct crypto_ahash *tfm; 275 struct ahash_request *req; 276 struct scatterlist sg[1]; 277 int err; 278 u8 checksumdata[GSS_KRB5_MAX_CKSUM_LEN]; 279 unsigned int checksumlen; 280 281 if (kctx->gk5e->ctype == CKSUMTYPE_HMAC_MD5_ARCFOUR) 282 return make_checksum_hmac_md5(kctx, header, hdrlen, 283 body, body_offset, 284 cksumkey, usage, cksumout); 285 286 if (cksumout->len < kctx->gk5e->cksumlength) { 287 dprintk("%s: checksum buffer length, %u, too small for %s\n", 288 __func__, cksumout->len, kctx->gk5e->name); 289 return GSS_S_FAILURE; 290 } 291 292 tfm = crypto_alloc_ahash(kctx->gk5e->cksum_name, 0, CRYPTO_ALG_ASYNC); 293 if (IS_ERR(tfm)) 294 return GSS_S_FAILURE; 295 296 req = ahash_request_alloc(tfm, GFP_KERNEL); 297 if (!req) { 298 crypto_free_ahash(tfm); 299 return GSS_S_FAILURE; 300 } 301 302 ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP, NULL, NULL); 303 304 checksumlen = crypto_ahash_digestsize(tfm); 305 306 if (cksumkey != NULL) { 307 err = crypto_ahash_setkey(tfm, cksumkey, 308 kctx->gk5e->keylength); 309 if (err) 310 goto out; 311 } 312 313 err = crypto_ahash_init(req); 314 if (err) 315 goto out; 316 sg_init_one(sg, header, hdrlen); 317 ahash_request_set_crypt(req, sg, NULL, hdrlen); 318 err = crypto_ahash_update(req); 319 if (err) 320 goto out; 321 err = xdr_process_buf(body, body_offset, body->len - body_offset, 322 checksummer, req); 323 if (err) 324 goto out; 325 ahash_request_set_crypt(req, NULL, checksumdata, 0); 326 err = crypto_ahash_final(req); 327 if (err) 328 goto out; 329 330 switch (kctx->gk5e->ctype) { 331 case CKSUMTYPE_RSA_MD5: 332 err = kctx->gk5e->encrypt(kctx->seq, NULL, checksumdata, 333 checksumdata, checksumlen); 334 if (err) 335 goto out; 336 memcpy(cksumout->data, 337 checksumdata + checksumlen - kctx->gk5e->cksumlength, 338 kctx->gk5e->cksumlength); 339 break; 340 case CKSUMTYPE_HMAC_SHA1_DES3: 341 memcpy(cksumout->data, checksumdata, kctx->gk5e->cksumlength); 342 break; 343 default: 344 BUG(); 345 break; 346 } 347 cksumout->len = kctx->gk5e->cksumlength; 348 out: 349 ahash_request_free(req); 350 crypto_free_ahash(tfm); 351 return err ? GSS_S_FAILURE : 0; 352 } 353 354 /* 355 * checksum the plaintext data and hdrlen bytes of the token header 356 * Per rfc4121, sec. 4.2.4, the checksum is performed over the data 357 * body then over the first 16 octets of the MIC token 358 * Inclusion of the header data in the calculation of the 359 * checksum is optional. 360 */ 361 u32 362 make_checksum_v2(struct krb5_ctx *kctx, char *header, int hdrlen, 363 struct xdr_buf *body, int body_offset, u8 *cksumkey, 364 unsigned int usage, struct xdr_netobj *cksumout) 365 { 366 struct crypto_ahash *tfm; 367 struct ahash_request *req; 368 struct scatterlist sg[1]; 369 int err; 370 u8 checksumdata[GSS_KRB5_MAX_CKSUM_LEN]; 371 unsigned int checksumlen; 372 373 if (kctx->gk5e->keyed_cksum == 0) { 374 dprintk("%s: expected keyed hash for %s\n", 375 __func__, kctx->gk5e->name); 376 return GSS_S_FAILURE; 377 } 378 if (cksumkey == NULL) { 379 dprintk("%s: no key supplied for %s\n", 380 __func__, kctx->gk5e->name); 381 return GSS_S_FAILURE; 382 } 383 384 tfm = crypto_alloc_ahash(kctx->gk5e->cksum_name, 0, CRYPTO_ALG_ASYNC); 385 if (IS_ERR(tfm)) 386 return GSS_S_FAILURE; 387 checksumlen = crypto_ahash_digestsize(tfm); 388 389 req = ahash_request_alloc(tfm, GFP_KERNEL); 390 if (!req) { 391 crypto_free_ahash(tfm); 392 return GSS_S_FAILURE; 393 } 394 395 ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP, NULL, NULL); 396 397 err = crypto_ahash_setkey(tfm, cksumkey, kctx->gk5e->keylength); 398 if (err) 399 goto out; 400 401 err = crypto_ahash_init(req); 402 if (err) 403 goto out; 404 err = xdr_process_buf(body, body_offset, body->len - body_offset, 405 checksummer, req); 406 if (err) 407 goto out; 408 if (header != NULL) { 409 sg_init_one(sg, header, hdrlen); 410 ahash_request_set_crypt(req, sg, NULL, hdrlen); 411 err = crypto_ahash_update(req); 412 if (err) 413 goto out; 414 } 415 ahash_request_set_crypt(req, NULL, checksumdata, 0); 416 err = crypto_ahash_final(req); 417 if (err) 418 goto out; 419 420 cksumout->len = kctx->gk5e->cksumlength; 421 422 switch (kctx->gk5e->ctype) { 423 case CKSUMTYPE_HMAC_SHA1_96_AES128: 424 case CKSUMTYPE_HMAC_SHA1_96_AES256: 425 /* note that this truncates the hash */ 426 memcpy(cksumout->data, checksumdata, kctx->gk5e->cksumlength); 427 break; 428 default: 429 BUG(); 430 break; 431 } 432 out: 433 ahash_request_free(req); 434 crypto_free_ahash(tfm); 435 return err ? GSS_S_FAILURE : 0; 436 } 437 438 struct encryptor_desc { 439 u8 iv[GSS_KRB5_MAX_BLOCKSIZE]; 440 struct skcipher_request *req; 441 int pos; 442 struct xdr_buf *outbuf; 443 struct page **pages; 444 struct scatterlist infrags[4]; 445 struct scatterlist outfrags[4]; 446 int fragno; 447 int fraglen; 448 }; 449 450 static int 451 encryptor(struct scatterlist *sg, void *data) 452 { 453 struct encryptor_desc *desc = data; 454 struct xdr_buf *outbuf = desc->outbuf; 455 struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(desc->req); 456 struct page *in_page; 457 int thislen = desc->fraglen + sg->length; 458 int fraglen, ret; 459 int page_pos; 460 461 /* Worst case is 4 fragments: head, end of page 1, start 462 * of page 2, tail. Anything more is a bug. */ 463 BUG_ON(desc->fragno > 3); 464 465 page_pos = desc->pos - outbuf->head[0].iov_len; 466 if (page_pos >= 0 && page_pos < outbuf->page_len) { 467 /* pages are not in place: */ 468 int i = (page_pos + outbuf->page_base) >> PAGE_SHIFT; 469 in_page = desc->pages[i]; 470 } else { 471 in_page = sg_page(sg); 472 } 473 sg_set_page(&desc->infrags[desc->fragno], in_page, sg->length, 474 sg->offset); 475 sg_set_page(&desc->outfrags[desc->fragno], sg_page(sg), sg->length, 476 sg->offset); 477 desc->fragno++; 478 desc->fraglen += sg->length; 479 desc->pos += sg->length; 480 481 fraglen = thislen & (crypto_skcipher_blocksize(tfm) - 1); 482 thislen -= fraglen; 483 484 if (thislen == 0) 485 return 0; 486 487 sg_mark_end(&desc->infrags[desc->fragno - 1]); 488 sg_mark_end(&desc->outfrags[desc->fragno - 1]); 489 490 skcipher_request_set_crypt(desc->req, desc->infrags, desc->outfrags, 491 thislen, desc->iv); 492 493 ret = crypto_skcipher_encrypt(desc->req); 494 if (ret) 495 return ret; 496 497 sg_init_table(desc->infrags, 4); 498 sg_init_table(desc->outfrags, 4); 499 500 if (fraglen) { 501 sg_set_page(&desc->outfrags[0], sg_page(sg), fraglen, 502 sg->offset + sg->length - fraglen); 503 desc->infrags[0] = desc->outfrags[0]; 504 sg_assign_page(&desc->infrags[0], in_page); 505 desc->fragno = 1; 506 desc->fraglen = fraglen; 507 } else { 508 desc->fragno = 0; 509 desc->fraglen = 0; 510 } 511 return 0; 512 } 513 514 int 515 gss_encrypt_xdr_buf(struct crypto_skcipher *tfm, struct xdr_buf *buf, 516 int offset, struct page **pages) 517 { 518 int ret; 519 struct encryptor_desc desc; 520 SKCIPHER_REQUEST_ON_STACK(req, tfm); 521 522 BUG_ON((buf->len - offset) % crypto_skcipher_blocksize(tfm) != 0); 523 524 skcipher_request_set_tfm(req, tfm); 525 skcipher_request_set_callback(req, 0, NULL, NULL); 526 527 memset(desc.iv, 0, sizeof(desc.iv)); 528 desc.req = req; 529 desc.pos = offset; 530 desc.outbuf = buf; 531 desc.pages = pages; 532 desc.fragno = 0; 533 desc.fraglen = 0; 534 535 sg_init_table(desc.infrags, 4); 536 sg_init_table(desc.outfrags, 4); 537 538 ret = xdr_process_buf(buf, offset, buf->len - offset, encryptor, &desc); 539 skcipher_request_zero(req); 540 return ret; 541 } 542 543 struct decryptor_desc { 544 u8 iv[GSS_KRB5_MAX_BLOCKSIZE]; 545 struct skcipher_request *req; 546 struct scatterlist frags[4]; 547 int fragno; 548 int fraglen; 549 }; 550 551 static int 552 decryptor(struct scatterlist *sg, void *data) 553 { 554 struct decryptor_desc *desc = data; 555 int thislen = desc->fraglen + sg->length; 556 struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(desc->req); 557 int fraglen, ret; 558 559 /* Worst case is 4 fragments: head, end of page 1, start 560 * of page 2, tail. Anything more is a bug. */ 561 BUG_ON(desc->fragno > 3); 562 sg_set_page(&desc->frags[desc->fragno], sg_page(sg), sg->length, 563 sg->offset); 564 desc->fragno++; 565 desc->fraglen += sg->length; 566 567 fraglen = thislen & (crypto_skcipher_blocksize(tfm) - 1); 568 thislen -= fraglen; 569 570 if (thislen == 0) 571 return 0; 572 573 sg_mark_end(&desc->frags[desc->fragno - 1]); 574 575 skcipher_request_set_crypt(desc->req, desc->frags, desc->frags, 576 thislen, desc->iv); 577 578 ret = crypto_skcipher_decrypt(desc->req); 579 if (ret) 580 return ret; 581 582 sg_init_table(desc->frags, 4); 583 584 if (fraglen) { 585 sg_set_page(&desc->frags[0], sg_page(sg), fraglen, 586 sg->offset + sg->length - fraglen); 587 desc->fragno = 1; 588 desc->fraglen = fraglen; 589 } else { 590 desc->fragno = 0; 591 desc->fraglen = 0; 592 } 593 return 0; 594 } 595 596 int 597 gss_decrypt_xdr_buf(struct crypto_skcipher *tfm, struct xdr_buf *buf, 598 int offset) 599 { 600 int ret; 601 struct decryptor_desc desc; 602 SKCIPHER_REQUEST_ON_STACK(req, tfm); 603 604 /* XXXJBF: */ 605 BUG_ON((buf->len - offset) % crypto_skcipher_blocksize(tfm) != 0); 606 607 skcipher_request_set_tfm(req, tfm); 608 skcipher_request_set_callback(req, 0, NULL, NULL); 609 610 memset(desc.iv, 0, sizeof(desc.iv)); 611 desc.req = req; 612 desc.fragno = 0; 613 desc.fraglen = 0; 614 615 sg_init_table(desc.frags, 4); 616 617 ret = xdr_process_buf(buf, offset, buf->len - offset, decryptor, &desc); 618 skcipher_request_zero(req); 619 return ret; 620 } 621 622 /* 623 * This function makes the assumption that it was ultimately called 624 * from gss_wrap(). 625 * 626 * The client auth_gss code moves any existing tail data into a 627 * separate page before calling gss_wrap. 628 * The server svcauth_gss code ensures that both the head and the 629 * tail have slack space of RPC_MAX_AUTH_SIZE before calling gss_wrap. 630 * 631 * Even with that guarantee, this function may be called more than 632 * once in the processing of gss_wrap(). The best we can do is 633 * verify at compile-time (see GSS_KRB5_SLACK_CHECK) that the 634 * largest expected shift will fit within RPC_MAX_AUTH_SIZE. 635 * At run-time we can verify that a single invocation of this 636 * function doesn't attempt to use more the RPC_MAX_AUTH_SIZE. 637 */ 638 639 int 640 xdr_extend_head(struct xdr_buf *buf, unsigned int base, unsigned int shiftlen) 641 { 642 u8 *p; 643 644 if (shiftlen == 0) 645 return 0; 646 647 BUILD_BUG_ON(GSS_KRB5_MAX_SLACK_NEEDED > RPC_MAX_AUTH_SIZE); 648 BUG_ON(shiftlen > RPC_MAX_AUTH_SIZE); 649 650 p = buf->head[0].iov_base + base; 651 652 memmove(p + shiftlen, p, buf->head[0].iov_len - base); 653 654 buf->head[0].iov_len += shiftlen; 655 buf->len += shiftlen; 656 657 return 0; 658 } 659 660 static u32 661 gss_krb5_cts_crypt(struct crypto_skcipher *cipher, struct xdr_buf *buf, 662 u32 offset, u8 *iv, struct page **pages, int encrypt) 663 { 664 u32 ret; 665 struct scatterlist sg[1]; 666 SKCIPHER_REQUEST_ON_STACK(req, cipher); 667 u8 data[GSS_KRB5_MAX_BLOCKSIZE * 2]; 668 struct page **save_pages; 669 u32 len = buf->len - offset; 670 671 if (len > ARRAY_SIZE(data)) { 672 WARN_ON(0); 673 return -ENOMEM; 674 } 675 676 /* 677 * For encryption, we want to read from the cleartext 678 * page cache pages, and write the encrypted data to 679 * the supplied xdr_buf pages. 680 */ 681 save_pages = buf->pages; 682 if (encrypt) 683 buf->pages = pages; 684 685 ret = read_bytes_from_xdr_buf(buf, offset, data, len); 686 buf->pages = save_pages; 687 if (ret) 688 goto out; 689 690 sg_init_one(sg, data, len); 691 692 skcipher_request_set_tfm(req, cipher); 693 skcipher_request_set_callback(req, 0, NULL, NULL); 694 skcipher_request_set_crypt(req, sg, sg, len, iv); 695 696 if (encrypt) 697 ret = crypto_skcipher_encrypt(req); 698 else 699 ret = crypto_skcipher_decrypt(req); 700 701 skcipher_request_zero(req); 702 703 if (ret) 704 goto out; 705 706 ret = write_bytes_to_xdr_buf(buf, offset, data, len); 707 708 out: 709 return ret; 710 } 711 712 u32 713 gss_krb5_aes_encrypt(struct krb5_ctx *kctx, u32 offset, 714 struct xdr_buf *buf, struct page **pages) 715 { 716 u32 err; 717 struct xdr_netobj hmac; 718 u8 *cksumkey; 719 u8 *ecptr; 720 struct crypto_skcipher *cipher, *aux_cipher; 721 int blocksize; 722 struct page **save_pages; 723 int nblocks, nbytes; 724 struct encryptor_desc desc; 725 u32 cbcbytes; 726 unsigned int usage; 727 728 if (kctx->initiate) { 729 cipher = kctx->initiator_enc; 730 aux_cipher = kctx->initiator_enc_aux; 731 cksumkey = kctx->initiator_integ; 732 usage = KG_USAGE_INITIATOR_SEAL; 733 } else { 734 cipher = kctx->acceptor_enc; 735 aux_cipher = kctx->acceptor_enc_aux; 736 cksumkey = kctx->acceptor_integ; 737 usage = KG_USAGE_ACCEPTOR_SEAL; 738 } 739 blocksize = crypto_skcipher_blocksize(cipher); 740 741 /* hide the gss token header and insert the confounder */ 742 offset += GSS_KRB5_TOK_HDR_LEN; 743 if (xdr_extend_head(buf, offset, kctx->gk5e->conflen)) 744 return GSS_S_FAILURE; 745 gss_krb5_make_confounder(buf->head[0].iov_base + offset, kctx->gk5e->conflen); 746 offset -= GSS_KRB5_TOK_HDR_LEN; 747 748 if (buf->tail[0].iov_base != NULL) { 749 ecptr = buf->tail[0].iov_base + buf->tail[0].iov_len; 750 } else { 751 buf->tail[0].iov_base = buf->head[0].iov_base 752 + buf->head[0].iov_len; 753 buf->tail[0].iov_len = 0; 754 ecptr = buf->tail[0].iov_base; 755 } 756 757 /* copy plaintext gss token header after filler (if any) */ 758 memcpy(ecptr, buf->head[0].iov_base + offset, GSS_KRB5_TOK_HDR_LEN); 759 buf->tail[0].iov_len += GSS_KRB5_TOK_HDR_LEN; 760 buf->len += GSS_KRB5_TOK_HDR_LEN; 761 762 /* Do the HMAC */ 763 hmac.len = GSS_KRB5_MAX_CKSUM_LEN; 764 hmac.data = buf->tail[0].iov_base + buf->tail[0].iov_len; 765 766 /* 767 * When we are called, pages points to the real page cache 768 * data -- which we can't go and encrypt! buf->pages points 769 * to scratch pages which we are going to send off to the 770 * client/server. Swap in the plaintext pages to calculate 771 * the hmac. 772 */ 773 save_pages = buf->pages; 774 buf->pages = pages; 775 776 err = make_checksum_v2(kctx, NULL, 0, buf, 777 offset + GSS_KRB5_TOK_HDR_LEN, 778 cksumkey, usage, &hmac); 779 buf->pages = save_pages; 780 if (err) 781 return GSS_S_FAILURE; 782 783 nbytes = buf->len - offset - GSS_KRB5_TOK_HDR_LEN; 784 nblocks = (nbytes + blocksize - 1) / blocksize; 785 cbcbytes = 0; 786 if (nblocks > 2) 787 cbcbytes = (nblocks - 2) * blocksize; 788 789 memset(desc.iv, 0, sizeof(desc.iv)); 790 791 if (cbcbytes) { 792 SKCIPHER_REQUEST_ON_STACK(req, aux_cipher); 793 794 desc.pos = offset + GSS_KRB5_TOK_HDR_LEN; 795 desc.fragno = 0; 796 desc.fraglen = 0; 797 desc.pages = pages; 798 desc.outbuf = buf; 799 desc.req = req; 800 801 skcipher_request_set_tfm(req, aux_cipher); 802 skcipher_request_set_callback(req, 0, NULL, NULL); 803 804 sg_init_table(desc.infrags, 4); 805 sg_init_table(desc.outfrags, 4); 806 807 err = xdr_process_buf(buf, offset + GSS_KRB5_TOK_HDR_LEN, 808 cbcbytes, encryptor, &desc); 809 skcipher_request_zero(req); 810 if (err) 811 goto out_err; 812 } 813 814 /* Make sure IV carries forward from any CBC results. */ 815 err = gss_krb5_cts_crypt(cipher, buf, 816 offset + GSS_KRB5_TOK_HDR_LEN + cbcbytes, 817 desc.iv, pages, 1); 818 if (err) { 819 err = GSS_S_FAILURE; 820 goto out_err; 821 } 822 823 /* Now update buf to account for HMAC */ 824 buf->tail[0].iov_len += kctx->gk5e->cksumlength; 825 buf->len += kctx->gk5e->cksumlength; 826 827 out_err: 828 if (err) 829 err = GSS_S_FAILURE; 830 return err; 831 } 832 833 u32 834 gss_krb5_aes_decrypt(struct krb5_ctx *kctx, u32 offset, struct xdr_buf *buf, 835 u32 *headskip, u32 *tailskip) 836 { 837 struct xdr_buf subbuf; 838 u32 ret = 0; 839 u8 *cksum_key; 840 struct crypto_skcipher *cipher, *aux_cipher; 841 struct xdr_netobj our_hmac_obj; 842 u8 our_hmac[GSS_KRB5_MAX_CKSUM_LEN]; 843 u8 pkt_hmac[GSS_KRB5_MAX_CKSUM_LEN]; 844 int nblocks, blocksize, cbcbytes; 845 struct decryptor_desc desc; 846 unsigned int usage; 847 848 if (kctx->initiate) { 849 cipher = kctx->acceptor_enc; 850 aux_cipher = kctx->acceptor_enc_aux; 851 cksum_key = kctx->acceptor_integ; 852 usage = KG_USAGE_ACCEPTOR_SEAL; 853 } else { 854 cipher = kctx->initiator_enc; 855 aux_cipher = kctx->initiator_enc_aux; 856 cksum_key = kctx->initiator_integ; 857 usage = KG_USAGE_INITIATOR_SEAL; 858 } 859 blocksize = crypto_skcipher_blocksize(cipher); 860 861 862 /* create a segment skipping the header and leaving out the checksum */ 863 xdr_buf_subsegment(buf, &subbuf, offset + GSS_KRB5_TOK_HDR_LEN, 864 (buf->len - offset - GSS_KRB5_TOK_HDR_LEN - 865 kctx->gk5e->cksumlength)); 866 867 nblocks = (subbuf.len + blocksize - 1) / blocksize; 868 869 cbcbytes = 0; 870 if (nblocks > 2) 871 cbcbytes = (nblocks - 2) * blocksize; 872 873 memset(desc.iv, 0, sizeof(desc.iv)); 874 875 if (cbcbytes) { 876 SKCIPHER_REQUEST_ON_STACK(req, aux_cipher); 877 878 desc.fragno = 0; 879 desc.fraglen = 0; 880 desc.req = req; 881 882 skcipher_request_set_tfm(req, aux_cipher); 883 skcipher_request_set_callback(req, 0, NULL, NULL); 884 885 sg_init_table(desc.frags, 4); 886 887 ret = xdr_process_buf(&subbuf, 0, cbcbytes, decryptor, &desc); 888 skcipher_request_zero(req); 889 if (ret) 890 goto out_err; 891 } 892 893 /* Make sure IV carries forward from any CBC results. */ 894 ret = gss_krb5_cts_crypt(cipher, &subbuf, cbcbytes, desc.iv, NULL, 0); 895 if (ret) 896 goto out_err; 897 898 899 /* Calculate our hmac over the plaintext data */ 900 our_hmac_obj.len = sizeof(our_hmac); 901 our_hmac_obj.data = our_hmac; 902 903 ret = make_checksum_v2(kctx, NULL, 0, &subbuf, 0, 904 cksum_key, usage, &our_hmac_obj); 905 if (ret) 906 goto out_err; 907 908 /* Get the packet's hmac value */ 909 ret = read_bytes_from_xdr_buf(buf, buf->len - kctx->gk5e->cksumlength, 910 pkt_hmac, kctx->gk5e->cksumlength); 911 if (ret) 912 goto out_err; 913 914 if (memcmp(pkt_hmac, our_hmac, kctx->gk5e->cksumlength) != 0) { 915 ret = GSS_S_BAD_SIG; 916 goto out_err; 917 } 918 *headskip = kctx->gk5e->conflen; 919 *tailskip = kctx->gk5e->cksumlength; 920 out_err: 921 if (ret && ret != GSS_S_BAD_SIG) 922 ret = GSS_S_FAILURE; 923 return ret; 924 } 925 926 /* 927 * Compute Kseq given the initial session key and the checksum. 928 * Set the key of the given cipher. 929 */ 930 int 931 krb5_rc4_setup_seq_key(struct krb5_ctx *kctx, struct crypto_skcipher *cipher, 932 unsigned char *cksum) 933 { 934 struct crypto_shash *hmac; 935 struct shash_desc *desc; 936 u8 Kseq[GSS_KRB5_MAX_KEYLEN]; 937 u32 zeroconstant = 0; 938 int err; 939 940 dprintk("%s: entered\n", __func__); 941 942 hmac = crypto_alloc_shash(kctx->gk5e->cksum_name, 0, 0); 943 if (IS_ERR(hmac)) { 944 dprintk("%s: error %ld, allocating hash '%s'\n", 945 __func__, PTR_ERR(hmac), kctx->gk5e->cksum_name); 946 return PTR_ERR(hmac); 947 } 948 949 desc = kmalloc(sizeof(*desc), GFP_KERNEL); 950 if (!desc) { 951 dprintk("%s: failed to allocate shash descriptor for '%s'\n", 952 __func__, kctx->gk5e->cksum_name); 953 crypto_free_shash(hmac); 954 return -ENOMEM; 955 } 956 957 desc->tfm = hmac; 958 desc->flags = 0; 959 960 /* Compute intermediate Kseq from session key */ 961 err = crypto_shash_setkey(hmac, kctx->Ksess, kctx->gk5e->keylength); 962 if (err) 963 goto out_err; 964 965 err = crypto_shash_digest(desc, (u8 *)&zeroconstant, 4, Kseq); 966 if (err) 967 goto out_err; 968 969 /* Compute final Kseq from the checksum and intermediate Kseq */ 970 err = crypto_shash_setkey(hmac, Kseq, kctx->gk5e->keylength); 971 if (err) 972 goto out_err; 973 974 err = crypto_shash_digest(desc, cksum, 8, Kseq); 975 if (err) 976 goto out_err; 977 978 err = crypto_skcipher_setkey(cipher, Kseq, kctx->gk5e->keylength); 979 if (err) 980 goto out_err; 981 982 err = 0; 983 984 out_err: 985 kzfree(desc); 986 crypto_free_shash(hmac); 987 dprintk("%s: returning %d\n", __func__, err); 988 return err; 989 } 990 991 /* 992 * Compute Kcrypt given the initial session key and the plaintext seqnum. 993 * Set the key of cipher kctx->enc. 994 */ 995 int 996 krb5_rc4_setup_enc_key(struct krb5_ctx *kctx, struct crypto_skcipher *cipher, 997 s32 seqnum) 998 { 999 struct crypto_shash *hmac; 1000 struct shash_desc *desc; 1001 u8 Kcrypt[GSS_KRB5_MAX_KEYLEN]; 1002 u8 zeroconstant[4] = {0}; 1003 u8 seqnumarray[4]; 1004 int err, i; 1005 1006 dprintk("%s: entered, seqnum %u\n", __func__, seqnum); 1007 1008 hmac = crypto_alloc_shash(kctx->gk5e->cksum_name, 0, 0); 1009 if (IS_ERR(hmac)) { 1010 dprintk("%s: error %ld, allocating hash '%s'\n", 1011 __func__, PTR_ERR(hmac), kctx->gk5e->cksum_name); 1012 return PTR_ERR(hmac); 1013 } 1014 1015 desc = kmalloc(sizeof(*desc), GFP_KERNEL); 1016 if (!desc) { 1017 dprintk("%s: failed to allocate shash descriptor for '%s'\n", 1018 __func__, kctx->gk5e->cksum_name); 1019 crypto_free_shash(hmac); 1020 return -ENOMEM; 1021 } 1022 1023 desc->tfm = hmac; 1024 desc->flags = 0; 1025 1026 /* Compute intermediate Kcrypt from session key */ 1027 for (i = 0; i < kctx->gk5e->keylength; i++) 1028 Kcrypt[i] = kctx->Ksess[i] ^ 0xf0; 1029 1030 err = crypto_shash_setkey(hmac, Kcrypt, kctx->gk5e->keylength); 1031 if (err) 1032 goto out_err; 1033 1034 err = crypto_shash_digest(desc, zeroconstant, 4, Kcrypt); 1035 if (err) 1036 goto out_err; 1037 1038 /* Compute final Kcrypt from the seqnum and intermediate Kcrypt */ 1039 err = crypto_shash_setkey(hmac, Kcrypt, kctx->gk5e->keylength); 1040 if (err) 1041 goto out_err; 1042 1043 seqnumarray[0] = (unsigned char) ((seqnum >> 24) & 0xff); 1044 seqnumarray[1] = (unsigned char) ((seqnum >> 16) & 0xff); 1045 seqnumarray[2] = (unsigned char) ((seqnum >> 8) & 0xff); 1046 seqnumarray[3] = (unsigned char) ((seqnum >> 0) & 0xff); 1047 1048 err = crypto_shash_digest(desc, seqnumarray, 4, Kcrypt); 1049 if (err) 1050 goto out_err; 1051 1052 err = crypto_skcipher_setkey(cipher, Kcrypt, kctx->gk5e->keylength); 1053 if (err) 1054 goto out_err; 1055 1056 err = 0; 1057 1058 out_err: 1059 kzfree(desc); 1060 crypto_free_shash(hmac); 1061 dprintk("%s: returning %d\n", __func__, err); 1062 return err; 1063 } 1064 1065