/*
 *  linux/net/sunrpc/gss_krb5_crypto.c
 *
 *  Copyright (c) 2000-2008 The Regents of the University of Michigan.
 *  All rights reserved.
 *
 *  Andy Adamson   <andros@umich.edu>
 *  Bruce Fields   <bfields@umich.edu>
 */

/*
 * Copyright (C) 1998 by the FundsXpress, INC.
 *
 * All rights reserved.
 *
 * Export of this software from the United States of America may require
 * a specific license from the United States Government.  It is the
 * responsibility of any person or organization contemplating export to
 * obtain such a license before exporting.
 *
 * WITHIN THAT CONSTRAINT, permission to use, copy, modify, and
 * distribute this software and its documentation for any purpose and
 * without fee is hereby granted, provided that the above copyright
 * notice appear in all copies and that both that copyright notice and
 * this permission notice appear in supporting documentation, and that
 * the name of FundsXpress not be used in advertising or publicity pertaining
 * to distribution of the software without specific, written prior
 * permission.  FundsXpress makes no representations about the suitability of
 * this software for any purpose.  It is provided "as is" without express
 * or implied warranty.
 *
 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
 */

#include <crypto/hash.h>
#include <crypto/skcipher.h>
#include <linux/err.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/random.h>
#include <linux/sunrpc/gss_krb5.h>
#include <linux/sunrpc/xdr.h>

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
# define RPCDBG_FACILITY	RPCDBG_AUTH
#endif

u32
krb5_encrypt(
	struct crypto_skcipher *tfm,
	void *iv,
	void *in,
	void *out,
	int length)
{
	u32 ret = -EINVAL;
	struct scatterlist sg[1];
	u8 local_iv[GSS_KRB5_MAX_BLOCKSIZE] = {0};
	SKCIPHER_REQUEST_ON_STACK(req, tfm);

	if (length % crypto_skcipher_blocksize(tfm) != 0)
		goto out;

	if (crypto_skcipher_ivsize(tfm) > GSS_KRB5_MAX_BLOCKSIZE) {
		dprintk("RPC: gss_k5encrypt: tfm iv size too large %d\n",
			crypto_skcipher_ivsize(tfm));
		goto out;
	}

	if (iv)
		memcpy(local_iv, iv, crypto_skcipher_ivsize(tfm));

	/* Encrypt in place: copy the plaintext into the output buffer
	 * and run the cipher over that single scatterlist segment. */
	memcpy(out, in, length);
	sg_init_one(sg, out, length);

	skcipher_request_set_tfm(req, tfm);
	skcipher_request_set_callback(req, 0, NULL, NULL);
	skcipher_request_set_crypt(req, sg, sg, length, local_iv);

	ret = crypto_skcipher_encrypt(req);
	skcipher_request_zero(req);
out:
	dprintk("RPC: krb5_encrypt returns %d\n", ret);
	return ret;
}

u32
krb5_decrypt(
	struct crypto_skcipher *tfm,
	void *iv,
	void *in,
	void *out,
	int length)
{
	u32 ret = -EINVAL;
	struct scatterlist sg[1];
	u8 local_iv[GSS_KRB5_MAX_BLOCKSIZE] = {0};
	SKCIPHER_REQUEST_ON_STACK(req, tfm);

	if (length % crypto_skcipher_blocksize(tfm) != 0)
		goto out;

	if (crypto_skcipher_ivsize(tfm) > GSS_KRB5_MAX_BLOCKSIZE) {
		dprintk("RPC: gss_k5decrypt: tfm iv size too large %d\n",
			crypto_skcipher_ivsize(tfm));
		goto out;
	}
	if (iv)
		memcpy(local_iv, iv, crypto_skcipher_ivsize(tfm));

	memcpy(out, in, length);
	sg_init_one(sg, out, length);

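	/* Decrypt in place over the same single scatterlist segment. */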
	skcipher_request_set_tfm(req, tfm);
	skcipher_request_set_callback(req, 0, NULL, NULL);
	skcipher_request_set_crypt(req, sg, sg, length, local_iv);

	ret = crypto_skcipher_decrypt(req);
	skcipher_request_zero(req);
out:
	dprintk("RPC: gss_k5decrypt returns %d\n", ret);
	return ret;
}

static int
checksummer(struct scatterlist *sg, void *data)
{
	struct ahash_request *req = data;

	ahash_request_set_crypt(req, sg, NULL, sg->length);

	return crypto_ahash_update(req);
}

static int
arcfour_hmac_md5_usage_to_salt(unsigned int usage, u8 salt[4])
{
	unsigned int ms_usage;

	switch (usage) {
	case KG_USAGE_SIGN:
		ms_usage = 15;
		break;
	case KG_USAGE_SEAL:
		ms_usage = 13;
		break;
	default:
		return -EINVAL;
	}
	salt[0] = (ms_usage >> 0) & 0xff;
	salt[1] = (ms_usage >> 8) & 0xff;
	salt[2] = (ms_usage >> 16) & 0xff;
	salt[3] = (ms_usage >> 24) & 0xff;

	return 0;
}

static u32
make_checksum_hmac_md5(struct krb5_ctx *kctx, char *header, int hdrlen,
		       struct xdr_buf *body, int body_offset, u8 *cksumkey,
		       unsigned int usage, struct xdr_netobj *cksumout)
{
	struct scatterlist sg[1];
	int err;
	u8 checksumdata[GSS_KRB5_MAX_CKSUM_LEN];
	u8 rc4salt[4];
	struct crypto_ahash *md5;
	struct crypto_ahash *hmac_md5;
	struct ahash_request *req;

	if (cksumkey == NULL)
		return GSS_S_FAILURE;

	if (cksumout->len < kctx->gk5e->cksumlength) {
		dprintk("%s: checksum buffer length, %u, too small for %s\n",
			__func__, cksumout->len, kctx->gk5e->name);
		return GSS_S_FAILURE;
	}

	if (arcfour_hmac_md5_usage_to_salt(usage, rc4salt)) {
		dprintk("%s: invalid usage value %u\n", __func__, usage);
		return GSS_S_FAILURE;
	}

	md5 = crypto_alloc_ahash("md5", 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(md5))
		return GSS_S_FAILURE;

	hmac_md5 = crypto_alloc_ahash(kctx->gk5e->cksum_name, 0,
				      CRYPTO_ALG_ASYNC);
	if (IS_ERR(hmac_md5)) {
		crypto_free_ahash(md5);
		return GSS_S_FAILURE;
	}

	req = ahash_request_alloc(md5, GFP_KERNEL);
	if (!req) {
		crypto_free_ahash(hmac_md5);
		crypto_free_ahash(md5);
		return GSS_S_FAILURE;
	}

	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP, NULL, NULL);

	/* First pass: unkeyed MD5 over the salt, token header, and body */
	err = crypto_ahash_init(req);
	if (err)
		goto out;
	sg_init_one(sg, rc4salt, 4);
	ahash_request_set_crypt(req, sg, NULL, 4);
	err = crypto_ahash_update(req);
	if (err)
		goto out;

	sg_init_one(sg, header, hdrlen);
	ahash_request_set_crypt(req, sg, NULL, hdrlen);
	err = crypto_ahash_update(req);
	if (err)
		goto out;
	err = xdr_process_buf(body, body_offset, body->len - body_offset,
			      checksummer, req);
	if (err)
		goto out;
	ahash_request_set_crypt(req, NULL, checksumdata, 0);
	err = crypto_ahash_final(req);
	if (err)
		goto out;

	ahash_request_free(req);
	req = ahash_request_alloc(hmac_md5, GFP_KERNEL);
	if (!req) {
		crypto_free_ahash(hmac_md5);
		crypto_free_ahash(md5);
		return GSS_S_FAILURE;
	}

	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP, NULL, NULL);

	err = crypto_ahash_init(req);
	if (err)
		goto out;
	err = crypto_ahash_setkey(hmac_md5, cksumkey, kctx->gk5e->keylength);
	if (err)
		goto out;

	sg_init_one(sg, checksumdata, crypto_ahash_digestsize(md5));
	ahash_request_set_crypt(req, sg, checksumdata,
				crypto_ahash_digestsize(md5));
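	/* Second pass: the keyed HMAC-MD5 of the MD5 digest is the
	 * final checksum. */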
	err = crypto_ahash_digest(req);
	if (err)
		goto out;

	memcpy(cksumout->data, checksumdata, kctx->gk5e->cksumlength);
	cksumout->len = kctx->gk5e->cksumlength;
out:
	ahash_request_free(req);
	crypto_free_ahash(md5);
	crypto_free_ahash(hmac_md5);
	return err ? GSS_S_FAILURE : 0;
}

/*
 * checksum the plaintext data and hdrlen bytes of the token header
 * The checksum is performed over the first 8 bytes of the
 * gss token header and then over the data body
 */
u32
make_checksum(struct krb5_ctx *kctx, char *header, int hdrlen,
	      struct xdr_buf *body, int body_offset, u8 *cksumkey,
	      unsigned int usage, struct xdr_netobj *cksumout)
{
	struct crypto_ahash *tfm;
	struct ahash_request *req;
	struct scatterlist sg[1];
	int err;
	u8 checksumdata[GSS_KRB5_MAX_CKSUM_LEN];
	unsigned int checksumlen;

	if (kctx->gk5e->ctype == CKSUMTYPE_HMAC_MD5_ARCFOUR)
		return make_checksum_hmac_md5(kctx, header, hdrlen,
					      body, body_offset,
					      cksumkey, usage, cksumout);

	if (cksumout->len < kctx->gk5e->cksumlength) {
		dprintk("%s: checksum buffer length, %u, too small for %s\n",
			__func__, cksumout->len, kctx->gk5e->name);
		return GSS_S_FAILURE;
	}

	tfm = crypto_alloc_ahash(kctx->gk5e->cksum_name, 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(tfm))
		return GSS_S_FAILURE;

	req = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		crypto_free_ahash(tfm);
		return GSS_S_FAILURE;
	}

	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP, NULL, NULL);

	checksumlen = crypto_ahash_digestsize(tfm);

	if (cksumkey != NULL) {
		err = crypto_ahash_setkey(tfm, cksumkey,
					  kctx->gk5e->keylength);
		if (err)
			goto out;
	}

	err = crypto_ahash_init(req);
	if (err)
		goto out;
	sg_init_one(sg, header, hdrlen);
	ahash_request_set_crypt(req, sg, NULL, hdrlen);
	err = crypto_ahash_update(req);
	if (err)
		goto out;
	err = xdr_process_buf(body, body_offset, body->len - body_offset,
			      checksummer, req);
	if (err)
		goto out;
	ahash_request_set_crypt(req, NULL, checksumdata, 0);
	err = crypto_ahash_final(req);
	if (err)
		goto out;

	switch (kctx->gk5e->ctype) {
	case CKSUMTYPE_RSA_MD5:
		err = kctx->gk5e->encrypt(kctx->seq, NULL, checksumdata,
					  checksumdata, checksumlen);
		if (err)
			goto out;
		memcpy(cksumout->data,
		       checksumdata + checksumlen - kctx->gk5e->cksumlength,
		       kctx->gk5e->cksumlength);
		break;
	case CKSUMTYPE_HMAC_SHA1_DES3:
		memcpy(cksumout->data, checksumdata, kctx->gk5e->cksumlength);
		break;
	default:
		BUG();
		break;
	}
	cksumout->len = kctx->gk5e->cksumlength;
out:
	ahash_request_free(req);
	crypto_free_ahash(tfm);
	return err ? GSS_S_FAILURE : 0;
}

/*
 * checksum the plaintext data and hdrlen bytes of the token header
 * Per rfc4121, sec. 4.2.4, the checksum is performed over the data
 * body then over the first 16 octets of the MIC token
 * Inclusion of the header data in the calculation of the
 * checksum is optional.
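 * Note that, unlike make_checksum() above, the data body is hashed
 * first and the token header second.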
 */
u32
make_checksum_v2(struct krb5_ctx *kctx, char *header, int hdrlen,
		 struct xdr_buf *body, int body_offset, u8 *cksumkey,
		 unsigned int usage, struct xdr_netobj *cksumout)
{
	struct crypto_ahash *tfm;
	struct ahash_request *req;
	struct scatterlist sg[1];
	int err;
	u8 checksumdata[GSS_KRB5_MAX_CKSUM_LEN];
	unsigned int checksumlen;

	if (kctx->gk5e->keyed_cksum == 0) {
		dprintk("%s: expected keyed hash for %s\n",
			__func__, kctx->gk5e->name);
		return GSS_S_FAILURE;
	}
	if (cksumkey == NULL) {
		dprintk("%s: no key supplied for %s\n",
			__func__, kctx->gk5e->name);
		return GSS_S_FAILURE;
	}

	tfm = crypto_alloc_ahash(kctx->gk5e->cksum_name, 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(tfm))
		return GSS_S_FAILURE;
	checksumlen = crypto_ahash_digestsize(tfm);

	req = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		crypto_free_ahash(tfm);
		return GSS_S_FAILURE;
	}

	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP, NULL, NULL);

	err = crypto_ahash_setkey(tfm, cksumkey, kctx->gk5e->keylength);
	if (err)
		goto out;

	err = crypto_ahash_init(req);
	if (err)
		goto out;
	err = xdr_process_buf(body, body_offset, body->len - body_offset,
			      checksummer, req);
	if (err)
		goto out;
	if (header != NULL) {
		sg_init_one(sg, header, hdrlen);
		ahash_request_set_crypt(req, sg, NULL, hdrlen);
		err = crypto_ahash_update(req);
		if (err)
			goto out;
	}
	ahash_request_set_crypt(req, NULL, checksumdata, 0);
	err = crypto_ahash_final(req);
	if (err)
		goto out;

	cksumout->len = kctx->gk5e->cksumlength;

	switch (kctx->gk5e->ctype) {
	case CKSUMTYPE_HMAC_SHA1_96_AES128:
	case CKSUMTYPE_HMAC_SHA1_96_AES256:
		/* note that this truncates the hash */
		memcpy(cksumout->data, checksumdata, kctx->gk5e->cksumlength);
		break;
	default:
		BUG();
		break;
	}
out:
	ahash_request_free(req);
	crypto_free_ahash(tfm);
	return err ? GSS_S_FAILURE : 0;
}

struct encryptor_desc {
	u8 iv[GSS_KRB5_MAX_BLOCKSIZE];
	struct skcipher_request *req;
	int pos;
	struct xdr_buf *outbuf;
	struct page **pages;
	struct scatterlist infrags[4];
	struct scatterlist outfrags[4];
	int fragno;
	int fraglen;
};

static int
encryptor(struct scatterlist *sg, void *data)
{
	struct encryptor_desc *desc = data;
	struct xdr_buf *outbuf = desc->outbuf;
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(desc->req);
	struct page *in_page;
	int thislen = desc->fraglen + sg->length;
	int fraglen, ret;
	int page_pos;

	/* Worst case is 4 fragments: head, end of page 1, start
	 * of page 2, tail.  Anything more is a bug.
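	 * Fragments are collected until they span at least one cipher
	 * block, then encrypted in one call; any trailing partial block
	 * is carried over into the next invocation.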
	 */
	BUG_ON(desc->fragno > 3);

	page_pos = desc->pos - outbuf->head[0].iov_len;
	if (page_pos >= 0 && page_pos < outbuf->page_len) {
		/* pages are not in place: */
		int i = (page_pos + outbuf->page_base) >> PAGE_SHIFT;
		in_page = desc->pages[i];
	} else {
		in_page = sg_page(sg);
	}
	sg_set_page(&desc->infrags[desc->fragno], in_page, sg->length,
		    sg->offset);
	sg_set_page(&desc->outfrags[desc->fragno], sg_page(sg), sg->length,
		    sg->offset);
	desc->fragno++;
	desc->fraglen += sg->length;
	desc->pos += sg->length;

	fraglen = thislen & (crypto_skcipher_blocksize(tfm) - 1);
	thislen -= fraglen;

	if (thislen == 0)
		return 0;

	sg_mark_end(&desc->infrags[desc->fragno - 1]);
	sg_mark_end(&desc->outfrags[desc->fragno - 1]);

	skcipher_request_set_crypt(desc->req, desc->infrags, desc->outfrags,
				   thislen, desc->iv);

	ret = crypto_skcipher_encrypt(desc->req);
	if (ret)
		return ret;

	sg_init_table(desc->infrags, 4);
	sg_init_table(desc->outfrags, 4);

	if (fraglen) {
		sg_set_page(&desc->outfrags[0], sg_page(sg), fraglen,
			    sg->offset + sg->length - fraglen);
		desc->infrags[0] = desc->outfrags[0];
		sg_assign_page(&desc->infrags[0], in_page);
		desc->fragno = 1;
		desc->fraglen = fraglen;
	} else {
		desc->fragno = 0;
		desc->fraglen = 0;
	}
	return 0;
}

int
gss_encrypt_xdr_buf(struct crypto_skcipher *tfm, struct xdr_buf *buf,
		    int offset, struct page **pages)
{
	int ret;
	struct encryptor_desc desc;
	SKCIPHER_REQUEST_ON_STACK(req, tfm);

	BUG_ON((buf->len - offset) % crypto_skcipher_blocksize(tfm) != 0);

	skcipher_request_set_tfm(req, tfm);
	skcipher_request_set_callback(req, 0, NULL, NULL);

	memset(desc.iv, 0, sizeof(desc.iv));
	desc.req = req;
	desc.pos = offset;
	desc.outbuf = buf;
	desc.pages = pages;
	desc.fragno = 0;
	desc.fraglen = 0;

	sg_init_table(desc.infrags, 4);
	sg_init_table(desc.outfrags, 4);

	ret = xdr_process_buf(buf, offset, buf->len - offset, encryptor, &desc);
	skcipher_request_zero(req);
	return ret;
}

struct decryptor_desc {
	u8 iv[GSS_KRB5_MAX_BLOCKSIZE];
	struct skcipher_request *req;
	struct scatterlist frags[4];
	int fragno;
	int fraglen;
};

static int
decryptor(struct scatterlist *sg, void *data)
{
	struct decryptor_desc *desc = data;
	int thislen = desc->fraglen + sg->length;
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(desc->req);
	int fraglen, ret;

	/* Worst case is 4 fragments: head, end of page 1, start
	 * of page 2, tail.  Anything more is a bug.
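	 * As in encryptor(), any trailing partial block is saved and
	 * prepended to the fragments of the next call.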
	 */
	BUG_ON(desc->fragno > 3);
	sg_set_page(&desc->frags[desc->fragno], sg_page(sg), sg->length,
		    sg->offset);
	desc->fragno++;
	desc->fraglen += sg->length;

	fraglen = thislen & (crypto_skcipher_blocksize(tfm) - 1);
	thislen -= fraglen;

	if (thislen == 0)
		return 0;

	sg_mark_end(&desc->frags[desc->fragno - 1]);

	skcipher_request_set_crypt(desc->req, desc->frags, desc->frags,
				   thislen, desc->iv);

	ret = crypto_skcipher_decrypt(desc->req);
	if (ret)
		return ret;

	sg_init_table(desc->frags, 4);

	if (fraglen) {
		sg_set_page(&desc->frags[0], sg_page(sg), fraglen,
			    sg->offset + sg->length - fraglen);
		desc->fragno = 1;
		desc->fraglen = fraglen;
	} else {
		desc->fragno = 0;
		desc->fraglen = 0;
	}
	return 0;
}

int
gss_decrypt_xdr_buf(struct crypto_skcipher *tfm, struct xdr_buf *buf,
		    int offset)
{
	int ret;
	struct decryptor_desc desc;
	SKCIPHER_REQUEST_ON_STACK(req, tfm);

	/* XXXJBF: */
	BUG_ON((buf->len - offset) % crypto_skcipher_blocksize(tfm) != 0);

	skcipher_request_set_tfm(req, tfm);
	skcipher_request_set_callback(req, 0, NULL, NULL);

	memset(desc.iv, 0, sizeof(desc.iv));
	desc.req = req;
	desc.fragno = 0;
	desc.fraglen = 0;

	sg_init_table(desc.frags, 4);

	ret = xdr_process_buf(buf, offset, buf->len - offset, decryptor, &desc);
	skcipher_request_zero(req);
	return ret;
}

/*
 * This function makes the assumption that it was ultimately called
 * from gss_wrap().
 *
 * The client auth_gss code moves any existing tail data into a
 * separate page before calling gss_wrap.
 * The server svcauth_gss code ensures that both the head and the
 * tail have slack space of RPC_MAX_AUTH_SIZE before calling gss_wrap.
 *
 * Even with that guarantee, this function may be called more than
 * once in the processing of gss_wrap().  The best we can do is
 * verify at compile-time (see GSS_KRB5_SLACK_CHECK) that the
 * largest expected shift will fit within RPC_MAX_AUTH_SIZE.
 * At run-time we can verify that a single invocation of this
 * function doesn't attempt to use more than RPC_MAX_AUTH_SIZE.
 */

int
xdr_extend_head(struct xdr_buf *buf, unsigned int base, unsigned int shiftlen)
{
	u8 *p;

	if (shiftlen == 0)
		return 0;

	BUILD_BUG_ON(GSS_KRB5_MAX_SLACK_NEEDED > RPC_MAX_AUTH_SIZE);
	BUG_ON(shiftlen > RPC_MAX_AUTH_SIZE);

	/* Shift the head data up by shiftlen bytes, opening a gap at base */
	p = buf->head[0].iov_base + base;

	memmove(p + shiftlen, p, buf->head[0].iov_len - base);

	buf->head[0].iov_len += shiftlen;
	buf->len += shiftlen;

	return 0;
}

static u32
gss_krb5_cts_crypt(struct crypto_skcipher *cipher, struct xdr_buf *buf,
		   u32 offset, u8 *iv, struct page **pages, int encrypt)
{
	u32 ret;
	struct scatterlist sg[1];
	SKCIPHER_REQUEST_ON_STACK(req, cipher);
	u8 data[GSS_KRB5_MAX_BLOCKSIZE * 2];
	struct page **save_pages;
	u32 len = buf->len - offset;

	if (len > ARRAY_SIZE(data)) {
		WARN_ON(0);
		return -ENOMEM;
	}

	/*
	 * For encryption, we want to read from the cleartext
	 * page cache pages, and write the encrypted data to
	 * the supplied xdr_buf pages.
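	 * For decryption, the data is read from and written back to
	 * the xdr_buf's own pages, so no page swap is needed.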
682 */ 683 save_pages = buf->pages; 684 if (encrypt) 685 buf->pages = pages; 686 687 ret = read_bytes_from_xdr_buf(buf, offset, data, len); 688 buf->pages = save_pages; 689 if (ret) 690 goto out; 691 692 sg_init_one(sg, data, len); 693 694 skcipher_request_set_tfm(req, cipher); 695 skcipher_request_set_callback(req, 0, NULL, NULL); 696 skcipher_request_set_crypt(req, sg, sg, len, iv); 697 698 if (encrypt) 699 ret = crypto_skcipher_encrypt(req); 700 else 701 ret = crypto_skcipher_decrypt(req); 702 703 skcipher_request_zero(req); 704 705 if (ret) 706 goto out; 707 708 ret = write_bytes_to_xdr_buf(buf, offset, data, len); 709 710 out: 711 return ret; 712 } 713 714 u32 715 gss_krb5_aes_encrypt(struct krb5_ctx *kctx, u32 offset, 716 struct xdr_buf *buf, struct page **pages) 717 { 718 u32 err; 719 struct xdr_netobj hmac; 720 u8 *cksumkey; 721 u8 *ecptr; 722 struct crypto_skcipher *cipher, *aux_cipher; 723 int blocksize; 724 struct page **save_pages; 725 int nblocks, nbytes; 726 struct encryptor_desc desc; 727 u32 cbcbytes; 728 unsigned int usage; 729 730 if (kctx->initiate) { 731 cipher = kctx->initiator_enc; 732 aux_cipher = kctx->initiator_enc_aux; 733 cksumkey = kctx->initiator_integ; 734 usage = KG_USAGE_INITIATOR_SEAL; 735 } else { 736 cipher = kctx->acceptor_enc; 737 aux_cipher = kctx->acceptor_enc_aux; 738 cksumkey = kctx->acceptor_integ; 739 usage = KG_USAGE_ACCEPTOR_SEAL; 740 } 741 blocksize = crypto_skcipher_blocksize(cipher); 742 743 /* hide the gss token header and insert the confounder */ 744 offset += GSS_KRB5_TOK_HDR_LEN; 745 if (xdr_extend_head(buf, offset, kctx->gk5e->conflen)) 746 return GSS_S_FAILURE; 747 gss_krb5_make_confounder(buf->head[0].iov_base + offset, kctx->gk5e->conflen); 748 offset -= GSS_KRB5_TOK_HDR_LEN; 749 750 if (buf->tail[0].iov_base != NULL) { 751 ecptr = buf->tail[0].iov_base + buf->tail[0].iov_len; 752 } else { 753 buf->tail[0].iov_base = buf->head[0].iov_base 754 + buf->head[0].iov_len; 755 buf->tail[0].iov_len = 0; 756 ecptr = buf->tail[0].iov_base; 757 } 758 759 /* copy plaintext gss token header after filler (if any) */ 760 memcpy(ecptr, buf->head[0].iov_base + offset, GSS_KRB5_TOK_HDR_LEN); 761 buf->tail[0].iov_len += GSS_KRB5_TOK_HDR_LEN; 762 buf->len += GSS_KRB5_TOK_HDR_LEN; 763 764 /* Do the HMAC */ 765 hmac.len = GSS_KRB5_MAX_CKSUM_LEN; 766 hmac.data = buf->tail[0].iov_base + buf->tail[0].iov_len; 767 768 /* 769 * When we are called, pages points to the real page cache 770 * data -- which we can't go and encrypt! buf->pages points 771 * to scratch pages which we are going to send off to the 772 * client/server. Swap in the plaintext pages to calculate 773 * the hmac. 
774 */ 775 save_pages = buf->pages; 776 buf->pages = pages; 777 778 err = make_checksum_v2(kctx, NULL, 0, buf, 779 offset + GSS_KRB5_TOK_HDR_LEN, 780 cksumkey, usage, &hmac); 781 buf->pages = save_pages; 782 if (err) 783 return GSS_S_FAILURE; 784 785 nbytes = buf->len - offset - GSS_KRB5_TOK_HDR_LEN; 786 nblocks = (nbytes + blocksize - 1) / blocksize; 787 cbcbytes = 0; 788 if (nblocks > 2) 789 cbcbytes = (nblocks - 2) * blocksize; 790 791 memset(desc.iv, 0, sizeof(desc.iv)); 792 793 if (cbcbytes) { 794 SKCIPHER_REQUEST_ON_STACK(req, aux_cipher); 795 796 desc.pos = offset + GSS_KRB5_TOK_HDR_LEN; 797 desc.fragno = 0; 798 desc.fraglen = 0; 799 desc.pages = pages; 800 desc.outbuf = buf; 801 desc.req = req; 802 803 skcipher_request_set_tfm(req, aux_cipher); 804 skcipher_request_set_callback(req, 0, NULL, NULL); 805 806 sg_init_table(desc.infrags, 4); 807 sg_init_table(desc.outfrags, 4); 808 809 err = xdr_process_buf(buf, offset + GSS_KRB5_TOK_HDR_LEN, 810 cbcbytes, encryptor, &desc); 811 skcipher_request_zero(req); 812 if (err) 813 goto out_err; 814 } 815 816 /* Make sure IV carries forward from any CBC results. */ 817 err = gss_krb5_cts_crypt(cipher, buf, 818 offset + GSS_KRB5_TOK_HDR_LEN + cbcbytes, 819 desc.iv, pages, 1); 820 if (err) { 821 err = GSS_S_FAILURE; 822 goto out_err; 823 } 824 825 /* Now update buf to account for HMAC */ 826 buf->tail[0].iov_len += kctx->gk5e->cksumlength; 827 buf->len += kctx->gk5e->cksumlength; 828 829 out_err: 830 if (err) 831 err = GSS_S_FAILURE; 832 return err; 833 } 834 835 u32 836 gss_krb5_aes_decrypt(struct krb5_ctx *kctx, u32 offset, struct xdr_buf *buf, 837 u32 *headskip, u32 *tailskip) 838 { 839 struct xdr_buf subbuf; 840 u32 ret = 0; 841 u8 *cksum_key; 842 struct crypto_skcipher *cipher, *aux_cipher; 843 struct xdr_netobj our_hmac_obj; 844 u8 our_hmac[GSS_KRB5_MAX_CKSUM_LEN]; 845 u8 pkt_hmac[GSS_KRB5_MAX_CKSUM_LEN]; 846 int nblocks, blocksize, cbcbytes; 847 struct decryptor_desc desc; 848 unsigned int usage; 849 850 if (kctx->initiate) { 851 cipher = kctx->acceptor_enc; 852 aux_cipher = kctx->acceptor_enc_aux; 853 cksum_key = kctx->acceptor_integ; 854 usage = KG_USAGE_ACCEPTOR_SEAL; 855 } else { 856 cipher = kctx->initiator_enc; 857 aux_cipher = kctx->initiator_enc_aux; 858 cksum_key = kctx->initiator_integ; 859 usage = KG_USAGE_INITIATOR_SEAL; 860 } 861 blocksize = crypto_skcipher_blocksize(cipher); 862 863 864 /* create a segment skipping the header and leaving out the checksum */ 865 xdr_buf_subsegment(buf, &subbuf, offset + GSS_KRB5_TOK_HDR_LEN, 866 (buf->len - offset - GSS_KRB5_TOK_HDR_LEN - 867 kctx->gk5e->cksumlength)); 868 869 nblocks = (subbuf.len + blocksize - 1) / blocksize; 870 871 cbcbytes = 0; 872 if (nblocks > 2) 873 cbcbytes = (nblocks - 2) * blocksize; 874 875 memset(desc.iv, 0, sizeof(desc.iv)); 876 877 if (cbcbytes) { 878 SKCIPHER_REQUEST_ON_STACK(req, aux_cipher); 879 880 desc.fragno = 0; 881 desc.fraglen = 0; 882 desc.req = req; 883 884 skcipher_request_set_tfm(req, aux_cipher); 885 skcipher_request_set_callback(req, 0, NULL, NULL); 886 887 sg_init_table(desc.frags, 4); 888 889 ret = xdr_process_buf(&subbuf, 0, cbcbytes, decryptor, &desc); 890 skcipher_request_zero(req); 891 if (ret) 892 goto out_err; 893 } 894 895 /* Make sure IV carries forward from any CBC results. 
	 */
	ret = gss_krb5_cts_crypt(cipher, &subbuf, cbcbytes, desc.iv, NULL, 0);
	if (ret)
		goto out_err;

	/* Calculate our hmac over the plaintext data */
	our_hmac_obj.len = sizeof(our_hmac);
	our_hmac_obj.data = our_hmac;

	ret = make_checksum_v2(kctx, NULL, 0, &subbuf, 0,
			       cksum_key, usage, &our_hmac_obj);
	if (ret)
		goto out_err;

	/* Get the packet's hmac value */
	ret = read_bytes_from_xdr_buf(buf, buf->len - kctx->gk5e->cksumlength,
				      pkt_hmac, kctx->gk5e->cksumlength);
	if (ret)
		goto out_err;

	if (memcmp(pkt_hmac, our_hmac, kctx->gk5e->cksumlength) != 0) {
		ret = GSS_S_BAD_SIG;
		goto out_err;
	}
	*headskip = kctx->gk5e->conflen;
	*tailskip = kctx->gk5e->cksumlength;
out_err:
	if (ret && ret != GSS_S_BAD_SIG)
		ret = GSS_S_FAILURE;
	return ret;
}

/*
 * Compute Kseq given the initial session key and the checksum.
 * Set the key of the given cipher.
 */
int
krb5_rc4_setup_seq_key(struct krb5_ctx *kctx, struct crypto_skcipher *cipher,
		       unsigned char *cksum)
{
	struct crypto_shash *hmac;
	struct shash_desc *desc;
	u8 Kseq[GSS_KRB5_MAX_KEYLEN];
	u32 zeroconstant = 0;
	int err;

	dprintk("%s: entered\n", __func__);

	hmac = crypto_alloc_shash(kctx->gk5e->cksum_name, 0, 0);
	if (IS_ERR(hmac)) {
		dprintk("%s: error %ld, allocating hash '%s'\n",
			__func__, PTR_ERR(hmac), kctx->gk5e->cksum_name);
		return PTR_ERR(hmac);
	}

	desc = kmalloc(sizeof(*desc) + crypto_shash_descsize(hmac),
		       GFP_KERNEL);
	if (!desc) {
		dprintk("%s: failed to allocate shash descriptor for '%s'\n",
			__func__, kctx->gk5e->cksum_name);
		crypto_free_shash(hmac);
		return -ENOMEM;
	}

	desc->tfm = hmac;
	desc->flags = 0;

	/* Compute intermediate Kseq from session key */
	err = crypto_shash_setkey(hmac, kctx->Ksess, kctx->gk5e->keylength);
	if (err)
		goto out_err;

	err = crypto_shash_digest(desc, (u8 *)&zeroconstant, 4, Kseq);
	if (err)
		goto out_err;

	/* Compute final Kseq from the checksum and intermediate Kseq */
	err = crypto_shash_setkey(hmac, Kseq, kctx->gk5e->keylength);
	if (err)
		goto out_err;

	err = crypto_shash_digest(desc, cksum, 8, Kseq);
	if (err)
		goto out_err;

	err = crypto_skcipher_setkey(cipher, Kseq, kctx->gk5e->keylength);
	if (err)
		goto out_err;

	err = 0;

out_err:
	kzfree(desc);
	crypto_free_shash(hmac);
	dprintk("%s: returning %d\n", __func__, err);
	return err;
}

/*
 * Compute Kcrypt given the initial session key and the plaintext seqnum.
 * Set the key of cipher kctx->enc.
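 *
 * Kcrypt = HMAC(HMAC(Ksess ^ 0xf0, 0x00000000), big-endian seqnum)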
 */
int
krb5_rc4_setup_enc_key(struct krb5_ctx *kctx, struct crypto_skcipher *cipher,
		       s32 seqnum)
{
	struct crypto_shash *hmac;
	struct shash_desc *desc;
	u8 Kcrypt[GSS_KRB5_MAX_KEYLEN];
	u8 zeroconstant[4] = {0};
	u8 seqnumarray[4];
	int err, i;

	dprintk("%s: entered, seqnum %u\n", __func__, seqnum);

	hmac = crypto_alloc_shash(kctx->gk5e->cksum_name, 0, 0);
	if (IS_ERR(hmac)) {
		dprintk("%s: error %ld, allocating hash '%s'\n",
			__func__, PTR_ERR(hmac), kctx->gk5e->cksum_name);
		return PTR_ERR(hmac);
	}

	desc = kmalloc(sizeof(*desc) + crypto_shash_descsize(hmac),
		       GFP_KERNEL);
	if (!desc) {
		dprintk("%s: failed to allocate shash descriptor for '%s'\n",
			__func__, kctx->gk5e->cksum_name);
		crypto_free_shash(hmac);
		return -ENOMEM;
	}

	desc->tfm = hmac;
	desc->flags = 0;

	/* Compute intermediate Kcrypt from session key */
	for (i = 0; i < kctx->gk5e->keylength; i++)
		Kcrypt[i] = kctx->Ksess[i] ^ 0xf0;

	err = crypto_shash_setkey(hmac, Kcrypt, kctx->gk5e->keylength);
	if (err)
		goto out_err;

	err = crypto_shash_digest(desc, zeroconstant, 4, Kcrypt);
	if (err)
		goto out_err;

	/* Compute final Kcrypt from the seqnum and intermediate Kcrypt */
	err = crypto_shash_setkey(hmac, Kcrypt, kctx->gk5e->keylength);
	if (err)
		goto out_err;

	seqnumarray[0] = (unsigned char) ((seqnum >> 24) & 0xff);
	seqnumarray[1] = (unsigned char) ((seqnum >> 16) & 0xff);
	seqnumarray[2] = (unsigned char) ((seqnum >> 8) & 0xff);
	seqnumarray[3] = (unsigned char) ((seqnum >> 0) & 0xff);

	err = crypto_shash_digest(desc, seqnumarray, 4, Kcrypt);
	if (err)
		goto out_err;

	err = crypto_skcipher_setkey(cipher, Kcrypt, kctx->gk5e->keylength);
	if (err)
		goto out_err;

	err = 0;

out_err:
	kzfree(desc);
	crypto_free_shash(hmac);
	dprintk("%s: returning %d\n", __func__, err);
	return err;
}