/*
 *  linux/net/sunrpc/gss_krb5_crypto.c
 *
 *  Copyright (c) 2000-2008 The Regents of the University of Michigan.
 *  All rights reserved.
 *
 *  Andy Adamson   <andros@umich.edu>
 *  Bruce Fields   <bfields@umich.edu>
 */

/*
 * Copyright (C) 1998 by the FundsXpress, INC.
 *
 * All rights reserved.
 *
 * Export of this software from the United States of America may require
 * a specific license from the United States Government.  It is the
 * responsibility of any person or organization contemplating export to
 * obtain such a license before exporting.
 *
 * WITHIN THAT CONSTRAINT, permission to use, copy, modify, and
 * distribute this software and its documentation for any purpose and
 * without fee is hereby granted, provided that the above copyright
 * notice appear in all copies and that both that copyright notice and
 * this permission notice appear in supporting documentation, and that
 * the name of FundsXpress. not be used in advertising or publicity pertaining
 * to distribution of the software without specific, written prior
 * permission.  FundsXpress makes no representations about the suitability of
 * this software for any purpose.  It is provided "as is" without express
 * or implied warranty.
 *
 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
 * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
 */

#include <crypto/algapi.h>
#include <crypto/hash.h>
#include <crypto/skcipher.h>
#include <linux/err.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/random.h>
#include <linux/sunrpc/gss_krb5.h>
#include <linux/sunrpc/xdr.h>

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
# define RPCDBG_FACILITY	RPCDBG_AUTH
#endif

u32
krb5_encrypt(
	struct crypto_sync_skcipher *tfm,
	void *iv,
	void *in,
	void *out,
	int length)
{
	u32 ret = -EINVAL;
	struct scatterlist sg[1];
	u8 local_iv[GSS_KRB5_MAX_BLOCKSIZE] = {0};
	SYNC_SKCIPHER_REQUEST_ON_STACK(req, tfm);

	if (length % crypto_sync_skcipher_blocksize(tfm) != 0)
		goto out;

	if (crypto_sync_skcipher_ivsize(tfm) > GSS_KRB5_MAX_BLOCKSIZE) {
		dprintk("RPC: gss_k5encrypt: tfm iv size too large %d\n",
			crypto_sync_skcipher_ivsize(tfm));
		goto out;
	}

	if (iv)
		memcpy(local_iv, iv, crypto_sync_skcipher_ivsize(tfm));

	memcpy(out, in, length);
	sg_init_one(sg, out, length);

	skcipher_request_set_sync_tfm(req, tfm);
	skcipher_request_set_callback(req, 0, NULL, NULL);
	skcipher_request_set_crypt(req, sg, sg, length, local_iv);

	ret = crypto_skcipher_encrypt(req);
	skcipher_request_zero(req);
out:
	dprintk("RPC: krb5_encrypt returns %d\n", ret);
	return ret;
}
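
/*
 * Illustrative sketch (not part of the original file): a minimal,
 * hypothetical caller of krb5_encrypt().  It encrypts a single cipher
 * block in place; a NULL iv argument selects an all-zero IV, and the
 * in/out buffers may alias because the data is copied before the
 * scatterlist is built.
 */
static u32 __maybe_unused gss_krb5_example_encrypt_block(
				struct crypto_sync_skcipher *tfm)
{
	u8 buf[GSS_KRB5_MAX_BLOCKSIZE] = { 0xde, 0xad, 0xbe, 0xef };

	/* length must be a multiple of the block size or -EINVAL results */
	return krb5_encrypt(tfm, NULL, buf, buf,
			    crypto_sync_skcipher_blocksize(tfm));
}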
u32
krb5_decrypt(
	struct crypto_sync_skcipher *tfm,
	void *iv,
	void *in,
	void *out,
	int length)
{
	u32 ret = -EINVAL;
	struct scatterlist sg[1];
	u8 local_iv[GSS_KRB5_MAX_BLOCKSIZE] = {0};
	SYNC_SKCIPHER_REQUEST_ON_STACK(req, tfm);

	if (length % crypto_sync_skcipher_blocksize(tfm) != 0)
		goto out;

	if (crypto_sync_skcipher_ivsize(tfm) > GSS_KRB5_MAX_BLOCKSIZE) {
		dprintk("RPC: gss_k5decrypt: tfm iv size too large %d\n",
			crypto_sync_skcipher_ivsize(tfm));
		goto out;
	}
	if (iv)
		memcpy(local_iv, iv, crypto_sync_skcipher_ivsize(tfm));

	memcpy(out, in, length);
	sg_init_one(sg, out, length);

	skcipher_request_set_sync_tfm(req, tfm);
	skcipher_request_set_callback(req, 0, NULL, NULL);
	skcipher_request_set_crypt(req, sg, sg, length, local_iv);

	ret = crypto_skcipher_decrypt(req);
	skcipher_request_zero(req);
out:
	dprintk("RPC: gss_k5decrypt returns %d\n", ret);
	return ret;
}

static int
checksummer(struct scatterlist *sg, void *data)
{
	struct ahash_request *req = data;

	ahash_request_set_crypt(req, sg, NULL, sg->length);

	return crypto_ahash_update(req);
}

static int
arcfour_hmac_md5_usage_to_salt(unsigned int usage, u8 salt[4])
{
	unsigned int ms_usage;

	switch (usage) {
	case KG_USAGE_SIGN:
		ms_usage = 15;
		break;
	case KG_USAGE_SEAL:
		ms_usage = 13;
		break;
	default:
		return -EINVAL;
	}
	salt[0] = (ms_usage >> 0) & 0xff;
	salt[1] = (ms_usage >> 8) & 0xff;
	salt[2] = (ms_usage >> 16) & 0xff;
	salt[3] = (ms_usage >> 24) & 0xff;

	return 0;
}
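
/*
 * Illustrative sketch (not part of the original file): the RC4 salt is
 * simply the Microsoft usage number serialized little-endian, so
 * KG_USAGE_SEAL (13) becomes { 0x0d, 0x00, 0x00, 0x00 }.
 */
static void __maybe_unused gss_krb5_example_rc4_salt(void)
{
	u8 salt[4];

	if (arcfour_hmac_md5_usage_to_salt(KG_USAGE_SEAL, salt) == 0)
		dprintk("RPC: rc4 salt %02x%02x%02x%02x\n",
			salt[0], salt[1], salt[2], salt[3]);
}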
static u32
make_checksum_hmac_md5(struct krb5_ctx *kctx, char *header, int hdrlen,
		       struct xdr_buf *body, int body_offset, u8 *cksumkey,
		       unsigned int usage, struct xdr_netobj *cksumout)
{
	struct scatterlist sg[1];
	int err = -1;
	u8 *checksumdata;
	u8 *rc4salt;
	struct crypto_ahash *md5;
	struct crypto_ahash *hmac_md5;
	struct ahash_request *req;

	if (cksumkey == NULL)
		return GSS_S_FAILURE;

	if (cksumout->len < kctx->gk5e->cksumlength) {
		dprintk("%s: checksum buffer length, %u, too small for %s\n",
			__func__, cksumout->len, kctx->gk5e->name);
		return GSS_S_FAILURE;
	}

	rc4salt = kmalloc_array(4, sizeof(*rc4salt), GFP_NOFS);
	if (!rc4salt)
		return GSS_S_FAILURE;

	if (arcfour_hmac_md5_usage_to_salt(usage, rc4salt)) {
		dprintk("%s: invalid usage value %u\n", __func__, usage);
		goto out_free_rc4salt;
	}

	checksumdata = kmalloc(GSS_KRB5_MAX_CKSUM_LEN, GFP_NOFS);
	if (!checksumdata)
		goto out_free_rc4salt;

	md5 = crypto_alloc_ahash("md5", 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(md5))
		goto out_free_cksum;

	hmac_md5 = crypto_alloc_ahash(kctx->gk5e->cksum_name, 0,
				      CRYPTO_ALG_ASYNC);
	if (IS_ERR(hmac_md5))
		goto out_free_md5;

	req = ahash_request_alloc(md5, GFP_NOFS);
	if (!req)
		goto out_free_hmac_md5;

	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP, NULL, NULL);

	err = crypto_ahash_init(req);
	if (err)
		goto out;
	sg_init_one(sg, rc4salt, 4);
	ahash_request_set_crypt(req, sg, NULL, 4);
	err = crypto_ahash_update(req);
	if (err)
		goto out;

	sg_init_one(sg, header, hdrlen);
	ahash_request_set_crypt(req, sg, NULL, hdrlen);
	err = crypto_ahash_update(req);
	if (err)
		goto out;
	err = xdr_process_buf(body, body_offset, body->len - body_offset,
			      checksummer, req);
	if (err)
		goto out;
	ahash_request_set_crypt(req, NULL, checksumdata, 0);
	err = crypto_ahash_final(req);
	if (err)
		goto out;

	ahash_request_free(req);
	req = ahash_request_alloc(hmac_md5, GFP_NOFS);
	if (!req)
		goto out_free_hmac_md5;

	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP, NULL, NULL);

	err = crypto_ahash_setkey(hmac_md5, cksumkey, kctx->gk5e->keylength);
	if (err)
		goto out;

	sg_init_one(sg, checksumdata, crypto_ahash_digestsize(md5));
	ahash_request_set_crypt(req, sg, checksumdata,
				crypto_ahash_digestsize(md5));
	err = crypto_ahash_digest(req);
	if (err)
		goto out;

	memcpy(cksumout->data, checksumdata, kctx->gk5e->cksumlength);
	cksumout->len = kctx->gk5e->cksumlength;
out:
	ahash_request_free(req);
out_free_hmac_md5:
	crypto_free_ahash(hmac_md5);
out_free_md5:
	crypto_free_ahash(md5);
out_free_cksum:
	kfree(checksumdata);
out_free_rc4salt:
	kfree(rc4salt);
	return err ? GSS_S_FAILURE : 0;
}

/*
 * checksum the plaintext data and hdrlen bytes of the token header
 * The checksum is performed over the first 8 bytes of the
 * gss token header and then over the data body
 */
u32
make_checksum(struct krb5_ctx *kctx, char *header, int hdrlen,
	      struct xdr_buf *body, int body_offset, u8 *cksumkey,
	      unsigned int usage, struct xdr_netobj *cksumout)
{
	struct crypto_ahash *tfm;
	struct ahash_request *req;
	struct scatterlist sg[1];
	int err = -1;
	u8 *checksumdata;
	unsigned int checksumlen;

	if (kctx->gk5e->ctype == CKSUMTYPE_HMAC_MD5_ARCFOUR)
		return make_checksum_hmac_md5(kctx, header, hdrlen,
					      body, body_offset,
					      cksumkey, usage, cksumout);

	if (cksumout->len < kctx->gk5e->cksumlength) {
		dprintk("%s: checksum buffer length, %u, too small for %s\n",
			__func__, cksumout->len, kctx->gk5e->name);
		return GSS_S_FAILURE;
	}

	checksumdata = kmalloc(GSS_KRB5_MAX_CKSUM_LEN, GFP_NOFS);
	if (checksumdata == NULL)
		return GSS_S_FAILURE;

	tfm = crypto_alloc_ahash(kctx->gk5e->cksum_name, 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(tfm))
		goto out_free_cksum;

	req = ahash_request_alloc(tfm, GFP_NOFS);
	if (!req)
		goto out_free_ahash;

	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP, NULL, NULL);

	checksumlen = crypto_ahash_digestsize(tfm);

	if (cksumkey != NULL) {
		err = crypto_ahash_setkey(tfm, cksumkey,
					  kctx->gk5e->keylength);
		if (err)
			goto out;
	}

	err = crypto_ahash_init(req);
	if (err)
		goto out;
	sg_init_one(sg, header, hdrlen);
	ahash_request_set_crypt(req, sg, NULL, hdrlen);
	err = crypto_ahash_update(req);
	if (err)
		goto out;
	err = xdr_process_buf(body, body_offset, body->len - body_offset,
			      checksummer, req);
	if (err)
		goto out;
	ahash_request_set_crypt(req, NULL, checksumdata, 0);
	err = crypto_ahash_final(req);
	if (err)
		goto out;

	switch (kctx->gk5e->ctype) {
	case CKSUMTYPE_RSA_MD5:
		err = kctx->gk5e->encrypt(kctx->seq, NULL, checksumdata,
					  checksumdata, checksumlen);
		if (err)
			goto out;
		memcpy(cksumout->data,
		       checksumdata + checksumlen - kctx->gk5e->cksumlength,
		       kctx->gk5e->cksumlength);
		break;
	case CKSUMTYPE_HMAC_SHA1_DES3:
		memcpy(cksumout->data, checksumdata, kctx->gk5e->cksumlength);
		break;
	default:
		BUG();
		break;
	}
	cksumout->len = kctx->gk5e->cksumlength;
out:
	ahash_request_free(req);
out_free_ahash:
	crypto_free_ahash(tfm);
out_free_cksum:
	kfree(checksumdata);
	return err ? GSS_S_FAILURE : 0;
}
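
/*
 * Illustrative sketch (not part of the original file): a hypothetical
 * caller computing a MIC with make_checksum().  The digest covers the
 * first 8 bytes of the token header and then the body, and is
 * truncated to the enctype's cksumlength on output.
 */
static u32 __maybe_unused gss_krb5_example_make_mic(struct krb5_ctx *kctx,
						    char *hdr,
						    struct xdr_buf *body,
						    u8 *key)
{
	u8 digest[GSS_KRB5_MAX_CKSUM_LEN];
	struct xdr_netobj mic = { .len = sizeof(digest), .data = digest };

	return make_checksum(kctx, hdr, 8, body, 0, key, KG_USAGE_SIGN, &mic);
}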
/*
 * checksum the plaintext data and hdrlen bytes of the token header
 * Per rfc4121, sec. 4.2.4, the checksum is performed over the data
 * body then over the first 16 octets of the MIC token
 * Inclusion of the header data in the calculation of the
 * checksum is optional.
 */
u32
make_checksum_v2(struct krb5_ctx *kctx, char *header, int hdrlen,
		 struct xdr_buf *body, int body_offset, u8 *cksumkey,
		 unsigned int usage, struct xdr_netobj *cksumout)
{
	struct crypto_ahash *tfm;
	struct ahash_request *req;
	struct scatterlist sg[1];
	int err = -1;
	u8 *checksumdata;

	if (kctx->gk5e->keyed_cksum == 0) {
		dprintk("%s: expected keyed hash for %s\n",
			__func__, kctx->gk5e->name);
		return GSS_S_FAILURE;
	}
	if (cksumkey == NULL) {
		dprintk("%s: no key supplied for %s\n",
			__func__, kctx->gk5e->name);
		return GSS_S_FAILURE;
	}

	checksumdata = kmalloc(GSS_KRB5_MAX_CKSUM_LEN, GFP_NOFS);
	if (!checksumdata)
		return GSS_S_FAILURE;

	tfm = crypto_alloc_ahash(kctx->gk5e->cksum_name, 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(tfm))
		goto out_free_cksum;

	req = ahash_request_alloc(tfm, GFP_NOFS);
	if (!req)
		goto out_free_ahash;

	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP, NULL, NULL);

	err = crypto_ahash_setkey(tfm, cksumkey, kctx->gk5e->keylength);
	if (err)
		goto out;

	err = crypto_ahash_init(req);
	if (err)
		goto out;
	err = xdr_process_buf(body, body_offset, body->len - body_offset,
			      checksummer, req);
	if (err)
		goto out;
	if (header != NULL) {
		sg_init_one(sg, header, hdrlen);
		ahash_request_set_crypt(req, sg, NULL, hdrlen);
		err = crypto_ahash_update(req);
		if (err)
			goto out;
	}
	ahash_request_set_crypt(req, NULL, checksumdata, 0);
	err = crypto_ahash_final(req);
	if (err)
		goto out;

	cksumout->len = kctx->gk5e->cksumlength;

	switch (kctx->gk5e->ctype) {
	case CKSUMTYPE_HMAC_SHA1_96_AES128:
	case CKSUMTYPE_HMAC_SHA1_96_AES256:
		/* note that this truncates the hash */
		memcpy(cksumout->data, checksumdata, kctx->gk5e->cksumlength);
		break;
	default:
		BUG();
		break;
	}
out:
	ahash_request_free(req);
out_free_ahash:
	crypto_free_ahash(tfm);
out_free_cksum:
	kfree(checksumdata);
	return err ? GSS_S_FAILURE : 0;
}

struct encryptor_desc {
	u8 iv[GSS_KRB5_MAX_BLOCKSIZE];
	struct skcipher_request *req;
	int pos;
	struct xdr_buf *outbuf;
	struct page **pages;
	struct scatterlist infrags[4];
	struct scatterlist outfrags[4];
	int fragno;
	int fraglen;
};
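
/*
 * Added illustration (not in the original file): the 4-entry
 * scatterlists above cover the worst case, where one block-aligned
 * run touches every region of the xdr_buf:
 *
 *	frag 0: remainder of buf->head[0]
 *	frag 1: tail end of page N
 *	frag 2: start of page N+1
 *	frag 3: start of buf->tail[0]
 */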
static int
encryptor(struct scatterlist *sg, void *data)
{
	struct encryptor_desc *desc = data;
	struct xdr_buf *outbuf = desc->outbuf;
	struct crypto_sync_skcipher *tfm =
		crypto_sync_skcipher_reqtfm(desc->req);
	struct page *in_page;
	int thislen = desc->fraglen + sg->length;
	int fraglen, ret;
	int page_pos;

	/* Worst case is 4 fragments: head, end of page 1, start
	 * of page 2, tail.  Anything more is a bug. */
	BUG_ON(desc->fragno > 3);

	page_pos = desc->pos - outbuf->head[0].iov_len;
	if (page_pos >= 0 && page_pos < outbuf->page_len) {
		/* pages are not in place: */
		int i = (page_pos + outbuf->page_base) >> PAGE_SHIFT;
		in_page = desc->pages[i];
	} else {
		in_page = sg_page(sg);
	}
	sg_set_page(&desc->infrags[desc->fragno], in_page, sg->length,
		    sg->offset);
	sg_set_page(&desc->outfrags[desc->fragno], sg_page(sg), sg->length,
		    sg->offset);
	desc->fragno++;
	desc->fraglen += sg->length;
	desc->pos += sg->length;

	fraglen = thislen & (crypto_sync_skcipher_blocksize(tfm) - 1);
	thislen -= fraglen;

	if (thislen == 0)
		return 0;

	sg_mark_end(&desc->infrags[desc->fragno - 1]);
	sg_mark_end(&desc->outfrags[desc->fragno - 1]);

	skcipher_request_set_crypt(desc->req, desc->infrags, desc->outfrags,
				   thislen, desc->iv);

	ret = crypto_skcipher_encrypt(desc->req);
	if (ret)
		return ret;

	sg_init_table(desc->infrags, 4);
	sg_init_table(desc->outfrags, 4);

	if (fraglen) {
		sg_set_page(&desc->outfrags[0], sg_page(sg), fraglen,
			    sg->offset + sg->length - fraglen);
		desc->infrags[0] = desc->outfrags[0];
		sg_assign_page(&desc->infrags[0], in_page);
		desc->fragno = 1;
		desc->fraglen = fraglen;
	} else {
		desc->fragno = 0;
		desc->fraglen = 0;
	}
	return 0;
}

int
gss_encrypt_xdr_buf(struct crypto_sync_skcipher *tfm, struct xdr_buf *buf,
		    int offset, struct page **pages)
{
	int ret;
	struct encryptor_desc desc;
	SYNC_SKCIPHER_REQUEST_ON_STACK(req, tfm);

	BUG_ON((buf->len - offset) % crypto_sync_skcipher_blocksize(tfm) != 0);

	skcipher_request_set_sync_tfm(req, tfm);
	skcipher_request_set_callback(req, 0, NULL, NULL);

	memset(desc.iv, 0, sizeof(desc.iv));
	desc.req = req;
	desc.pos = offset;
	desc.outbuf = buf;
	desc.pages = pages;
	desc.fragno = 0;
	desc.fraglen = 0;

	sg_init_table(desc.infrags, 4);
	sg_init_table(desc.outfrags, 4);

	ret = xdr_process_buf(buf, offset, buf->len - offset, encryptor, &desc);
	skcipher_request_zero(req);
	return ret;
}
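
/*
 * Illustrative sketch (not part of the original file): a hypothetical
 * wrapper showing the alignment contract of gss_encrypt_xdr_buf().
 * `pages` supplies the cleartext page-cache pages; the ciphertext is
 * written in place into buf->pages.
 */
static int __maybe_unused gss_krb5_example_encrypt_buf(
				struct crypto_sync_skcipher *tfm,
				struct xdr_buf *buf, int offset,
				struct page **pages)
{
	/* the caller must pad first; gss_encrypt_xdr_buf() BUG()s on a
	 * misaligned length */
	if ((buf->len - offset) % crypto_sync_skcipher_blocksize(tfm) != 0)
		return -EINVAL;

	return gss_encrypt_xdr_buf(tfm, buf, offset, pages);
}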
struct decryptor_desc {
	u8 iv[GSS_KRB5_MAX_BLOCKSIZE];
	struct skcipher_request *req;
	struct scatterlist frags[4];
	int fragno;
	int fraglen;
};

static int
decryptor(struct scatterlist *sg, void *data)
{
	struct decryptor_desc *desc = data;
	int thislen = desc->fraglen + sg->length;
	struct crypto_sync_skcipher *tfm =
		crypto_sync_skcipher_reqtfm(desc->req);
	int fraglen, ret;

	/* Worst case is 4 fragments: head, end of page 1, start
	 * of page 2, tail.  Anything more is a bug. */
	BUG_ON(desc->fragno > 3);
	sg_set_page(&desc->frags[desc->fragno], sg_page(sg), sg->length,
		    sg->offset);
	desc->fragno++;
	desc->fraglen += sg->length;

	fraglen = thislen & (crypto_sync_skcipher_blocksize(tfm) - 1);
	thislen -= fraglen;

	if (thislen == 0)
		return 0;

	sg_mark_end(&desc->frags[desc->fragno - 1]);

	skcipher_request_set_crypt(desc->req, desc->frags, desc->frags,
				   thislen, desc->iv);

	ret = crypto_skcipher_decrypt(desc->req);
	if (ret)
		return ret;

	sg_init_table(desc->frags, 4);

	if (fraglen) {
		sg_set_page(&desc->frags[0], sg_page(sg), fraglen,
			    sg->offset + sg->length - fraglen);
		desc->fragno = 1;
		desc->fraglen = fraglen;
	} else {
		desc->fragno = 0;
		desc->fraglen = 0;
	}
	return 0;
}

int
gss_decrypt_xdr_buf(struct crypto_sync_skcipher *tfm, struct xdr_buf *buf,
		    int offset)
{
	int ret;
	struct decryptor_desc desc;
	SYNC_SKCIPHER_REQUEST_ON_STACK(req, tfm);

	/* XXXJBF: */
	BUG_ON((buf->len - offset) % crypto_sync_skcipher_blocksize(tfm) != 0);

	skcipher_request_set_sync_tfm(req, tfm);
	skcipher_request_set_callback(req, 0, NULL, NULL);

	memset(desc.iv, 0, sizeof(desc.iv));
	desc.req = req;
	desc.fragno = 0;
	desc.fraglen = 0;

	sg_init_table(desc.frags, 4);

	ret = xdr_process_buf(buf, offset, buf->len - offset, decryptor, &desc);
	skcipher_request_zero(req);
	return ret;
}

/*
 * This function makes the assumption that it was ultimately called
 * from gss_wrap().
 *
 * The client auth_gss code moves any existing tail data into a
 * separate page before calling gss_wrap.
 * The server svcauth_gss code ensures that both the head and the
 * tail have slack space of RPC_MAX_AUTH_SIZE before calling gss_wrap.
 *
 * Even with that guarantee, this function may be called more than
 * once in the processing of gss_wrap().  The best we can do is
 * verify at compile-time (see GSS_KRB5_SLACK_CHECK) that the
 * largest expected shift will fit within RPC_MAX_AUTH_SIZE.
 * At run-time we can verify that a single invocation of this
 * function doesn't attempt to use more than RPC_MAX_AUTH_SIZE.
 */

int
xdr_extend_head(struct xdr_buf *buf, unsigned int base, unsigned int shiftlen)
{
	u8 *p;

	if (shiftlen == 0)
		return 0;

	BUILD_BUG_ON(GSS_KRB5_MAX_SLACK_NEEDED > RPC_MAX_AUTH_SIZE);
	BUG_ON(shiftlen > RPC_MAX_AUTH_SIZE);

	p = buf->head[0].iov_base + base;

	memmove(p + shiftlen, p, buf->head[0].iov_len - base);

	buf->head[0].iov_len += shiftlen;
	buf->len += shiftlen;

	return 0;
}
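
/*
 * Added worked example (not in the original file): with base = 8 and
 * shiftlen = 16, xdr_extend_head() moves the bytes at
 * head[0].iov_base + 8 up to head[0].iov_base + 24 and grows both
 * head[0].iov_len and buf->len by 16, opening a 16-byte hole into
 * which the confounder is later written.  The head kvec must already
 * have at least shiftlen bytes of slack; the BUG_ON above bounds
 * shiftlen by RPC_MAX_AUTH_SIZE.
 */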
static u32
gss_krb5_cts_crypt(struct crypto_sync_skcipher *cipher, struct xdr_buf *buf,
		   u32 offset, u8 *iv, struct page **pages, int encrypt)
{
	u32 ret;
	struct scatterlist sg[1];
	SYNC_SKCIPHER_REQUEST_ON_STACK(req, cipher);
	u8 *data;
	struct page **save_pages;
	u32 len = buf->len - offset;

	if (len > GSS_KRB5_MAX_BLOCKSIZE * 2) {
		WARN_ON(0);
		return -ENOMEM;
	}
	data = kmalloc(GSS_KRB5_MAX_BLOCKSIZE * 2, GFP_NOFS);
	if (!data)
		return -ENOMEM;

	/*
	 * For encryption, we want to read from the cleartext
	 * page cache pages, and write the encrypted data to
	 * the supplied xdr_buf pages.
	 */
	save_pages = buf->pages;
	if (encrypt)
		buf->pages = pages;

	ret = read_bytes_from_xdr_buf(buf, offset, data, len);
	buf->pages = save_pages;
	if (ret)
		goto out;

	sg_init_one(sg, data, len);

	skcipher_request_set_sync_tfm(req, cipher);
	skcipher_request_set_callback(req, 0, NULL, NULL);
	skcipher_request_set_crypt(req, sg, sg, len, iv);

	if (encrypt)
		ret = crypto_skcipher_encrypt(req);
	else
		ret = crypto_skcipher_decrypt(req);

	skcipher_request_zero(req);

	if (ret)
		goto out;

	ret = write_bytes_to_xdr_buf(buf, offset, data, len);

out:
	kfree(data);
	return ret;
}
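
/*
 * Illustrative sketch (not part of the original file): a hypothetical
 * caller finishing an encryption with gss_krb5_cts_crypt().  Only the
 * final region, at most two cipher blocks, goes through the CTS
 * cipher; `iv` chains in the last CBC block from the bulk pass.
 */
static u32 __maybe_unused gss_krb5_example_cts_tail(struct krb5_ctx *kctx,
						    struct xdr_buf *buf,
						    u32 cts_offset, u8 *iv,
						    struct page **pages)
{
	return gss_krb5_cts_crypt(kctx->initiator_enc, buf, cts_offset,
				  iv, pages, 1);
}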
u32
gss_krb5_aes_encrypt(struct krb5_ctx *kctx, u32 offset,
		     struct xdr_buf *buf, struct page **pages)
{
	u32 err;
	struct xdr_netobj hmac;
	u8 *cksumkey;
	u8 *ecptr;
	struct crypto_sync_skcipher *cipher, *aux_cipher;
	int blocksize;
	struct page **save_pages;
	int nblocks, nbytes;
	struct encryptor_desc desc;
	u32 cbcbytes;
	unsigned int usage;

	if (kctx->initiate) {
		cipher = kctx->initiator_enc;
		aux_cipher = kctx->initiator_enc_aux;
		cksumkey = kctx->initiator_integ;
		usage = KG_USAGE_INITIATOR_SEAL;
	} else {
		cipher = kctx->acceptor_enc;
		aux_cipher = kctx->acceptor_enc_aux;
		cksumkey = kctx->acceptor_integ;
		usage = KG_USAGE_ACCEPTOR_SEAL;
	}
	blocksize = crypto_sync_skcipher_blocksize(cipher);

	/* hide the gss token header and insert the confounder */
	offset += GSS_KRB5_TOK_HDR_LEN;
	if (xdr_extend_head(buf, offset, kctx->gk5e->conflen))
		return GSS_S_FAILURE;
	gss_krb5_make_confounder(buf->head[0].iov_base + offset, kctx->gk5e->conflen);
	offset -= GSS_KRB5_TOK_HDR_LEN;

	if (buf->tail[0].iov_base != NULL) {
		ecptr = buf->tail[0].iov_base + buf->tail[0].iov_len;
	} else {
		buf->tail[0].iov_base = buf->head[0].iov_base
					+ buf->head[0].iov_len;
		buf->tail[0].iov_len = 0;
		ecptr = buf->tail[0].iov_base;
	}

	/* copy plaintext gss token header after filler (if any) */
	memcpy(ecptr, buf->head[0].iov_base + offset, GSS_KRB5_TOK_HDR_LEN);
	buf->tail[0].iov_len += GSS_KRB5_TOK_HDR_LEN;
	buf->len += GSS_KRB5_TOK_HDR_LEN;

	/* Do the HMAC */
	hmac.len = GSS_KRB5_MAX_CKSUM_LEN;
	hmac.data = buf->tail[0].iov_base + buf->tail[0].iov_len;

	/*
	 * When we are called, pages points to the real page cache
	 * data -- which we can't go and encrypt!  buf->pages points
	 * to scratch pages which we are going to send off to the
	 * client/server.  Swap in the plaintext pages to calculate
	 * the hmac.
	 */
	save_pages = buf->pages;
	buf->pages = pages;

	err = make_checksum_v2(kctx, NULL, 0, buf,
			       offset + GSS_KRB5_TOK_HDR_LEN,
			       cksumkey, usage, &hmac);
	buf->pages = save_pages;
	if (err)
		return GSS_S_FAILURE;

	nbytes = buf->len - offset - GSS_KRB5_TOK_HDR_LEN;
	nblocks = (nbytes + blocksize - 1) / blocksize;
	cbcbytes = 0;
	if (nblocks > 2)
		cbcbytes = (nblocks - 2) * blocksize;

	memset(desc.iv, 0, sizeof(desc.iv));

	if (cbcbytes) {
		SYNC_SKCIPHER_REQUEST_ON_STACK(req, aux_cipher);

		desc.pos = offset + GSS_KRB5_TOK_HDR_LEN;
		desc.fragno = 0;
		desc.fraglen = 0;
		desc.pages = pages;
		desc.outbuf = buf;
		desc.req = req;

		skcipher_request_set_sync_tfm(req, aux_cipher);
		skcipher_request_set_callback(req, 0, NULL, NULL);

		sg_init_table(desc.infrags, 4);
		sg_init_table(desc.outfrags, 4);

		err = xdr_process_buf(buf, offset + GSS_KRB5_TOK_HDR_LEN,
				      cbcbytes, encryptor, &desc);
		skcipher_request_zero(req);
		if (err)
			goto out_err;
	}

	/* Make sure IV carries forward from any CBC results. */
	err = gss_krb5_cts_crypt(cipher, buf,
				 offset + GSS_KRB5_TOK_HDR_LEN + cbcbytes,
				 desc.iv, pages, 1);
	if (err) {
		err = GSS_S_FAILURE;
		goto out_err;
	}

	/* Now update buf to account for HMAC */
	buf->tail[0].iov_len += kctx->gk5e->cksumlength;
	buf->len += kctx->gk5e->cksumlength;

out_err:
	if (err)
		err = GSS_S_FAILURE;
	return err;
}
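
/*
 * Added illustration (not in the original file): layout of the wrap
 * token payload produced above, assuming the usual AES enctype sizes
 * (16-byte confounder, 16-byte token header, 12-byte truncated HMAC):
 *
 *	+------------+----------------+-------------------+------+
 *	| confounder | plaintext data | copy of token hdr | HMAC |
 *	+------------+----------------+-------------------+------+
 *	\________ encrypted (CBC, with CTS on the tail) ___/
 */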
u32
gss_krb5_aes_decrypt(struct krb5_ctx *kctx, u32 offset, u32 len,
		     struct xdr_buf *buf, u32 *headskip, u32 *tailskip)
{
	struct xdr_buf subbuf;
	u32 ret = 0;
	u8 *cksum_key;
	struct crypto_sync_skcipher *cipher, *aux_cipher;
	struct xdr_netobj our_hmac_obj;
	u8 our_hmac[GSS_KRB5_MAX_CKSUM_LEN];
	u8 pkt_hmac[GSS_KRB5_MAX_CKSUM_LEN];
	int nblocks, blocksize, cbcbytes;
	struct decryptor_desc desc;
	unsigned int usage;

	if (kctx->initiate) {
		cipher = kctx->acceptor_enc;
		aux_cipher = kctx->acceptor_enc_aux;
		cksum_key = kctx->acceptor_integ;
		usage = KG_USAGE_ACCEPTOR_SEAL;
	} else {
		cipher = kctx->initiator_enc;
		aux_cipher = kctx->initiator_enc_aux;
		cksum_key = kctx->initiator_integ;
		usage = KG_USAGE_INITIATOR_SEAL;
	}
	blocksize = crypto_sync_skcipher_blocksize(cipher);

	/* create a segment skipping the header and leaving out the checksum */
	xdr_buf_subsegment(buf, &subbuf, offset + GSS_KRB5_TOK_HDR_LEN,
			   (len - offset - GSS_KRB5_TOK_HDR_LEN -
			    kctx->gk5e->cksumlength));

	nblocks = (subbuf.len + blocksize - 1) / blocksize;

	cbcbytes = 0;
	if (nblocks > 2)
		cbcbytes = (nblocks - 2) * blocksize;

	memset(desc.iv, 0, sizeof(desc.iv));

	if (cbcbytes) {
		SYNC_SKCIPHER_REQUEST_ON_STACK(req, aux_cipher);

		desc.fragno = 0;
		desc.fraglen = 0;
		desc.req = req;

		skcipher_request_set_sync_tfm(req, aux_cipher);
		skcipher_request_set_callback(req, 0, NULL, NULL);

		sg_init_table(desc.frags, 4);

		ret = xdr_process_buf(&subbuf, 0, cbcbytes, decryptor, &desc);
		skcipher_request_zero(req);
		if (ret)
			goto out_err;
	}

	/* Make sure IV carries forward from any CBC results. */
	ret = gss_krb5_cts_crypt(cipher, &subbuf, cbcbytes, desc.iv, NULL, 0);
	if (ret)
		goto out_err;

	/* Calculate our hmac over the plaintext data */
	our_hmac_obj.len = sizeof(our_hmac);
	our_hmac_obj.data = our_hmac;

	ret = make_checksum_v2(kctx, NULL, 0, &subbuf, 0,
			       cksum_key, usage, &our_hmac_obj);
	if (ret)
		goto out_err;

	/* Get the packet's hmac value */
	ret = read_bytes_from_xdr_buf(buf, len - kctx->gk5e->cksumlength,
				      pkt_hmac, kctx->gk5e->cksumlength);
	if (ret)
		goto out_err;

	if (crypto_memneq(pkt_hmac, our_hmac, kctx->gk5e->cksumlength) != 0) {
		ret = GSS_S_BAD_SIG;
		goto out_err;
	}
	*headskip = kctx->gk5e->conflen;
	*tailskip = kctx->gk5e->cksumlength;
out_err:
	if (ret && ret != GSS_S_BAD_SIG)
		ret = GSS_S_FAILURE;
	return ret;
}

/*
 * Compute Kseq given the initial session key and the checksum.
 * Set the key of the given cipher.
 */
int
krb5_rc4_setup_seq_key(struct krb5_ctx *kctx,
		       struct crypto_sync_skcipher *cipher,
		       unsigned char *cksum)
{
	struct crypto_shash *hmac;
	struct shash_desc *desc;
	u8 Kseq[GSS_KRB5_MAX_KEYLEN];
	u32 zeroconstant = 0;
	int err;

	dprintk("%s: entered\n", __func__);

	hmac = crypto_alloc_shash(kctx->gk5e->cksum_name, 0, 0);
	if (IS_ERR(hmac)) {
		dprintk("%s: error %ld, allocating hash '%s'\n",
			__func__, PTR_ERR(hmac), kctx->gk5e->cksum_name);
		return PTR_ERR(hmac);
	}

	desc = kmalloc(sizeof(*desc) + crypto_shash_descsize(hmac),
		       GFP_NOFS);
	if (!desc) {
		dprintk("%s: failed to allocate shash descriptor for '%s'\n",
			__func__, kctx->gk5e->cksum_name);
		crypto_free_shash(hmac);
		return -ENOMEM;
	}

	desc->tfm = hmac;

	/* Compute intermediate Kseq from session key */
	err = crypto_shash_setkey(hmac, kctx->Ksess, kctx->gk5e->keylength);
	if (err)
		goto out_err;

	err = crypto_shash_digest(desc, (u8 *)&zeroconstant, 4, Kseq);
	if (err)
		goto out_err;

	/* Compute final Kseq from the checksum and intermediate Kseq */
	err = crypto_shash_setkey(hmac, Kseq, kctx->gk5e->keylength);
	if (err)
		goto out_err;

	err = crypto_shash_digest(desc, cksum, 8, Kseq);
	if (err)
		goto out_err;

	err = crypto_sync_skcipher_setkey(cipher, Kseq, kctx->gk5e->keylength);
	if (err)
		goto out_err;

	err = 0;

out_err:
	kfree_sensitive(desc);
	crypto_free_shash(hmac);
	dprintk("%s: returning %d\n", __func__, err);
	return err;
}
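
/*
 * Added summary (not in the original file) of the Kseq derivation
 * implemented above, per RFC 4757:
 *
 *	Kseq' = HMAC-MD5(Ksess, 0x00000000)
 *	Kseq  = HMAC-MD5(Kseq', first 8 bytes of the token checksum)
 *
 * Kseq then keys the RC4 cipher that protects the sequence number.
 */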
/*
 * Compute Kcrypt given the initial session key and the plaintext seqnum.
 * Set the key of cipher kctx->enc.
 */
int
krb5_rc4_setup_enc_key(struct krb5_ctx *kctx,
		       struct crypto_sync_skcipher *cipher,
		       s32 seqnum)
{
	struct crypto_shash *hmac;
	struct shash_desc *desc;
	u8 Kcrypt[GSS_KRB5_MAX_KEYLEN];
	u8 zeroconstant[4] = {0};
	u8 seqnumarray[4];
	int err, i;

	dprintk("%s: entered, seqnum %u\n", __func__, seqnum);

	hmac = crypto_alloc_shash(kctx->gk5e->cksum_name, 0, 0);
	if (IS_ERR(hmac)) {
		dprintk("%s: error %ld, allocating hash '%s'\n",
			__func__, PTR_ERR(hmac), kctx->gk5e->cksum_name);
		return PTR_ERR(hmac);
	}

	desc = kmalloc(sizeof(*desc) + crypto_shash_descsize(hmac),
		       GFP_NOFS);
	if (!desc) {
		dprintk("%s: failed to allocate shash descriptor for '%s'\n",
			__func__, kctx->gk5e->cksum_name);
		crypto_free_shash(hmac);
		return -ENOMEM;
	}

	desc->tfm = hmac;

	/* Compute intermediate Kcrypt from session key */
	for (i = 0; i < kctx->gk5e->keylength; i++)
		Kcrypt[i] = kctx->Ksess[i] ^ 0xf0;

	err = crypto_shash_setkey(hmac, Kcrypt, kctx->gk5e->keylength);
	if (err)
		goto out_err;

	err = crypto_shash_digest(desc, zeroconstant, 4, Kcrypt);
	if (err)
		goto out_err;

	/* Compute final Kcrypt from the seqnum and intermediate Kcrypt */
	err = crypto_shash_setkey(hmac, Kcrypt, kctx->gk5e->keylength);
	if (err)
		goto out_err;

	seqnumarray[0] = (unsigned char) ((seqnum >> 24) & 0xff);
	seqnumarray[1] = (unsigned char) ((seqnum >> 16) & 0xff);
	seqnumarray[2] = (unsigned char) ((seqnum >> 8) & 0xff);
	seqnumarray[3] = (unsigned char) ((seqnum >> 0) & 0xff);

	err = crypto_shash_digest(desc, seqnumarray, 4, Kcrypt);
	if (err)
		goto out_err;

	err = crypto_sync_skcipher_setkey(cipher, Kcrypt,
					  kctx->gk5e->keylength);
	if (err)
		goto out_err;

	err = 0;

out_err:
	kfree_sensitive(desc);
	crypto_free_shash(hmac);
	dprintk("%s: returning %d\n", __func__, err);
	return err;
}
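
/*
 * Added summary (not in the original file) of the Kcrypt derivation
 * implemented above, per RFC 4757:
 *
 *	Ktmp    = Ksess with each byte XORed with 0xf0
 *	Kcrypt' = HMAC-MD5(Ktmp, 0x00000000)
 *	Kcrypt  = HMAC-MD5(Kcrypt', seqnum as 4 big-endian bytes)
 */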