/*
 *  linux/net/sunrpc/gss_krb5_crypto.c
 *
 *  Copyright (c) 2000-2008 The Regents of the University of Michigan.
 *  All rights reserved.
 *
 *  Andy Adamson   <andros@umich.edu>
 *  Bruce Fields   <bfields@umich.edu>
 */

/*
 * Copyright (C) 1998 by the FundsXpress, INC.
 *
 * All rights reserved.
 *
 * Export of this software from the United States of America may require
 * a specific license from the United States Government.  It is the
 * responsibility of any person or organization contemplating export to
 * obtain such a license before exporting.
 *
 * WITHIN THAT CONSTRAINT, permission to use, copy, modify, and
 * distribute this software and its documentation for any purpose and
 * without fee is hereby granted, provided that the above copyright
 * notice appear in all copies and that both that copyright notice and
 * this permission notice appear in supporting documentation, and that
 * the name of FundsXpress. not be used in advertising or publicity pertaining
 * to distribution of the software without specific, written prior
 * permission.  FundsXpress makes no representations about the suitability of
 * this software for any purpose.  It is provided "as is" without express
 * or implied warranty.
 *
 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
 * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
 */

#include <crypto/algapi.h>
#include <crypto/hash.h>
#include <crypto/skcipher.h>
#include <linux/err.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/random.h>
#include <linux/sunrpc/gss_krb5.h>
#include <linux/sunrpc/xdr.h>

#include "gss_krb5_internal.h"

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
# define RPCDBG_FACILITY        RPCDBG_AUTH
#endif

/**
 * krb5_make_confounder - Generate a confounder string
 * @p: memory location into which to write the string
 * @conflen: string length to write, in octets
 *
 * RFCs 1964 and 3961 mention only "a random confounder" without going
 * into detail about its function or cryptographic requirements. The
 * assumed purpose is to prevent repeated encryption of a plaintext with
 * the same key from generating the same ciphertext. It also pads the
 * minimum plaintext length to at least a single cipher block.
 *
 * However, in situations like the GSS Kerberos 5 mechanism, where the
 * encryption IV is always all zeroes, the confounder also effectively
 * functions like an IV. Thus, not only must it be unique from message
 * to message, but it must also be difficult to predict. Otherwise an
 * attacker can correlate the confounder to previous or future values,
 * making the encryption easier to break.
 *
 * Given that the primary consumer of this encryption mechanism is a
 * network storage protocol, a type of traffic that often carries
 * predictable payloads (e.g., all zeroes when reading unallocated
 * blocks from a file), our confounder generation has to be
 * cryptographically strong.
 */
void krb5_make_confounder(u8 *p, int conflen)
{
	get_random_bytes(p, conflen);
}
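
/*
 * Example (an illustrative sketch, not part of this file): writing a
 * block-sized confounder at the front of a plaintext buffer before
 * encryption. The buffer name and the MSG_LEN constant are assumptions
 * made for the sake of the example.
 *
 *	u8 plain[GSS_KRB5_MAX_BLOCKSIZE + MSG_LEN];
 *
 *	krb5_make_confounder(plain, GSS_KRB5_MAX_BLOCKSIZE);
 *	memcpy(plain + GSS_KRB5_MAX_BLOCKSIZE, msg, MSG_LEN);
 */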

/**
 * krb5_encrypt - simple encryption of an RPCSEC GSS payload
 * @tfm: initialized cipher transform
 * @iv: pointer to an IV
 * @in: plaintext to encrypt
 * @out: OUT: ciphertext
 * @length: length of input and output buffers, in bytes
 *
 * @iv may be NULL to force the use of an all-zero IV.
 * The buffer containing the IV must be as large as the
 * cipher's ivsize.
 *
 * Return values:
 *   %0: @in successfully encrypted into @out
 *   negative errno: @in not encrypted
 */
u32
krb5_encrypt(
	struct crypto_sync_skcipher *tfm,
	void *iv,
	void *in,
	void *out,
	int length)
{
	u32 ret = -EINVAL;
	struct scatterlist sg[1];
	u8 local_iv[GSS_KRB5_MAX_BLOCKSIZE] = {0};
	SYNC_SKCIPHER_REQUEST_ON_STACK(req, tfm);

	if (length % crypto_sync_skcipher_blocksize(tfm) != 0)
		goto out;

	if (crypto_sync_skcipher_ivsize(tfm) > GSS_KRB5_MAX_BLOCKSIZE) {
		dprintk("RPC:       gss_k5encrypt: tfm iv size too large %d\n",
			crypto_sync_skcipher_ivsize(tfm));
		goto out;
	}

	if (iv)
		memcpy(local_iv, iv, crypto_sync_skcipher_ivsize(tfm));

	memcpy(out, in, length);
	sg_init_one(sg, out, length);

	skcipher_request_set_sync_tfm(req, tfm);
	skcipher_request_set_callback(req, 0, NULL, NULL);
	skcipher_request_set_crypt(req, sg, sg, length, local_iv);

	ret = crypto_skcipher_encrypt(req);
	skcipher_request_zero(req);
out:
	dprintk("RPC:       krb5_encrypt returns %d\n", ret);
	return ret;
}

/**
 * krb5_decrypt - simple decryption of an RPCSEC GSS payload
 * @tfm: initialized cipher transform
 * @iv: pointer to an IV
 * @in: ciphertext to decrypt
 * @out: OUT: plaintext
 * @length: length of input and output buffers, in bytes
 *
 * @iv may be NULL to force the use of an all-zero IV.
 * The buffer containing the IV must be as large as the
 * cipher's ivsize.
 *
 * Return values:
 *   %0: @in successfully decrypted into @out
 *   negative errno: @in not decrypted
 */
u32
krb5_decrypt(
	struct crypto_sync_skcipher *tfm,
	void *iv,
	void *in,
	void *out,
	int length)
{
	u32 ret = -EINVAL;
	struct scatterlist sg[1];
	u8 local_iv[GSS_KRB5_MAX_BLOCKSIZE] = {0};
	SYNC_SKCIPHER_REQUEST_ON_STACK(req, tfm);

	if (length % crypto_sync_skcipher_blocksize(tfm) != 0)
		goto out;

	if (crypto_sync_skcipher_ivsize(tfm) > GSS_KRB5_MAX_BLOCKSIZE) {
		dprintk("RPC:       gss_k5decrypt: tfm iv size too large %d\n",
			crypto_sync_skcipher_ivsize(tfm));
		goto out;
	}
	if (iv)
		memcpy(local_iv, iv, crypto_sync_skcipher_ivsize(tfm));

	memcpy(out, in, length);
	sg_init_one(sg, out, length);

	skcipher_request_set_sync_tfm(req, tfm);
	skcipher_request_set_callback(req, 0, NULL, NULL);
	skcipher_request_set_crypt(req, sg, sg, length, local_iv);

	ret = crypto_skcipher_decrypt(req);
	skcipher_request_zero(req);
out:
	dprintk("RPC:       gss_k5decrypt returns %d\n", ret);
	return ret;
}
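
/*
 * Example (an illustrative sketch): a single-block encrypt/decrypt
 * round trip with an all-zero IV. The cipher handle "tfm" is assumed
 * to have been allocated and keyed by the caller, and the 8-octet
 * buffers assume a DES-class block size.
 *
 *	u8 plain[8], ct[8], check[8];
 *	u32 err;
 *
 *	err = krb5_encrypt(tfm, NULL, plain, ct, 8);
 *	if (!err)
 *		err = krb5_decrypt(tfm, NULL, ct, check, 8);
 *
 * On success, memcmp(plain, check, 8) == 0.
 */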

static int
checksummer(struct scatterlist *sg, void *data)
{
	struct ahash_request *req = data;

	ahash_request_set_crypt(req, sg, NULL, sg->length);

	return crypto_ahash_update(req);
}

/*
 * Checksum hdrlen bytes of the token header and the plaintext data:
 * the checksum is computed over the first 8 bytes of the GSS token
 * header first, and then over the data body.
 */
u32
make_checksum(struct krb5_ctx *kctx, char *header, int hdrlen,
	      struct xdr_buf *body, int body_offset, u8 *cksumkey,
	      unsigned int usage, struct xdr_netobj *cksumout)
{
	struct crypto_ahash *tfm;
	struct ahash_request *req;
	struct scatterlist              sg[1];
	int err = -1;
	u8 *checksumdata;
	unsigned int checksumlen;

	if (cksumout->len < kctx->gk5e->cksumlength) {
		dprintk("%s: checksum buffer length, %u, too small for %s\n",
			__func__, cksumout->len, kctx->gk5e->name);
		return GSS_S_FAILURE;
	}

	checksumdata = kmalloc(GSS_KRB5_MAX_CKSUM_LEN, GFP_KERNEL);
	if (checksumdata == NULL)
		return GSS_S_FAILURE;

	tfm = crypto_alloc_ahash(kctx->gk5e->cksum_name, 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(tfm))
		goto out_free_cksum;

	req = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!req)
		goto out_free_ahash;

	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP, NULL, NULL);

	checksumlen = crypto_ahash_digestsize(tfm);

	if (cksumkey != NULL) {
		err = crypto_ahash_setkey(tfm, cksumkey,
					  kctx->gk5e->keylength);
		if (err)
			goto out;
	}

	err = crypto_ahash_init(req);
	if (err)
		goto out;
	sg_init_one(sg, header, hdrlen);
	ahash_request_set_crypt(req, sg, NULL, hdrlen);
	err = crypto_ahash_update(req);
	if (err)
		goto out;
	err = xdr_process_buf(body, body_offset, body->len - body_offset,
			      checksummer, req);
	if (err)
		goto out;
	ahash_request_set_crypt(req, NULL, checksumdata, 0);
	err = crypto_ahash_final(req);
	if (err)
		goto out;

	switch (kctx->gk5e->ctype) {
	case CKSUMTYPE_RSA_MD5:
		err = krb5_encrypt(kctx->seq, NULL, checksumdata,
				   checksumdata, checksumlen);
		if (err)
			goto out;
		memcpy(cksumout->data,
		       checksumdata + checksumlen - kctx->gk5e->cksumlength,
		       kctx->gk5e->cksumlength);
		break;
	case CKSUMTYPE_HMAC_SHA1_DES3:
		memcpy(cksumout->data, checksumdata, kctx->gk5e->cksumlength);
		break;
	default:
		BUG();
		break;
	}
	cksumout->len = kctx->gk5e->cksumlength;
out:
	ahash_request_free(req);
out_free_ahash:
	crypto_free_ahash(tfm);
out_free_cksum:
	kfree(checksumdata);
	return err ? GSS_S_FAILURE : 0;
}
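
/*
 * Example (an illustrative sketch, not code from this file): computing
 * the checksum for a legacy MIC token. "kctx" is an established
 * krb5_ctx, "ptr" points at the fixed 8-octet token header, and "text"
 * is the xdr_buf being signed; the cksumdata buffer and the use of
 * KG_USAGE_SIGN here are assumptions made for the example.
 *
 *	struct xdr_netobj md5cksum = { .len = sizeof(cksumdata),
 *				       .data = cksumdata };
 *
 *	if (make_checksum(kctx, ptr, 8, text, 0, kctx->cksum,
 *			  KG_USAGE_SIGN, &md5cksum))
 *		return GSS_S_FAILURE;
 */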

/**
 * gss_krb5_checksum - Compute the MAC for a GSS Wrap or MIC token
 * @tfm: an initialized hash transform
 * @header: pointer to a buffer containing the token header, or NULL
 * @hdrlen: number of octets in @header
 * @body: xdr_buf containing an RPC message (body.len is the message length)
 * @body_offset: byte offset into @body to start checksumming
 * @cksumout: OUT: a buffer to be filled in with the computed HMAC
 *
 * Usually expressed as H = HMAC(K, message)[1..h].
 *
 * Caller provides the truncation length of the output token (h) in
 * cksumout.len.
 *
 * Return values:
 *   %GSS_S_COMPLETE: Digest computed, @cksumout filled in
 *   %GSS_S_FAILURE: Call failed
 */
u32
gss_krb5_checksum(struct crypto_ahash *tfm, char *header, int hdrlen,
		  const struct xdr_buf *body, int body_offset,
		  struct xdr_netobj *cksumout)
{
	struct ahash_request *req;
	int err = -ENOMEM;
	u8 *checksumdata;

	checksumdata = kmalloc(crypto_ahash_digestsize(tfm), GFP_KERNEL);
	if (!checksumdata)
		return GSS_S_FAILURE;

	req = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!req)
		goto out_free_cksum;
	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP, NULL, NULL);
	err = crypto_ahash_init(req);
	if (err)
		goto out_free_ahash;

	/*
	 * Per RFC 4121 Section 4.2.4, the checksum is performed over the
	 * data body first, then over the octets in "header".
	 */
	err = xdr_process_buf(body, body_offset, body->len - body_offset,
			      checksummer, req);
	if (err)
		goto out_free_ahash;
	if (header) {
		struct scatterlist sg[1];

		sg_init_one(sg, header, hdrlen);
		ahash_request_set_crypt(req, sg, NULL, hdrlen);
		err = crypto_ahash_update(req);
		if (err)
			goto out_free_ahash;
	}

	ahash_request_set_crypt(req, NULL, checksumdata, 0);
	err = crypto_ahash_final(req);
	if (err)
		goto out_free_ahash;
	memcpy(cksumout->data, checksumdata, cksumout->len);

out_free_ahash:
	ahash_request_free(req);
out_free_cksum:
	kfree_sensitive(checksumdata);
	return err ? GSS_S_FAILURE : GSS_S_COMPLETE;
}
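
/*
 * Example (an illustrative sketch): computing the truncated MAC for a
 * MIC token. The ahash handle is assumed to have been keyed when the
 * context was established; setting cksumobj.len before the call
 * selects the truncation length h.
 *
 *	struct xdr_netobj cksumobj = {
 *		.len	= kctx->gk5e->cksumlength,
 *		.data	= cksumdata,
 *	};
 *
 *	if (gss_krb5_checksum(tfm, hdr, GSS_KRB5_TOK_HDR_LEN,
 *			      text, 0, &cksumobj))
 *		return GSS_S_FAILURE;
 */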

struct encryptor_desc {
	u8 iv[GSS_KRB5_MAX_BLOCKSIZE];
	struct skcipher_request *req;
	int pos;
	struct xdr_buf *outbuf;
	struct page **pages;
	struct scatterlist infrags[4];
	struct scatterlist outfrags[4];
	int fragno;
	int fraglen;
};

static int
encryptor(struct scatterlist *sg, void *data)
{
	struct encryptor_desc *desc = data;
	struct xdr_buf *outbuf = desc->outbuf;
	struct crypto_sync_skcipher *tfm =
		crypto_sync_skcipher_reqtfm(desc->req);
	struct page *in_page;
	int thislen = desc->fraglen + sg->length;
	int fraglen, ret;
	int page_pos;

	/* Worst case is 4 fragments: head, end of page 1, start
	 * of page 2, tail.  Anything more is a bug. */
	BUG_ON(desc->fragno > 3);

	page_pos = desc->pos - outbuf->head[0].iov_len;
	if (page_pos >= 0 && page_pos < outbuf->page_len) {
		/* pages are not in place: read the cleartext from the
		 * caller-supplied pages instead of from outbuf */
		int i = (page_pos + outbuf->page_base) >> PAGE_SHIFT;
		in_page = desc->pages[i];
	} else {
		in_page = sg_page(sg);
	}
	sg_set_page(&desc->infrags[desc->fragno], in_page, sg->length,
		    sg->offset);
	sg_set_page(&desc->outfrags[desc->fragno], sg_page(sg), sg->length,
		    sg->offset);
	desc->fragno++;
	desc->fraglen += sg->length;
	desc->pos += sg->length;

	fraglen = thislen & (crypto_sync_skcipher_blocksize(tfm) - 1);
	thislen -= fraglen;

	if (thislen == 0)
		return 0;

	sg_mark_end(&desc->infrags[desc->fragno - 1]);
	sg_mark_end(&desc->outfrags[desc->fragno - 1]);

	skcipher_request_set_crypt(desc->req, desc->infrags, desc->outfrags,
				   thislen, desc->iv);

	ret = crypto_skcipher_encrypt(desc->req);
	if (ret)
		return ret;

	sg_init_table(desc->infrags, 4);
	sg_init_table(desc->outfrags, 4);

	if (fraglen) {
		sg_set_page(&desc->outfrags[0], sg_page(sg), fraglen,
				sg->offset + sg->length - fraglen);
		desc->infrags[0] = desc->outfrags[0];
		sg_assign_page(&desc->infrags[0], in_page);
		desc->fragno = 1;
		desc->fraglen = fraglen;
	} else {
		desc->fragno = 0;
		desc->fraglen = 0;
	}
	return 0;
}
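
/*
 * Worst-case fragment layout handled by encryptor() and decryptor()
 * (a sketch; widths are illustrative):
 *
 *	+------+---------------+-----------------+------+
 *	| head | end of page 1 | start of page 2 | tail |
 *	+------+---------------+-----------------+------+
 *	  sg 0       sg 1             sg 2          sg 3
 *
 * A cipher-block-aligned prefix of the accumulated fragments is
 * processed in one call; any sub-block remainder is carried over in
 * fragment 0 for the next invocation.
 */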

int
gss_encrypt_xdr_buf(struct crypto_sync_skcipher *tfm, struct xdr_buf *buf,
		    int offset, struct page **pages)
{
	int ret;
	struct encryptor_desc desc;
	SYNC_SKCIPHER_REQUEST_ON_STACK(req, tfm);

	BUG_ON((buf->len - offset) % crypto_sync_skcipher_blocksize(tfm) != 0);

	skcipher_request_set_sync_tfm(req, tfm);
	skcipher_request_set_callback(req, 0, NULL, NULL);

	memset(desc.iv, 0, sizeof(desc.iv));
	desc.req = req;
	desc.pos = offset;
	desc.outbuf = buf;
	desc.pages = pages;
	desc.fragno = 0;
	desc.fraglen = 0;

	sg_init_table(desc.infrags, 4);
	sg_init_table(desc.outfrags, 4);

	ret = xdr_process_buf(buf, offset, buf->len - offset, encryptor, &desc);
	skcipher_request_zero(req);
	return ret;
}
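
/*
 * Example (an illustrative sketch): encrypting everything that follows
 * an already-built token header in place. "pages" holds the cleartext
 * page cache pages while buf->pages receives the ciphertext; the
 * kctx->enc handle and the offset arithmetic are assumptions borrowed
 * from the legacy v1 wrap path.
 *
 *	if (gss_encrypt_xdr_buf(kctx->enc, buf,
 *				offset + GSS_KRB5_TOK_HDR_LEN, pages))
 *		return GSS_S_FAILURE;
 */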

struct decryptor_desc {
	u8 iv[GSS_KRB5_MAX_BLOCKSIZE];
	struct skcipher_request *req;
	struct scatterlist frags[4];
	int fragno;
	int fraglen;
};

static int
decryptor(struct scatterlist *sg, void *data)
{
	struct decryptor_desc *desc = data;
	int thislen = desc->fraglen + sg->length;
	struct crypto_sync_skcipher *tfm =
		crypto_sync_skcipher_reqtfm(desc->req);
	int fraglen, ret;

	/* Worst case is 4 fragments: head, end of page 1, start
	 * of page 2, tail.  Anything more is a bug. */
	BUG_ON(desc->fragno > 3);
	sg_set_page(&desc->frags[desc->fragno], sg_page(sg), sg->length,
		    sg->offset);
	desc->fragno++;
	desc->fraglen += sg->length;

	fraglen = thislen & (crypto_sync_skcipher_blocksize(tfm) - 1);
	thislen -= fraglen;

	if (thislen == 0)
		return 0;

	sg_mark_end(&desc->frags[desc->fragno - 1]);

	skcipher_request_set_crypt(desc->req, desc->frags, desc->frags,
				   thislen, desc->iv);

	ret = crypto_skcipher_decrypt(desc->req);
	if (ret)
		return ret;

	sg_init_table(desc->frags, 4);

	if (fraglen) {
		sg_set_page(&desc->frags[0], sg_page(sg), fraglen,
				sg->offset + sg->length - fraglen);
		desc->fragno = 1;
		desc->fraglen = fraglen;
	} else {
		desc->fragno = 0;
		desc->fraglen = 0;
	}
	return 0;
}

int
gss_decrypt_xdr_buf(struct crypto_sync_skcipher *tfm, struct xdr_buf *buf,
		    int offset)
{
	int ret;
	struct decryptor_desc desc;
	SYNC_SKCIPHER_REQUEST_ON_STACK(req, tfm);

	/* XXXJBF: */
	BUG_ON((buf->len - offset) % crypto_sync_skcipher_blocksize(tfm) != 0);

	skcipher_request_set_sync_tfm(req, tfm);
	skcipher_request_set_callback(req, 0, NULL, NULL);

	memset(desc.iv, 0, sizeof(desc.iv));
	desc.req = req;
	desc.fragno = 0;
	desc.fraglen = 0;

	sg_init_table(desc.frags, 4);

	ret = xdr_process_buf(buf, offset, buf->len - offset, decryptor, &desc);
	skcipher_request_zero(req);
	return ret;
}

/*
 * This function makes the assumption that it was ultimately called
 * from gss_wrap().
 *
 * The client auth_gss code moves any existing tail data into a
 * separate page before calling gss_wrap.
 * The server svcauth_gss code ensures that both the head and the
 * tail have slack space of RPC_MAX_AUTH_SIZE before calling gss_wrap.
 *
 * Even with that guarantee, this function may be called more than
 * once in the processing of gss_wrap().  The best we can do is
 * verify at compile time (the BUILD_BUG_ON below) that the
 * largest expected shift will fit within RPC_MAX_AUTH_SIZE.
 * At run time we can verify that a single invocation of this
 * function doesn't attempt to use more than RPC_MAX_AUTH_SIZE.
 */

int
xdr_extend_head(struct xdr_buf *buf, unsigned int base, unsigned int shiftlen)
{
	u8 *p;

	if (shiftlen == 0)
		return 0;

	BUILD_BUG_ON(GSS_KRB5_MAX_SLACK_NEEDED > RPC_MAX_AUTH_SIZE);
	BUG_ON(shiftlen > RPC_MAX_AUTH_SIZE);

	p = buf->head[0].iov_base + base;

	memmove(p + shiftlen, p, buf->head[0].iov_len - base);

	buf->head[0].iov_len += shiftlen;
	buf->len += shiftlen;

	return 0;
}
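
/*
 * A sketch of the head shift performed above (widths illustrative):
 *
 *	before:	| 0 .. base-1 | base .. head end          |
 *	after:	| 0 .. base-1 | shiftlen gap | base .. end |
 *
 * The newly opened shiftlen-octet window at head[0].iov_base + base
 * is where gss_krb5_aes_encrypt() below writes the confounder.
 */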

static u32
gss_krb5_cts_crypt(struct crypto_sync_skcipher *cipher, struct xdr_buf *buf,
		   u32 offset, u8 *iv, struct page **pages, int encrypt)
{
	u32 ret;
	struct scatterlist sg[1];
	SYNC_SKCIPHER_REQUEST_ON_STACK(req, cipher);
	u8 *data;
	struct page **save_pages;
	u32 len = buf->len - offset;

	if (len > GSS_KRB5_MAX_BLOCKSIZE * 2) {
		/* WARN_ON(0) never fired; warn when the limit is
		 * actually exceeded */
		WARN_ON_ONCE(1);
		return -ENOMEM;
	}
	data = kmalloc(GSS_KRB5_MAX_BLOCKSIZE * 2, GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	/*
	 * For encryption, we want to read from the cleartext
	 * page cache pages, and write the encrypted data to
	 * the supplied xdr_buf pages.
	 */
	save_pages = buf->pages;
	if (encrypt)
		buf->pages = pages;

	ret = read_bytes_from_xdr_buf(buf, offset, data, len);
	buf->pages = save_pages;
	if (ret)
		goto out;

	sg_init_one(sg, data, len);

	skcipher_request_set_sync_tfm(req, cipher);
	skcipher_request_set_callback(req, 0, NULL, NULL);
	skcipher_request_set_crypt(req, sg, sg, len, iv);

	if (encrypt)
		ret = crypto_skcipher_encrypt(req);
	else
		ret = crypto_skcipher_decrypt(req);

	skcipher_request_zero(req);

	if (ret)
		goto out;

	ret = write_bytes_to_xdr_buf(buf, offset, data, len);

out:
	kfree(data);
	return ret;
}

/*
 * To provide confidentiality, encrypt using cipher block chaining
 * with ciphertext stealing. Message integrity is handled separately.
 */
static int
krb5_cbc_cts_encrypt(struct crypto_sync_skcipher *cts_tfm,
		     struct crypto_sync_skcipher *cbc_tfm,
		     u32 offset, struct xdr_buf *buf, struct page **pages)
{
	u32 blocksize, nbytes, nblocks, cbcbytes;
	struct encryptor_desc desc;
	int err;

	blocksize = crypto_sync_skcipher_blocksize(cts_tfm);
	nbytes = buf->len - offset;
	nblocks = (nbytes + blocksize - 1) / blocksize;
	cbcbytes = 0;
	if (nblocks > 2)
		cbcbytes = (nblocks - 2) * blocksize;

	memset(desc.iv, 0, sizeof(desc.iv));

	/* Handle block-sized chunks of plaintext with CBC. */
	if (cbcbytes) {
		SYNC_SKCIPHER_REQUEST_ON_STACK(req, cbc_tfm);

		desc.pos = offset;
		desc.fragno = 0;
		desc.fraglen = 0;
		desc.pages = pages;
		desc.outbuf = buf;
		desc.req = req;

		skcipher_request_set_sync_tfm(req, cbc_tfm);
		skcipher_request_set_callback(req, 0, NULL, NULL);

		sg_init_table(desc.infrags, 4);
		sg_init_table(desc.outfrags, 4);

		err = xdr_process_buf(buf, offset, cbcbytes, encryptor, &desc);
		skcipher_request_zero(req);
		if (err)
			return err;
	}

	/* Remaining plaintext is handled with CBC-CTS. */
	err = gss_krb5_cts_crypt(cts_tfm, buf, offset + cbcbytes,
				 desc.iv, pages, 1);
	if (err)
		return err;

	return 0;
}
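
/*
 * Worked example of the CBC/CTS split above (numbers illustrative):
 * with blocksize = 16 and nbytes = 53, nblocks = (53 + 15) / 16 = 4
 * and cbcbytes = (4 - 2) * 16 = 32. The first 32 bytes are encrypted
 * with plain CBC; the remaining 21 bytes (one full block plus a
 * 5-byte partial block) go through the CTS step, which chains from
 * the IV state that the CBC pass left behind in desc.iv.
 */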

static int
krb5_cbc_cts_decrypt(struct crypto_sync_skcipher *cts_tfm,
		     struct crypto_sync_skcipher *cbc_tfm,
		     u32 offset, struct xdr_buf *buf)
{
	u32 blocksize, nblocks, cbcbytes;
	struct decryptor_desc desc;
	int err;

	blocksize = crypto_sync_skcipher_blocksize(cts_tfm);
	nblocks = (buf->len + blocksize - 1) / blocksize;
	cbcbytes = 0;
	if (nblocks > 2)
		cbcbytes = (nblocks - 2) * blocksize;

	memset(desc.iv, 0, sizeof(desc.iv));

	/* Handle block-sized chunks of ciphertext with CBC. */
	if (cbcbytes) {
		SYNC_SKCIPHER_REQUEST_ON_STACK(req, cbc_tfm);

		desc.fragno = 0;
		desc.fraglen = 0;
		desc.req = req;

		skcipher_request_set_sync_tfm(req, cbc_tfm);
		skcipher_request_set_callback(req, 0, NULL, NULL);

		sg_init_table(desc.frags, 4);

		err = xdr_process_buf(buf, 0, cbcbytes, decryptor, &desc);
		skcipher_request_zero(req);
		if (err)
			return err;
	}

	/* Remaining ciphertext is handled with CBC-CTS. */
	return gss_krb5_cts_crypt(cts_tfm, buf, cbcbytes, desc.iv, NULL, 0);
}

u32
gss_krb5_aes_encrypt(struct krb5_ctx *kctx, u32 offset,
		     struct xdr_buf *buf, struct page **pages)
{
	u32 err;
	struct xdr_netobj hmac;
	u8 *ecptr;
	struct crypto_sync_skcipher *cipher, *aux_cipher;
	struct crypto_ahash *ahash;
	struct page **save_pages;
	unsigned int conflen;

	if (kctx->initiate) {
		cipher = kctx->initiator_enc;
		aux_cipher = kctx->initiator_enc_aux;
		ahash = kctx->initiator_integ;
	} else {
		cipher = kctx->acceptor_enc;
		aux_cipher = kctx->acceptor_enc_aux;
		ahash = kctx->acceptor_integ;
	}
	conflen = crypto_sync_skcipher_blocksize(cipher);

	/* hide the gss token header and insert the confounder */
	offset += GSS_KRB5_TOK_HDR_LEN;
	if (xdr_extend_head(buf, offset, conflen))
		return GSS_S_FAILURE;
	krb5_make_confounder(buf->head[0].iov_base + offset, conflen);
	offset -= GSS_KRB5_TOK_HDR_LEN;

	if (buf->tail[0].iov_base != NULL) {
		ecptr = buf->tail[0].iov_base + buf->tail[0].iov_len;
	} else {
		buf->tail[0].iov_base = buf->head[0].iov_base
							+ buf->head[0].iov_len;
		buf->tail[0].iov_len = 0;
		ecptr = buf->tail[0].iov_base;
	}

	/* copy plaintext gss token header after filler (if any) */
	memcpy(ecptr, buf->head[0].iov_base + offset, GSS_KRB5_TOK_HDR_LEN);
	buf->tail[0].iov_len += GSS_KRB5_TOK_HDR_LEN;
	buf->len += GSS_KRB5_TOK_HDR_LEN;

	/* Do the HMAC */
	hmac.len = GSS_KRB5_MAX_CKSUM_LEN;
	hmac.data = buf->tail[0].iov_base + buf->tail[0].iov_len;

	/*
	 * When we are called, pages points to the real page cache
	 * data -- which we can't go and encrypt!  buf->pages points
	 * to scratch pages which we are going to send off to the
	 * client/server.  Swap in the plaintext pages to calculate
	 * the hmac.
	 */
	save_pages = buf->pages;
	buf->pages = pages;

	err = gss_krb5_checksum(ahash, NULL, 0, buf,
				offset + GSS_KRB5_TOK_HDR_LEN, &hmac);
	buf->pages = save_pages;
	if (err)
		return GSS_S_FAILURE;

	err = krb5_cbc_cts_encrypt(cipher, aux_cipher,
				   offset + GSS_KRB5_TOK_HDR_LEN,
				   buf, pages);
	if (err)
		return GSS_S_FAILURE;

	/* Now update buf to account for HMAC */
	buf->tail[0].iov_len += kctx->gk5e->cksumlength;
	buf->len += kctx->gk5e->cksumlength;

	return GSS_S_COMPLETE;
}
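
/*
 * Resulting Wrap token layout after gss_krb5_aes_encrypt() (a sketch;
 * it assumes no filler octets between the payload and the header
 * copy):
 *
 *	token header | Encrypt(confounder | plaintext | header copy) | HMAC
 *
 * The HMAC is computed over the cleartext confounder, payload, and
 * header copy, then appended after the ciphertext; it is not itself
 * encrypted. The confounder and the trailing header copy are what
 * *headskip and *tailskip report on the decrypt side.
 */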

u32
gss_krb5_aes_decrypt(struct krb5_ctx *kctx, u32 offset, u32 len,
		     struct xdr_buf *buf, u32 *headskip, u32 *tailskip)
{
	struct crypto_sync_skcipher *cipher, *aux_cipher;
	struct crypto_ahash *ahash;
	struct xdr_netobj our_hmac_obj;
	u8 our_hmac[GSS_KRB5_MAX_CKSUM_LEN];
	u8 pkt_hmac[GSS_KRB5_MAX_CKSUM_LEN];
	struct xdr_buf subbuf;
	u32 ret = 0;

	if (kctx->initiate) {
		cipher = kctx->acceptor_enc;
		aux_cipher = kctx->acceptor_enc_aux;
		ahash = kctx->acceptor_integ;
	} else {
		cipher = kctx->initiator_enc;
		aux_cipher = kctx->initiator_enc_aux;
		ahash = kctx->initiator_integ;
	}

	/* create a segment skipping the header and leaving out the checksum */
	xdr_buf_subsegment(buf, &subbuf, offset + GSS_KRB5_TOK_HDR_LEN,
				    (len - offset - GSS_KRB5_TOK_HDR_LEN -
				     kctx->gk5e->cksumlength));

	ret = krb5_cbc_cts_decrypt(cipher, aux_cipher, 0, &subbuf);
	if (ret)
		goto out_err;

	/* Calculate our hmac over the plaintext data */
	our_hmac_obj.len = sizeof(our_hmac);
	our_hmac_obj.data = our_hmac;
	ret = gss_krb5_checksum(ahash, NULL, 0, &subbuf, 0, &our_hmac_obj);
	if (ret)
		goto out_err;

	/* Get the packet's hmac value */
	ret = read_bytes_from_xdr_buf(buf, len - kctx->gk5e->cksumlength,
				      pkt_hmac, kctx->gk5e->cksumlength);
	if (ret)
		goto out_err;

	if (crypto_memneq(pkt_hmac, our_hmac, kctx->gk5e->cksumlength) != 0) {
		ret = GSS_S_BAD_SIG;
		goto out_err;
	}
	*headskip = crypto_sync_skcipher_blocksize(cipher);
	*tailskip = kctx->gk5e->cksumlength;
out_err:
	if (ret && ret != GSS_S_BAD_SIG)
		ret = GSS_S_FAILURE;
	return ret;
}