/*
 *  linux/net/sunrpc/gss_krb5_crypto.c
 *
 *  Copyright (c) 2000-2008 The Regents of the University of Michigan.
 *  All rights reserved.
 *
 *  Andy Adamson   <andros@umich.edu>
 *  Bruce Fields   <bfields@umich.edu>
 */

/*
 * Copyright (C) 1998 by the FundsXpress, INC.
 *
 * All rights reserved.
 *
 * Export of this software from the United States of America may require
 * a specific license from the United States Government.  It is the
 * responsibility of any person or organization contemplating export to
 * obtain such a license before exporting.
 *
 * WITHIN THAT CONSTRAINT, permission to use, copy, modify, and
 * distribute this software and its documentation for any purpose and
 * without fee is hereby granted, provided that the above copyright
 * notice appear in all copies and that both that copyright notice and
 * this permission notice appear in supporting documentation, and that
 * the name of FundsXpress not be used in advertising or publicity pertaining
 * to distribution of the software without specific, written prior
 * permission.  FundsXpress makes no representations about the suitability of
 * this software for any purpose.  It is provided "as is" without express
 * or implied warranty.
 *
 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
 */

#include <crypto/algapi.h>
#include <crypto/hash.h>
#include <crypto/skcipher.h>
#include <linux/err.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/random.h>
#include <linux/sunrpc/gss_krb5.h>
#include <linux/sunrpc/xdr.h>

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
# define RPCDBG_FACILITY        RPCDBG_AUTH
#endif

u32
krb5_encrypt(
	struct crypto_skcipher *tfm,
	void *iv,
	void *in,
	void *out,
	int length)
{
	u32 ret = -EINVAL;
	struct scatterlist sg[1];
	u8 local_iv[GSS_KRB5_MAX_BLOCKSIZE] = {0};
	SKCIPHER_REQUEST_ON_STACK(req, tfm);

	if (length % crypto_skcipher_blocksize(tfm) != 0)
		goto out;

	if (crypto_skcipher_ivsize(tfm) > GSS_KRB5_MAX_BLOCKSIZE) {
		dprintk("RPC:       gss_k5encrypt: tfm iv size too large %d\n",
			crypto_skcipher_ivsize(tfm));
		goto out;
	}

	if (iv)
		memcpy(local_iv, iv, crypto_skcipher_ivsize(tfm));

	memcpy(out, in, length);
	sg_init_one(sg, out, length);

	skcipher_request_set_tfm(req, tfm);
	skcipher_request_set_callback(req, 0, NULL, NULL);
	skcipher_request_set_crypt(req, sg, sg, length, local_iv);

	ret = crypto_skcipher_encrypt(req);
	skcipher_request_zero(req);
out:
	dprintk("RPC:       krb5_encrypt returns %d\n", ret);
	return ret;
}
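
/*
 * A minimal usage sketch for krb5_encrypt() above and krb5_decrypt()
 * below (illustrative only: it assumes "tfm" is an already-keyed CBC
 * skcipher and that the length is a whole number of cipher blocks):
 *
 *	u8 in[16], out[16];
 *
 *	if (krb5_encrypt(tfm, NULL, in, out, sizeof(in)))
 *		return GSS_S_FAILURE;
 *
 * A NULL iv means "encrypt with an all-zero IV"; both helpers copy
 * "in" to "out" and then transform "out" in place.
 */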

u32
krb5_decrypt(
	struct crypto_skcipher *tfm,
	void *iv,
	void *in,
	void *out,
	int length)
{
	u32 ret = -EINVAL;
	struct scatterlist sg[1];
	u8 local_iv[GSS_KRB5_MAX_BLOCKSIZE] = {0};
	SKCIPHER_REQUEST_ON_STACK(req, tfm);

	if (length % crypto_skcipher_blocksize(tfm) != 0)
		goto out;

	if (crypto_skcipher_ivsize(tfm) > GSS_KRB5_MAX_BLOCKSIZE) {
		dprintk("RPC:       gss_k5decrypt: tfm iv size too large %d\n",
			crypto_skcipher_ivsize(tfm));
		goto out;
	}
	if (iv)
		memcpy(local_iv, iv, crypto_skcipher_ivsize(tfm));

	memcpy(out, in, length);
	sg_init_one(sg, out, length);

	skcipher_request_set_tfm(req, tfm);
	skcipher_request_set_callback(req, 0, NULL, NULL);
	skcipher_request_set_crypt(req, sg, sg, length, local_iv);

	ret = crypto_skcipher_decrypt(req);
	skcipher_request_zero(req);
out:
	dprintk("RPC:       gss_k5decrypt returns %d\n", ret);
	return ret;
}

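/*
 * checksummer() is the per-fragment callback handed to xdr_process_buf(),
 * which walks an xdr_buf (head iovec, page data, tail iovec) and calls it
 * once per scatterlist fragment.  Each call feeds one fragment into the
 * running ahash; the caller brackets the walk with crypto_ahash_init()
 * and crypto_ahash_final().
 */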
static int
checksummer(struct scatterlist *sg, void *data)
{
	struct ahash_request *req = data;

	ahash_request_set_crypt(req, sg, NULL, sg->length);

	return crypto_ahash_update(req);
}

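/*
 * Map a GSS usage value to the 4-byte little-endian salt that the
 * arcfour-hmac-md5 checksum mixes in first.  For example, KG_USAGE_SIGN
 * maps to ms_usage 15, giving the salt { 0x0f, 0x00, 0x00, 0x00 }.
 */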
static int
arcfour_hmac_md5_usage_to_salt(unsigned int usage, u8 salt[4])
{
	unsigned int ms_usage;

	switch (usage) {
	case KG_USAGE_SIGN:
		ms_usage = 15;
		break;
	case KG_USAGE_SEAL:
		ms_usage = 13;
		break;
	default:
		return -EINVAL;
	}
	salt[0] = (ms_usage >> 0) & 0xff;
	salt[1] = (ms_usage >> 8) & 0xff;
	salt[2] = (ms_usage >> 16) & 0xff;
	salt[3] = (ms_usage >> 24) & 0xff;

	return 0;
}

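/*
 * Arcfour-hmac-md5 checksum, roughly as RFC 4757 describes it: the code
 * below first computes
 *
 *	MD5(salt | header | body)
 *
 * and then runs that digest through HMAC-MD5 keyed with cksumkey,
 * truncating the result to gk5e->cksumlength bytes.
 */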
static u32
make_checksum_hmac_md5(struct krb5_ctx *kctx, char *header, int hdrlen,
		       struct xdr_buf *body, int body_offset, u8 *cksumkey,
		       unsigned int usage, struct xdr_netobj *cksumout)
{
	struct scatterlist sg[1];
	int err = -1;
	u8 *checksumdata;
	u8 rc4salt[4];
	struct crypto_ahash *md5;
	struct crypto_ahash *hmac_md5;
	struct ahash_request *req;

	if (cksumkey == NULL)
		return GSS_S_FAILURE;

	if (cksumout->len < kctx->gk5e->cksumlength) {
		dprintk("%s: checksum buffer length, %u, too small for %s\n",
			__func__, cksumout->len, kctx->gk5e->name);
		return GSS_S_FAILURE;
	}

	if (arcfour_hmac_md5_usage_to_salt(usage, rc4salt)) {
		dprintk("%s: invalid usage value %u\n", __func__, usage);
		return GSS_S_FAILURE;
	}

	checksumdata = kmalloc(GSS_KRB5_MAX_CKSUM_LEN, GFP_NOFS);
	if (!checksumdata)
		return GSS_S_FAILURE;

	md5 = crypto_alloc_ahash("md5", 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(md5))
		goto out_free_cksum;

	hmac_md5 = crypto_alloc_ahash(kctx->gk5e->cksum_name, 0,
				      CRYPTO_ALG_ASYNC);
	if (IS_ERR(hmac_md5))
		goto out_free_md5;

	req = ahash_request_alloc(md5, GFP_NOFS);
	if (!req)
		goto out_free_hmac_md5;

	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP, NULL, NULL);

	err = crypto_ahash_init(req);
	if (err)
		goto out;
	sg_init_one(sg, rc4salt, 4);
	ahash_request_set_crypt(req, sg, NULL, 4);
	err = crypto_ahash_update(req);
	if (err)
		goto out;

	sg_init_one(sg, header, hdrlen);
	ahash_request_set_crypt(req, sg, NULL, hdrlen);
	err = crypto_ahash_update(req);
	if (err)
		goto out;
	err = xdr_process_buf(body, body_offset, body->len - body_offset,
			      checksummer, req);
	if (err)
		goto out;
	ahash_request_set_crypt(req, NULL, checksumdata, 0);
	err = crypto_ahash_final(req);
	if (err)
		goto out;

	ahash_request_free(req);
	req = ahash_request_alloc(hmac_md5, GFP_NOFS);
	if (!req)
		goto out_free_hmac_md5;

	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP, NULL, NULL);

	err = crypto_ahash_init(req);
	if (err)
		goto out;
	err = crypto_ahash_setkey(hmac_md5, cksumkey, kctx->gk5e->keylength);
	if (err)
		goto out;

	sg_init_one(sg, checksumdata, crypto_ahash_digestsize(md5));
	ahash_request_set_crypt(req, sg, checksumdata,
				crypto_ahash_digestsize(md5));
	err = crypto_ahash_digest(req);
	if (err)
		goto out;

	memcpy(cksumout->data, checksumdata, kctx->gk5e->cksumlength);
	cksumout->len = kctx->gk5e->cksumlength;
out:
	ahash_request_free(req);
out_free_hmac_md5:
	crypto_free_ahash(hmac_md5);
out_free_md5:
	crypto_free_ahash(md5);
out_free_cksum:
	kfree(checksumdata);
	return err ? GSS_S_FAILURE : 0;
}

/*
 * Checksum the plaintext data and hdrlen bytes of the token header.
 * The checksum is performed over the first 8 bytes of the gss token
 * header and then over the data body.
 */
u32
make_checksum(struct krb5_ctx *kctx, char *header, int hdrlen,
	      struct xdr_buf *body, int body_offset, u8 *cksumkey,
	      unsigned int usage, struct xdr_netobj *cksumout)
{
	struct crypto_ahash *tfm;
	struct ahash_request *req;
	struct scatterlist sg[1];
	int err = -1;
	u8 *checksumdata;
	unsigned int checksumlen;

	if (kctx->gk5e->ctype == CKSUMTYPE_HMAC_MD5_ARCFOUR)
		return make_checksum_hmac_md5(kctx, header, hdrlen,
					      body, body_offset,
					      cksumkey, usage, cksumout);

	if (cksumout->len < kctx->gk5e->cksumlength) {
		dprintk("%s: checksum buffer length, %u, too small for %s\n",
			__func__, cksumout->len, kctx->gk5e->name);
		return GSS_S_FAILURE;
	}

	checksumdata = kmalloc(GSS_KRB5_MAX_CKSUM_LEN, GFP_NOFS);
	if (checksumdata == NULL)
		return GSS_S_FAILURE;

	tfm = crypto_alloc_ahash(kctx->gk5e->cksum_name, 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(tfm))
		goto out_free_cksum;

	req = ahash_request_alloc(tfm, GFP_NOFS);
	if (!req)
		goto out_free_ahash;

	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP, NULL, NULL);

	checksumlen = crypto_ahash_digestsize(tfm);

	if (cksumkey != NULL) {
		err = crypto_ahash_setkey(tfm, cksumkey,
					  kctx->gk5e->keylength);
		if (err)
			goto out;
	}

	err = crypto_ahash_init(req);
	if (err)
		goto out;
	sg_init_one(sg, header, hdrlen);
	ahash_request_set_crypt(req, sg, NULL, hdrlen);
	err = crypto_ahash_update(req);
	if (err)
		goto out;
	err = xdr_process_buf(body, body_offset, body->len - body_offset,
			      checksummer, req);
	if (err)
		goto out;
	ahash_request_set_crypt(req, NULL, checksumdata, 0);
	err = crypto_ahash_final(req);
	if (err)
		goto out;

	switch (kctx->gk5e->ctype) {
	case CKSUMTYPE_RSA_MD5:
		err = kctx->gk5e->encrypt(kctx->seq, NULL, checksumdata,
					  checksumdata, checksumlen);
		if (err)
			goto out;
		memcpy(cksumout->data,
		       checksumdata + checksumlen - kctx->gk5e->cksumlength,
		       kctx->gk5e->cksumlength);
		break;
	case CKSUMTYPE_HMAC_SHA1_DES3:
		memcpy(cksumout->data, checksumdata, kctx->gk5e->cksumlength);
		break;
	default:
		BUG();
		break;
	}
	cksumout->len = kctx->gk5e->cksumlength;
out:
	ahash_request_free(req);
out_free_ahash:
	crypto_free_ahash(tfm);
out_free_cksum:
	kfree(checksumdata);
	return err ? GSS_S_FAILURE : 0;
}

/*
 * Checksum the plaintext data and hdrlen bytes of the token header.
 * Per RFC 4121, sec. 4.2.4, the checksum is performed over the data
 * body first and then over the first 16 octets of the MIC token.
 * Inclusion of the header data in the calculation of the checksum
 * is optional.
 */
u32
make_checksum_v2(struct krb5_ctx *kctx, char *header, int hdrlen,
		 struct xdr_buf *body, int body_offset, u8 *cksumkey,
		 unsigned int usage, struct xdr_netobj *cksumout)
{
	struct crypto_ahash *tfm;
	struct ahash_request *req;
	struct scatterlist sg[1];
	int err = -1;
	u8 *checksumdata;
	unsigned int checksumlen;

	if (kctx->gk5e->keyed_cksum == 0) {
		dprintk("%s: expected keyed hash for %s\n",
			__func__, kctx->gk5e->name);
		return GSS_S_FAILURE;
	}
	if (cksumkey == NULL) {
		dprintk("%s: no key supplied for %s\n",
			__func__, kctx->gk5e->name);
		return GSS_S_FAILURE;
	}

	checksumdata = kmalloc(GSS_KRB5_MAX_CKSUM_LEN, GFP_NOFS);
	if (!checksumdata)
		return GSS_S_FAILURE;

	tfm = crypto_alloc_ahash(kctx->gk5e->cksum_name, 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(tfm))
		goto out_free_cksum;
	checksumlen = crypto_ahash_digestsize(tfm);

	req = ahash_request_alloc(tfm, GFP_NOFS);
	if (!req)
		goto out_free_ahash;

	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP, NULL, NULL);

	err = crypto_ahash_setkey(tfm, cksumkey, kctx->gk5e->keylength);
	if (err)
		goto out;

	err = crypto_ahash_init(req);
	if (err)
		goto out;
	err = xdr_process_buf(body, body_offset, body->len - body_offset,
			      checksummer, req);
	if (err)
		goto out;
	if (header != NULL) {
		sg_init_one(sg, header, hdrlen);
		ahash_request_set_crypt(req, sg, NULL, hdrlen);
		err = crypto_ahash_update(req);
		if (err)
			goto out;
	}
	ahash_request_set_crypt(req, NULL, checksumdata, 0);
	err = crypto_ahash_final(req);
	if (err)
		goto out;

	cksumout->len = kctx->gk5e->cksumlength;

	switch (kctx->gk5e->ctype) {
	case CKSUMTYPE_HMAC_SHA1_96_AES128:
	case CKSUMTYPE_HMAC_SHA1_96_AES256:
		/* note that this truncates the hash */
		memcpy(cksumout->data, checksumdata, kctx->gk5e->cksumlength);
		break;
	default:
		BUG();
		break;
	}
out:
	ahash_request_free(req);
out_free_ahash:
	crypto_free_ahash(tfm);
out_free_cksum:
	kfree(checksumdata);
	return err ? GSS_S_FAILURE : 0;
}
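
/*
 * A minimal calling sketch for make_checksum_v2() (illustrative only;
 * gss_krb5_aes_encrypt() below is a real caller, and "digest"/"mic"
 * here are made-up names):
 *
 *	u8 digest[GSS_KRB5_MAX_CKSUM_LEN];
 *	struct xdr_netobj mic = { .len = sizeof(digest), .data = digest };
 *
 *	if (make_checksum_v2(kctx, NULL, 0, buf, 0, key,
 *			     KG_USAGE_SIGN, &mic))
 *		return GSS_S_FAILURE;
 */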

struct encryptor_desc {
	u8 iv[GSS_KRB5_MAX_BLOCKSIZE];
	struct skcipher_request *req;
	int pos;
	struct xdr_buf *outbuf;
	struct page **pages;
	struct scatterlist infrags[4];
	struct scatterlist outfrags[4];
	int fragno;
	int fraglen;
};

static int
encryptor(struct scatterlist *sg, void *data)
{
	struct encryptor_desc *desc = data;
	struct xdr_buf *outbuf = desc->outbuf;
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(desc->req);
	struct page *in_page;
	int thislen = desc->fraglen + sg->length;
	int fraglen, ret;
	int page_pos;

	/* Worst case is 4 fragments: head, end of page 1, start
	 * of page 2, tail.  Anything more is a bug. */
	BUG_ON(desc->fragno > 3);

	page_pos = desc->pos - outbuf->head[0].iov_len;
	if (page_pos >= 0 && page_pos < outbuf->page_len) {
		/* pages are not in place: */
		int i = (page_pos + outbuf->page_base) >> PAGE_SHIFT;
		in_page = desc->pages[i];
	} else {
		in_page = sg_page(sg);
	}
	sg_set_page(&desc->infrags[desc->fragno], in_page, sg->length,
		    sg->offset);
	sg_set_page(&desc->outfrags[desc->fragno], sg_page(sg), sg->length,
		    sg->offset);
	desc->fragno++;
	desc->fraglen += sg->length;
	desc->pos += sg->length;

	fraglen = thislen & (crypto_skcipher_blocksize(tfm) - 1);
	thislen -= fraglen;

	if (thislen == 0)
		return 0;

	sg_mark_end(&desc->infrags[desc->fragno - 1]);
	sg_mark_end(&desc->outfrags[desc->fragno - 1]);

	skcipher_request_set_crypt(desc->req, desc->infrags, desc->outfrags,
				   thislen, desc->iv);

	ret = crypto_skcipher_encrypt(desc->req);
	if (ret)
		return ret;

	sg_init_table(desc->infrags, 4);
	sg_init_table(desc->outfrags, 4);

	if (fraglen) {
		sg_set_page(&desc->outfrags[0], sg_page(sg), fraglen,
				sg->offset + sg->length - fraglen);
		desc->infrags[0] = desc->outfrags[0];
		sg_assign_page(&desc->infrags[0], in_page);
		desc->fragno = 1;
		desc->fraglen = fraglen;
	} else {
		desc->fragno = 0;
		desc->fraglen = 0;
	}
	return 0;
}

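/*
 * A worked example of the carry logic in encryptor() above, assuming a
 * 16-byte block size: if fragments of 20 and then 25 bytes arrive,
 * thislen reaches 45 on the second call, 32 bytes (two whole blocks)
 * are encrypted, and the trailing 13 bytes are re-queued as fragment 0
 * for the next call.  Only whole blocks are ever handed to the
 * skcipher.
 *
 * gss_encrypt_xdr_buf() below encrypts (buf->len - offset) bytes of
 * "buf" in place; the length must be a multiple of the cipher block
 * size.  "pages" supplies the cleartext source for the page-data
 * portion of buf: encryptor() reads from these pages and writes the
 * ciphertext into buf->pages, so callers can encrypt page-cache data
 * into scratch pages without modifying the originals.
 */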
int
gss_encrypt_xdr_buf(struct crypto_skcipher *tfm, struct xdr_buf *buf,
		    int offset, struct page **pages)
{
	int ret;
	struct encryptor_desc desc;
	SKCIPHER_REQUEST_ON_STACK(req, tfm);

	BUG_ON((buf->len - offset) % crypto_skcipher_blocksize(tfm) != 0);

	skcipher_request_set_tfm(req, tfm);
	skcipher_request_set_callback(req, 0, NULL, NULL);

	memset(desc.iv, 0, sizeof(desc.iv));
	desc.req = req;
	desc.pos = offset;
	desc.outbuf = buf;
	desc.pages = pages;
	desc.fragno = 0;
	desc.fraglen = 0;

	sg_init_table(desc.infrags, 4);
	sg_init_table(desc.outfrags, 4);

	ret = xdr_process_buf(buf, offset, buf->len - offset, encryptor, &desc);
	skcipher_request_zero(req);
	return ret;
}

struct decryptor_desc {
	u8 iv[GSS_KRB5_MAX_BLOCKSIZE];
	struct skcipher_request *req;
	struct scatterlist frags[4];
	int fragno;
	int fraglen;
};

static int
decryptor(struct scatterlist *sg, void *data)
{
	struct decryptor_desc *desc = data;
	int thislen = desc->fraglen + sg->length;
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(desc->req);
	int fraglen, ret;

	/* Worst case is 4 fragments: head, end of page 1, start
	 * of page 2, tail.  Anything more is a bug. */
	BUG_ON(desc->fragno > 3);
	sg_set_page(&desc->frags[desc->fragno], sg_page(sg), sg->length,
		    sg->offset);
	desc->fragno++;
	desc->fraglen += sg->length;

	fraglen = thislen & (crypto_skcipher_blocksize(tfm) - 1);
	thislen -= fraglen;

	if (thislen == 0)
		return 0;

	sg_mark_end(&desc->frags[desc->fragno - 1]);

	skcipher_request_set_crypt(desc->req, desc->frags, desc->frags,
				   thislen, desc->iv);

	ret = crypto_skcipher_decrypt(desc->req);
	if (ret)
		return ret;

	sg_init_table(desc->frags, 4);

	if (fraglen) {
		sg_set_page(&desc->frags[0], sg_page(sg), fraglen,
				sg->offset + sg->length - fraglen);
		desc->fragno = 1;
		desc->fraglen = fraglen;
	} else {
		desc->fragno = 0;
		desc->fraglen = 0;
	}
	return 0;
}

int
gss_decrypt_xdr_buf(struct crypto_skcipher *tfm, struct xdr_buf *buf,
		    int offset)
{
	int ret;
	struct decryptor_desc desc;
	SKCIPHER_REQUEST_ON_STACK(req, tfm);

	/* XXXJBF: */
	BUG_ON((buf->len - offset) % crypto_skcipher_blocksize(tfm) != 0);

	skcipher_request_set_tfm(req, tfm);
	skcipher_request_set_callback(req, 0, NULL, NULL);

	memset(desc.iv, 0, sizeof(desc.iv));
	desc.req = req;
	desc.fragno = 0;
	desc.fraglen = 0;

	sg_init_table(desc.frags, 4);

	ret = xdr_process_buf(buf, offset, buf->len - offset, decryptor, &desc);
	skcipher_request_zero(req);
	return ret;
}

/*
 * This function makes the assumption that it was ultimately called
 * from gss_wrap().
 *
 * The client auth_gss code moves any existing tail data into a
 * separate page before calling gss_wrap.
 * The server svcauth_gss code ensures that both the head and the
 * tail have slack space of RPC_MAX_AUTH_SIZE before calling gss_wrap.
 *
 * Even with that guarantee, this function may be called more than
 * once in the processing of gss_wrap().  The best we can do is
 * verify at compile-time (see the BUILD_BUG_ON below) that the
 * largest expected shift will fit within RPC_MAX_AUTH_SIZE.
 * At run-time we can verify that a single invocation of this
 * function doesn't attempt to use more than RPC_MAX_AUTH_SIZE.
 */
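/*
 * For illustration: with head[0].iov_len == 32, base == 8 and
 * shiftlen == 8, the 24 bytes at offsets 8..31 move to offsets 16..39
 * and iov_len grows to 40, opening an 8-byte hole at offset 8
 * (gss_krb5_aes_encrypt() below uses this to make room for the
 * confounder).
 */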

int
xdr_extend_head(struct xdr_buf *buf, unsigned int base, unsigned int shiftlen)
{
	u8 *p;

	if (shiftlen == 0)
		return 0;

	BUILD_BUG_ON(GSS_KRB5_MAX_SLACK_NEEDED > RPC_MAX_AUTH_SIZE);
	BUG_ON(shiftlen > RPC_MAX_AUTH_SIZE);

	p = buf->head[0].iov_base + base;

	memmove(p + shiftlen, p, buf->head[0].iov_len - base);

	buf->head[0].iov_len += shiftlen;
	buf->len += shiftlen;

	return 0;
}

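/*
 * Encrypt or decrypt the final, possibly partial, chunk of the buffer.
 * RFC 3962 uses ciphertext stealing (CTS) for the AES enctypes, so the
 * callers below push everything except the last two blocks through the
 * plain-CBC aux_cipher and hand the remainder (at most two blocks,
 * hence the length check) to this helper along with the chained IV.
 */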
static u32
gss_krb5_cts_crypt(struct crypto_skcipher *cipher, struct xdr_buf *buf,
		   u32 offset, u8 *iv, struct page **pages, int encrypt)
{
	u32 ret;
	struct scatterlist sg[1];
	SKCIPHER_REQUEST_ON_STACK(req, cipher);
	u8 *data;
	struct page **save_pages;
	u32 len = buf->len - offset;

	if (len > GSS_KRB5_MAX_BLOCKSIZE * 2) {
		WARN_ON(1);	/* the CTS tail is at most two blocks */
		return -ENOMEM;
	}
	data = kmalloc(GSS_KRB5_MAX_BLOCKSIZE * 2, GFP_NOFS);
	if (!data)
		return -ENOMEM;

	/*
	 * For encryption, we want to read from the cleartext
	 * page cache pages, and write the encrypted data to
	 * the supplied xdr_buf pages.
	 */
	save_pages = buf->pages;
	if (encrypt)
		buf->pages = pages;

	ret = read_bytes_from_xdr_buf(buf, offset, data, len);
	buf->pages = save_pages;
	if (ret)
		goto out;

	sg_init_one(sg, data, len);

	skcipher_request_set_tfm(req, cipher);
	skcipher_request_set_callback(req, 0, NULL, NULL);
	skcipher_request_set_crypt(req, sg, sg, len, iv);

	if (encrypt)
		ret = crypto_skcipher_encrypt(req);
	else
		ret = crypto_skcipher_decrypt(req);

	skcipher_request_zero(req);

	if (ret)
		goto out;

	ret = write_bytes_to_xdr_buf(buf, offset, data, len);

out:
	kfree(data);
	return ret;
}

u32
gss_krb5_aes_encrypt(struct krb5_ctx *kctx, u32 offset,
		     struct xdr_buf *buf, struct page **pages)
{
	u32 err;
	struct xdr_netobj hmac;
	u8 *cksumkey;
	u8 *ecptr;
	struct crypto_skcipher *cipher, *aux_cipher;
	int blocksize;
	struct page **save_pages;
	int nblocks, nbytes;
	struct encryptor_desc desc;
	u32 cbcbytes;
	unsigned int usage;

	if (kctx->initiate) {
		cipher = kctx->initiator_enc;
		aux_cipher = kctx->initiator_enc_aux;
		cksumkey = kctx->initiator_integ;
		usage = KG_USAGE_INITIATOR_SEAL;
	} else {
		cipher = kctx->acceptor_enc;
		aux_cipher = kctx->acceptor_enc_aux;
		cksumkey = kctx->acceptor_integ;
		usage = KG_USAGE_ACCEPTOR_SEAL;
	}
	blocksize = crypto_skcipher_blocksize(cipher);

	/* hide the gss token header and insert the confounder */
	offset += GSS_KRB5_TOK_HDR_LEN;
	if (xdr_extend_head(buf, offset, kctx->gk5e->conflen))
		return GSS_S_FAILURE;
	gss_krb5_make_confounder(buf->head[0].iov_base + offset, kctx->gk5e->conflen);
	offset -= GSS_KRB5_TOK_HDR_LEN;

	if (buf->tail[0].iov_base != NULL) {
		ecptr = buf->tail[0].iov_base + buf->tail[0].iov_len;
	} else {
		buf->tail[0].iov_base = buf->head[0].iov_base
							+ buf->head[0].iov_len;
		buf->tail[0].iov_len = 0;
		ecptr = buf->tail[0].iov_base;
	}

	/* copy plaintext gss token header after filler (if any) */
	memcpy(ecptr, buf->head[0].iov_base + offset, GSS_KRB5_TOK_HDR_LEN);
	buf->tail[0].iov_len += GSS_KRB5_TOK_HDR_LEN;
	buf->len += GSS_KRB5_TOK_HDR_LEN;

	/* Do the HMAC */
	hmac.len = GSS_KRB5_MAX_CKSUM_LEN;
	hmac.data = buf->tail[0].iov_base + buf->tail[0].iov_len;

	/*
	 * When we are called, pages points to the real page cache
	 * data -- which we can't go and encrypt!  buf->pages points
	 * to scratch pages which we are going to send off to the
	 * client/server.  Swap in the plaintext pages to calculate
	 * the hmac.
	 */
	save_pages = buf->pages;
	buf->pages = pages;

	err = make_checksum_v2(kctx, NULL, 0, buf,
			       offset + GSS_KRB5_TOK_HDR_LEN,
			       cksumkey, usage, &hmac);
	buf->pages = save_pages;
	if (err)
		return GSS_S_FAILURE;

	nbytes = buf->len - offset - GSS_KRB5_TOK_HDR_LEN;
	nblocks = (nbytes + blocksize - 1) / blocksize;
	cbcbytes = 0;
	if (nblocks > 2)
		cbcbytes = (nblocks - 2) * blocksize;

	memset(desc.iv, 0, sizeof(desc.iv));

	if (cbcbytes) {
		SKCIPHER_REQUEST_ON_STACK(req, aux_cipher);

		desc.pos = offset + GSS_KRB5_TOK_HDR_LEN;
		desc.fragno = 0;
		desc.fraglen = 0;
		desc.pages = pages;
		desc.outbuf = buf;
		desc.req = req;

		skcipher_request_set_tfm(req, aux_cipher);
		skcipher_request_set_callback(req, 0, NULL, NULL);

		sg_init_table(desc.infrags, 4);
		sg_init_table(desc.outfrags, 4);

		err = xdr_process_buf(buf, offset + GSS_KRB5_TOK_HDR_LEN,
				      cbcbytes, encryptor, &desc);
		skcipher_request_zero(req);
		if (err)
			goto out_err;
	}

	/* Make sure IV carries forward from any CBC results. */
	err = gss_krb5_cts_crypt(cipher, buf,
				 offset + GSS_KRB5_TOK_HDR_LEN + cbcbytes,
				 desc.iv, pages, 1);
	if (err) {
		err = GSS_S_FAILURE;
		goto out_err;
	}

	/* Now update buf to account for HMAC */
	buf->tail[0].iov_len += kctx->gk5e->cksumlength;
	buf->len += kctx->gk5e->cksumlength;

out_err:
	if (err)
		err = GSS_S_FAILURE;
	return err;
}

u32
gss_krb5_aes_decrypt(struct krb5_ctx *kctx, u32 offset, struct xdr_buf *buf,
		     u32 *headskip, u32 *tailskip)
{
	struct xdr_buf subbuf;
	u32 ret = 0;
	u8 *cksum_key;
	struct crypto_skcipher *cipher, *aux_cipher;
	struct xdr_netobj our_hmac_obj;
	u8 our_hmac[GSS_KRB5_MAX_CKSUM_LEN];
	u8 pkt_hmac[GSS_KRB5_MAX_CKSUM_LEN];
	int nblocks, blocksize, cbcbytes;
	struct decryptor_desc desc;
	unsigned int usage;

	if (kctx->initiate) {
		cipher = kctx->acceptor_enc;
		aux_cipher = kctx->acceptor_enc_aux;
		cksum_key = kctx->acceptor_integ;
		usage = KG_USAGE_ACCEPTOR_SEAL;
	} else {
		cipher = kctx->initiator_enc;
		aux_cipher = kctx->initiator_enc_aux;
		cksum_key = kctx->initiator_integ;
		usage = KG_USAGE_INITIATOR_SEAL;
	}
	blocksize = crypto_skcipher_blocksize(cipher);

	/* create a segment skipping the header and leaving out the checksum */
	xdr_buf_subsegment(buf, &subbuf, offset + GSS_KRB5_TOK_HDR_LEN,
				    (buf->len - offset - GSS_KRB5_TOK_HDR_LEN -
				     kctx->gk5e->cksumlength));

	nblocks = (subbuf.len + blocksize - 1) / blocksize;

	cbcbytes = 0;
	if (nblocks > 2)
		cbcbytes = (nblocks - 2) * blocksize;

	memset(desc.iv, 0, sizeof(desc.iv));

	if (cbcbytes) {
		SKCIPHER_REQUEST_ON_STACK(req, aux_cipher);

		desc.fragno = 0;
		desc.fraglen = 0;
		desc.req = req;

		skcipher_request_set_tfm(req, aux_cipher);
		skcipher_request_set_callback(req, 0, NULL, NULL);

		sg_init_table(desc.frags, 4);

		ret = xdr_process_buf(&subbuf, 0, cbcbytes, decryptor, &desc);
		skcipher_request_zero(req);
		if (ret)
			goto out_err;
	}

	/* Make sure IV carries forward from any CBC results. */
	ret = gss_krb5_cts_crypt(cipher, &subbuf, cbcbytes, desc.iv, NULL, 0);
	if (ret)
		goto out_err;

	/* Calculate our hmac over the plaintext data */
	our_hmac_obj.len = sizeof(our_hmac);
	our_hmac_obj.data = our_hmac;

	ret = make_checksum_v2(kctx, NULL, 0, &subbuf, 0,
			       cksum_key, usage, &our_hmac_obj);
	if (ret)
		goto out_err;

	/* Get the packet's hmac value */
	ret = read_bytes_from_xdr_buf(buf, buf->len - kctx->gk5e->cksumlength,
				      pkt_hmac, kctx->gk5e->cksumlength);
	if (ret)
		goto out_err;

	if (crypto_memneq(pkt_hmac, our_hmac, kctx->gk5e->cksumlength) != 0) {
		ret = GSS_S_BAD_SIG;
		goto out_err;
	}
	*headskip = kctx->gk5e->conflen;
	*tailskip = kctx->gk5e->cksumlength;
out_err:
	if (ret && ret != GSS_S_BAD_SIG)
		ret = GSS_S_FAILURE;
	return ret;
}

/*
 * Compute Kseq given the initial session key and the checksum.
 * Set the key of the given cipher.
 */
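/*
 * A sketch of the derivation performed below (RFC 4757-style; the code
 * is the authoritative sequence, and the constant is four zero bytes):
 *
 *	Kseq = HMAC(Ksess, 0x00000000)
 *	Kseq = HMAC(Kseq, checksum)	(first 8 bytes of the checksum)
 */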
int
krb5_rc4_setup_seq_key(struct krb5_ctx *kctx, struct crypto_skcipher *cipher,
		       unsigned char *cksum)
{
	struct crypto_shash *hmac;
	struct shash_desc *desc;
	u8 Kseq[GSS_KRB5_MAX_KEYLEN];
	u32 zeroconstant = 0;
	int err;

	dprintk("%s: entered\n", __func__);

	hmac = crypto_alloc_shash(kctx->gk5e->cksum_name, 0, 0);
	if (IS_ERR(hmac)) {
		dprintk("%s: error %ld, allocating hash '%s'\n",
			__func__, PTR_ERR(hmac), kctx->gk5e->cksum_name);
		return PTR_ERR(hmac);
	}

	desc = kmalloc(sizeof(*desc) + crypto_shash_descsize(hmac),
		       GFP_NOFS);
	if (!desc) {
		dprintk("%s: failed to allocate shash descriptor for '%s'\n",
			__func__, kctx->gk5e->cksum_name);
		crypto_free_shash(hmac);
		return -ENOMEM;
	}

	desc->tfm = hmac;
	desc->flags = 0;

	/* Compute intermediate Kseq from session key */
	err = crypto_shash_setkey(hmac, kctx->Ksess, kctx->gk5e->keylength);
	if (err)
		goto out_err;

	err = crypto_shash_digest(desc, (u8 *)&zeroconstant, 4, Kseq);
	if (err)
		goto out_err;

	/* Compute final Kseq from the checksum and intermediate Kseq */
	err = crypto_shash_setkey(hmac, Kseq, kctx->gk5e->keylength);
	if (err)
		goto out_err;

	err = crypto_shash_digest(desc, cksum, 8, Kseq);
	if (err)
		goto out_err;

	err = crypto_skcipher_setkey(cipher, Kseq, kctx->gk5e->keylength);
	if (err)
		goto out_err;

	err = 0;

out_err:
	kzfree(desc);
	crypto_free_shash(hmac);
	dprintk("%s: returning %d\n", __func__, err);
	return err;
}

/*
 * Compute Kcrypt given the initial session key and the plaintext seqnum.
 * Set the key of cipher kctx->enc.
 */
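/*
 * A sketch of the derivation performed below (RFC 4757-style; the code
 * is the authoritative sequence):
 *
 *	Kcrypt = HMAC(Ksess ^ f0f0f0..., 0x00000000)
 *	Kcrypt = HMAC(Kcrypt, big-endian(seqnum))
 */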
int
krb5_rc4_setup_enc_key(struct krb5_ctx *kctx, struct crypto_skcipher *cipher,
		       s32 seqnum)
{
	struct crypto_shash *hmac;
	struct shash_desc *desc;
	u8 Kcrypt[GSS_KRB5_MAX_KEYLEN];
	u8 zeroconstant[4] = {0};
	u8 seqnumarray[4];
	int err, i;

	dprintk("%s: entered, seqnum %d\n", __func__, seqnum);

	hmac = crypto_alloc_shash(kctx->gk5e->cksum_name, 0, 0);
	if (IS_ERR(hmac)) {
		dprintk("%s: error %ld, allocating hash '%s'\n",
			__func__, PTR_ERR(hmac), kctx->gk5e->cksum_name);
		return PTR_ERR(hmac);
	}

	desc = kmalloc(sizeof(*desc) + crypto_shash_descsize(hmac),
		       GFP_NOFS);
	if (!desc) {
		dprintk("%s: failed to allocate shash descriptor for '%s'\n",
			__func__, kctx->gk5e->cksum_name);
		crypto_free_shash(hmac);
		return -ENOMEM;
	}

	desc->tfm = hmac;
	desc->flags = 0;

	/* Compute intermediate Kcrypt from session key */
	for (i = 0; i < kctx->gk5e->keylength; i++)
		Kcrypt[i] = kctx->Ksess[i] ^ 0xf0;

	err = crypto_shash_setkey(hmac, Kcrypt, kctx->gk5e->keylength);
	if (err)
		goto out_err;

	err = crypto_shash_digest(desc, zeroconstant, 4, Kcrypt);
	if (err)
		goto out_err;

	/* Compute final Kcrypt from the seqnum and intermediate Kcrypt */
	err = crypto_shash_setkey(hmac, Kcrypt, kctx->gk5e->keylength);
	if (err)
		goto out_err;

	seqnumarray[0] = (unsigned char) ((seqnum >> 24) & 0xff);
	seqnumarray[1] = (unsigned char) ((seqnum >> 16) & 0xff);
	seqnumarray[2] = (unsigned char) ((seqnum >> 8) & 0xff);
	seqnumarray[3] = (unsigned char) ((seqnum >> 0) & 0xff);

	err = crypto_shash_digest(desc, seqnumarray, 4, Kcrypt);
	if (err)
		goto out_err;

	err = crypto_skcipher_setkey(cipher, Kcrypt, kctx->gk5e->keylength);
	if (err)
		goto out_err;

	err = 0;

out_err:
	kzfree(desc);
	crypto_free_shash(hmac);
	dprintk("%s: returning %d\n", __func__, err);
	return err;
}