/*
 *  linux/net/sunrpc/gss_krb5_crypto.c
 *
 *  Copyright (c) 2000-2008 The Regents of the University of Michigan.
 *  All rights reserved.
 *
 *  Andy Adamson   <andros@umich.edu>
 *  Bruce Fields   <bfields@umich.edu>
 */

/*
 * Copyright (C) 1998 by the FundsXpress, INC.
 *
 * All rights reserved.
 *
 * Export of this software from the United States of America may require
 * a specific license from the United States Government.  It is the
 * responsibility of any person or organization contemplating export to
 * obtain such a license before exporting.
 *
 * WITHIN THAT CONSTRAINT, permission to use, copy, modify, and
 * distribute this software and its documentation for any purpose and
 * without fee is hereby granted, provided that the above copyright
 * notice appear in all copies and that both that copyright notice and
 * this permission notice appear in supporting documentation, and that
 * the name of FundsXpress. not be used in advertising or publicity pertaining
 * to distribution of the software without specific, written prior
 * permission.  FundsXpress makes no representations about the suitability of
 * this software for any purpose.  It is provided "as is" without express
 * or implied warranty.
 *
 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
 * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
 */

#include <crypto/algapi.h>
#include <crypto/hash.h>
#include <crypto/skcipher.h>
#include <linux/err.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/random.h>
#include <linux/sunrpc/gss_krb5.h>
#include <linux/sunrpc/xdr.h>
#include <kunit/visibility.h>

#include "gss_krb5_internal.h"

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
# define RPCDBG_FACILITY        RPCDBG_AUTH
#endif

/**
 * krb5_make_confounder - Generate a confounder string
 * @p: memory location into which to write the string
 * @conflen: string length to write, in octets
 *
 * RFCs 1964 and 3961 mention only "a random confounder" without going
 * into detail about its function or cryptographic requirements. The
 * assumed purpose is to prevent repeated encryption of a plaintext with
 * the same key from generating the same ciphertext. It is also used to
 * pad minimum plaintext length to at least a single cipher block.
 *
 * However, in situations like the GSS Kerberos 5 mechanism, where the
 * encryption IV is always all zeroes, the confounder also effectively
 * functions like an IV. Thus, not only must it be unique from message
 * to message, but it must also be difficult to predict. Otherwise an
 * attacker can correlate the confounder to previous or future values,
 * making the encryption easier to break.
 *
 * Given that the primary consumer of this encryption mechanism is a
 * network storage protocol, a type of traffic that often carries
 * predictable payloads (eg, all zeroes when reading unallocated blocks
 * from a file), our confounder generation has to be cryptographically
 * strong.
 */
void krb5_make_confounder(u8 *p, int conflen)
{
	get_random_bytes(p, conflen);
}

/**
 * krb5_encrypt - simple encryption of an RPCSEC GSS payload
 * @tfm: initialized cipher transform
 * @iv: pointer to an IV
 * @in: plaintext to encrypt
 * @out: OUT: ciphertext
 * @length: length of input and output buffers, in bytes
 *
 * @iv may be NULL to force the use of an all-zero IV.
 * The buffer containing the IV must be as large as the
 * cipher's ivsize.
 *
 * Return values:
 *   %0: @in successfully encrypted into @out
 *   negative errno: @in not encrypted
 */
u32
krb5_encrypt(
	struct crypto_sync_skcipher *tfm,
	void * iv,
	void * in,
	void * out,
	int length)
{
	u32 ret = -EINVAL;
	struct scatterlist sg[1];
	u8 local_iv[GSS_KRB5_MAX_BLOCKSIZE] = {0};
	SYNC_SKCIPHER_REQUEST_ON_STACK(req, tfm);

	if (length % crypto_sync_skcipher_blocksize(tfm) != 0)
		goto out;

	if (crypto_sync_skcipher_ivsize(tfm) > GSS_KRB5_MAX_BLOCKSIZE) {
		dprintk("RPC:       gss_k5encrypt: tfm iv size too large %d\n",
			crypto_sync_skcipher_ivsize(tfm));
		goto out;
	}

	if (iv)
		memcpy(local_iv, iv, crypto_sync_skcipher_ivsize(tfm));

	memcpy(out, in, length);
	sg_init_one(sg, out, length);

	skcipher_request_set_sync_tfm(req, tfm);
	skcipher_request_set_callback(req, 0, NULL, NULL);
	skcipher_request_set_crypt(req, sg, sg, length, local_iv);

	ret = crypto_skcipher_encrypt(req);
	skcipher_request_zero(req);
out:
	dprintk("RPC:       krb5_encrypt returns %d\n", ret);
	return ret;
}

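/*
 * Illustrative only: a minimal sketch of how a caller might drive
 * krb5_encrypt() and krb5_decrypt().  The transform name, key, and
 * buffer names below are assumptions for the example, not values used
 * by this file:
 *
 *	struct crypto_sync_skcipher *tfm;
 *	u8 ct[GSS_KRB5_MAX_BLOCKSIZE];
 *	u32 err;
 *
 *	tfm = crypto_alloc_sync_skcipher("cbc(des3_ede)", 0, 0);
 *	if (IS_ERR(tfm))
 *		return PTR_ERR(tfm);
 *	crypto_sync_skcipher_setkey(tfm, key, keylen);
 *	err = krb5_encrypt(tfm, NULL, pt, ct, GSS_KRB5_MAX_BLOCKSIZE);
 *	crypto_free_sync_skcipher(tfm);
 *
 * Passing a NULL IV selects the all-zero IV described in the
 * kernel-doc comments above.
 */
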
/**
 * krb5_decrypt - simple decryption of an RPCSEC GSS payload
 * @tfm: initialized cipher transform
 * @iv: pointer to an IV
 * @in: ciphertext to decrypt
 * @out: OUT: plaintext
 * @length: length of input and output buffers, in bytes
 *
 * @iv may be NULL to force the use of an all-zero IV.
 * The buffer containing the IV must be as large as the
 * cipher's ivsize.
 *
 * Return values:
 *   %0: @in successfully decrypted into @out
 *   negative errno: @in not decrypted
 */
u32
krb5_decrypt(
     struct crypto_sync_skcipher *tfm,
     void * iv,
     void * in,
     void * out,
     int length)
{
	u32 ret = -EINVAL;
	struct scatterlist sg[1];
	u8 local_iv[GSS_KRB5_MAX_BLOCKSIZE] = {0};
	SYNC_SKCIPHER_REQUEST_ON_STACK(req, tfm);

	if (length % crypto_sync_skcipher_blocksize(tfm) != 0)
		goto out;

	if (crypto_sync_skcipher_ivsize(tfm) > GSS_KRB5_MAX_BLOCKSIZE) {
		dprintk("RPC:       gss_k5decrypt: tfm iv size too large %d\n",
			crypto_sync_skcipher_ivsize(tfm));
		goto out;
	}
	if (iv)
		memcpy(local_iv, iv, crypto_sync_skcipher_ivsize(tfm));

	memcpy(out, in, length);
	sg_init_one(sg, out, length);

	skcipher_request_set_sync_tfm(req, tfm);
	skcipher_request_set_callback(req, 0, NULL, NULL);
	skcipher_request_set_crypt(req, sg, sg, length, local_iv);

	ret = crypto_skcipher_decrypt(req);
	skcipher_request_zero(req);
out:
	dprintk("RPC:       gss_k5decrypt returns %d\n", ret);
	return ret;
}

static int
checksummer(struct scatterlist *sg, void *data)
{
	struct ahash_request *req = data;

	ahash_request_set_crypt(req, sg, NULL, sg->length);

	return crypto_ahash_update(req);
}

/*
 * checksum the plaintext data and hdrlen bytes of the token header
 * The checksum is performed over the first 8 bytes of the
 * gss token header and then over the data body
 */
u32
make_checksum(struct krb5_ctx *kctx, char *header, int hdrlen,
	      struct xdr_buf *body, int body_offset, u8 *cksumkey,
	      unsigned int usage, struct xdr_netobj *cksumout)
{
	struct crypto_ahash *tfm;
	struct ahash_request *req;
	struct scatterlist              sg[1];
	int err = -1;
	u8 *checksumdata;
	unsigned int checksumlen;

	if (cksumout->len < kctx->gk5e->cksumlength) {
		dprintk("%s: checksum buffer length, %u, too small for %s\n",
			__func__, cksumout->len, kctx->gk5e->name);
		return GSS_S_FAILURE;
	}

	checksumdata = kmalloc(GSS_KRB5_MAX_CKSUM_LEN, GFP_KERNEL);
	if (checksumdata == NULL)
		return GSS_S_FAILURE;

	tfm = crypto_alloc_ahash(kctx->gk5e->cksum_name, 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(tfm))
		goto out_free_cksum;

	req = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!req)
		goto out_free_ahash;

	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP, NULL, NULL);

	checksumlen = crypto_ahash_digestsize(tfm);

	if (cksumkey != NULL) {
		err = crypto_ahash_setkey(tfm, cksumkey,
					  kctx->gk5e->keylength);
		if (err)
			goto out;
	}

	err = crypto_ahash_init(req);
	if (err)
		goto out;
	sg_init_one(sg, header, hdrlen);
	ahash_request_set_crypt(req, sg, NULL, hdrlen);
	err = crypto_ahash_update(req);
	if (err)
		goto out;
	err = xdr_process_buf(body, body_offset, body->len - body_offset,
			      checksummer, req);
	if (err)
		goto out;
	ahash_request_set_crypt(req, NULL, checksumdata, 0);
	err = crypto_ahash_final(req);
	if (err)
		goto out;

	switch (kctx->gk5e->ctype) {
	case CKSUMTYPE_RSA_MD5:
		err = krb5_encrypt(kctx->seq, NULL, checksumdata,
				   checksumdata, checksumlen);
		if (err)
			goto out;
		memcpy(cksumout->data,
		       checksumdata + checksumlen - kctx->gk5e->cksumlength,
		       kctx->gk5e->cksumlength);
		break;
	case CKSUMTYPE_HMAC_SHA1_DES3:
		memcpy(cksumout->data, checksumdata, kctx->gk5e->cksumlength);
		break;
	default:
		BUG();
		break;
	}
	cksumout->len = kctx->gk5e->cksumlength;
out:
	ahash_request_free(req);
out_free_ahash:
	crypto_free_ahash(tfm);
out_free_cksum:
	kfree(checksumdata);
	return err ? GSS_S_FAILURE : 0;
}

/**
 * gss_krb5_checksum - Compute the MAC for a GSS Wrap or MIC token
 * @tfm: an initialized hash transform
 * @header: pointer to a buffer containing the token header, or NULL
 * @hdrlen: number of octets in @header
 * @body: xdr_buf containing an RPC message (body.len is the message length)
 * @body_offset: byte offset into @body to start checksumming
 * @cksumout: OUT: a buffer to be filled in with the computed HMAC
 *
 * Usually expressed as H = HMAC(K, message)[1..h] .
 *
 * Caller provides the truncation length of the output token (h) in
 * cksumout.len.
 *
 * Return values:
 *   %GSS_S_COMPLETE: Digest computed, @cksumout filled in
 *   %GSS_S_FAILURE: Call failed
 */
u32
gss_krb5_checksum(struct crypto_ahash *tfm, char *header, int hdrlen,
		  const struct xdr_buf *body, int body_offset,
		  struct xdr_netobj *cksumout)
{
	struct ahash_request *req;
	int err = -ENOMEM;
	u8 *checksumdata;

	checksumdata = kmalloc(crypto_ahash_digestsize(tfm), GFP_KERNEL);
	if (!checksumdata)
		return GSS_S_FAILURE;

	req = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!req)
		goto out_free_cksum;
	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP, NULL, NULL);
	err = crypto_ahash_init(req);
	if (err)
		goto out_free_ahash;

	/*
	 * Per RFC 4121 Section 4.2.4, the checksum is performed over the
	 * data body first, then over the octets in "header".
	 */
	err = xdr_process_buf(body, body_offset, body->len - body_offset,
			      checksummer, req);
	if (err)
		goto out_free_ahash;
	if (header) {
		struct scatterlist sg[1];

		sg_init_one(sg, header, hdrlen);
		ahash_request_set_crypt(req, sg, NULL, hdrlen);
		err = crypto_ahash_update(req);
		if (err)
			goto out_free_ahash;
	}

	ahash_request_set_crypt(req, NULL, checksumdata, 0);
	err = crypto_ahash_final(req);
	if (err)
		goto out_free_ahash;

	memcpy(cksumout->data, checksumdata,
	       min_t(int, cksumout->len, crypto_ahash_digestsize(tfm)));

out_free_ahash:
	ahash_request_free(req);
out_free_cksum:
	kfree_sensitive(checksumdata);
	return err ? GSS_S_FAILURE : GSS_S_COMPLETE;
}
EXPORT_SYMBOL_IF_KUNIT(gss_krb5_checksum);

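/*
 * Example of the truncation above (an illustration, not code used by
 * this file): for the aes128-cts-hmac-sha1-96 enctype of RFC 3962, the
 * full HMAC-SHA-1 digest is 20 octets, but the caller sets
 * cksumout->len to 12, so only the first 96 bits of the digest are
 * emitted as the token checksum.
 */
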
struct encryptor_desc {
	u8 iv[GSS_KRB5_MAX_BLOCKSIZE];
	struct skcipher_request *req;
	int pos;
	struct xdr_buf *outbuf;
	struct page **pages;
	struct scatterlist infrags[4];
	struct scatterlist outfrags[4];
	int fragno;
	int fraglen;
};

static int
encryptor(struct scatterlist *sg, void *data)
{
	struct encryptor_desc *desc = data;
	struct xdr_buf *outbuf = desc->outbuf;
	struct crypto_sync_skcipher *tfm =
		crypto_sync_skcipher_reqtfm(desc->req);
	struct page *in_page;
	int thislen = desc->fraglen + sg->length;
	int fraglen, ret;
	int page_pos;

	/* Worst case is 4 fragments: head, end of page 1, start
	 * of page 2, tail.  Anything more is a bug. */
	BUG_ON(desc->fragno > 3);

	page_pos = desc->pos - outbuf->head[0].iov_len;
	if (page_pos >= 0 && page_pos < outbuf->page_len) {
		/* pages are not in place: */
		int i = (page_pos + outbuf->page_base) >> PAGE_SHIFT;
		in_page = desc->pages[i];
	} else {
		in_page = sg_page(sg);
	}
	sg_set_page(&desc->infrags[desc->fragno], in_page, sg->length,
		    sg->offset);
	sg_set_page(&desc->outfrags[desc->fragno], sg_page(sg), sg->length,
		    sg->offset);
	desc->fragno++;
	desc->fraglen += sg->length;
	desc->pos += sg->length;

	fraglen = thislen & (crypto_sync_skcipher_blocksize(tfm) - 1);
	thislen -= fraglen;

	if (thislen == 0)
		return 0;

	sg_mark_end(&desc->infrags[desc->fragno - 1]);
	sg_mark_end(&desc->outfrags[desc->fragno - 1]);

	skcipher_request_set_crypt(desc->req, desc->infrags, desc->outfrags,
				   thislen, desc->iv);

	ret = crypto_skcipher_encrypt(desc->req);
	if (ret)
		return ret;

	sg_init_table(desc->infrags, 4);
	sg_init_table(desc->outfrags, 4);

	if (fraglen) {
		sg_set_page(&desc->outfrags[0], sg_page(sg), fraglen,
				sg->offset + sg->length - fraglen);
		desc->infrags[0] = desc->outfrags[0];
		sg_assign_page(&desc->infrags[0], in_page);
		desc->fragno = 1;
		desc->fraglen = fraglen;
	} else {
		desc->fragno = 0;
		desc->fraglen = 0;
	}
	return 0;
}

int
gss_encrypt_xdr_buf(struct crypto_sync_skcipher *tfm, struct xdr_buf *buf,
		    int offset, struct page **pages)
{
	int ret;
	struct encryptor_desc desc;
	SYNC_SKCIPHER_REQUEST_ON_STACK(req, tfm);

	BUG_ON((buf->len - offset) % crypto_sync_skcipher_blocksize(tfm) != 0);

	skcipher_request_set_sync_tfm(req, tfm);
	skcipher_request_set_callback(req, 0, NULL, NULL);

	memset(desc.iv, 0, sizeof(desc.iv));
	desc.req = req;
	desc.pos = offset;
	desc.outbuf = buf;
	desc.pages = pages;
	desc.fragno = 0;
	desc.fraglen = 0;

	sg_init_table(desc.infrags, 4);
	sg_init_table(desc.outfrags, 4);

	ret = xdr_process_buf(buf, offset, buf->len - offset, encryptor, &desc);
	skcipher_request_zero(req);
	return ret;
}

struct decryptor_desc {
	u8 iv[GSS_KRB5_MAX_BLOCKSIZE];
	struct skcipher_request *req;
	struct scatterlist frags[4];
	int fragno;
	int fraglen;
};

static int
decryptor(struct scatterlist *sg, void *data)
{
	struct decryptor_desc *desc = data;
	int thislen = desc->fraglen + sg->length;
	struct crypto_sync_skcipher *tfm =
		crypto_sync_skcipher_reqtfm(desc->req);
	int fraglen, ret;

	/* Worst case is 4 fragments: head, end of page 1, start
	 * of page 2, tail.  Anything more is a bug. */
	BUG_ON(desc->fragno > 3);
	sg_set_page(&desc->frags[desc->fragno], sg_page(sg), sg->length,
		    sg->offset);
	desc->fragno++;
	desc->fraglen += sg->length;

	fraglen = thislen & (crypto_sync_skcipher_blocksize(tfm) - 1);
	thislen -= fraglen;

	if (thislen == 0)
		return 0;

	sg_mark_end(&desc->frags[desc->fragno - 1]);

	skcipher_request_set_crypt(desc->req, desc->frags, desc->frags,
				   thislen, desc->iv);

	ret = crypto_skcipher_decrypt(desc->req);
	if (ret)
		return ret;

	sg_init_table(desc->frags, 4);

	if (fraglen) {
		sg_set_page(&desc->frags[0], sg_page(sg), fraglen,
				sg->offset + sg->length - fraglen);
		desc->fragno = 1;
		desc->fraglen = fraglen;
	} else {
		desc->fragno = 0;
		desc->fraglen = 0;
	}
	return 0;
}

int
gss_decrypt_xdr_buf(struct crypto_sync_skcipher *tfm, struct xdr_buf *buf,
		    int offset)
{
	int ret;
	struct decryptor_desc desc;
	SYNC_SKCIPHER_REQUEST_ON_STACK(req, tfm);

	/* XXXJBF: */
	BUG_ON((buf->len - offset) % crypto_sync_skcipher_blocksize(tfm) != 0);

	skcipher_request_set_sync_tfm(req, tfm);
	skcipher_request_set_callback(req, 0, NULL, NULL);

	memset(desc.iv, 0, sizeof(desc.iv));
	desc.req = req;
	desc.fragno = 0;
	desc.fraglen = 0;

	sg_init_table(desc.frags, 4);

	ret = xdr_process_buf(buf, offset, buf->len - offset, decryptor, &desc);
	skcipher_request_zero(req);
	return ret;
}

/*
 * This function makes the assumption that it was ultimately called
 * from gss_wrap().
 *
 * The client auth_gss code moves any existing tail data into a
 * separate page before calling gss_wrap.
 * The server svcauth_gss code ensures that both the head and the
 * tail have slack space of RPC_MAX_AUTH_SIZE before calling gss_wrap.
 *
 * Even with that guarantee, this function may be called more than
 * once in the processing of gss_wrap().  The best we can do is
 * verify at compile-time (see GSS_KRB5_SLACK_CHECK) that the
 * largest expected shift will fit within RPC_MAX_AUTH_SIZE.
 * At run-time we can verify that a single invocation of this
 * function doesn't attempt to use more than RPC_MAX_AUTH_SIZE.
 */

int
xdr_extend_head(struct xdr_buf *buf, unsigned int base, unsigned int shiftlen)
{
	u8 *p;

	if (shiftlen == 0)
		return 0;

	BUG_ON(shiftlen > RPC_MAX_AUTH_SIZE);

	p = buf->head[0].iov_base + base;

	memmove(p + shiftlen, p, buf->head[0].iov_len - base);

	buf->head[0].iov_len += shiftlen;
	buf->len += shiftlen;

	return 0;
}

static u32
gss_krb5_cts_crypt(struct crypto_sync_skcipher *cipher, struct xdr_buf *buf,
		   u32 offset, u8 *iv, struct page **pages, int encrypt)
{
	u32 ret;
	struct scatterlist sg[1];
	SYNC_SKCIPHER_REQUEST_ON_STACK(req, cipher);
	u8 *data;
	struct page **save_pages;
	u32 len = buf->len - offset;

	if (len > GSS_KRB5_MAX_BLOCKSIZE * 2) {
		WARN_ON(0);
		return -ENOMEM;
	}
	data = kmalloc(GSS_KRB5_MAX_BLOCKSIZE * 2, GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	/*
	 * For encryption, we want to read from the cleartext
	 * page cache pages, and write the encrypted data to
	 * the supplied xdr_buf pages.
	 */
	save_pages = buf->pages;
	if (encrypt)
		buf->pages = pages;

	ret = read_bytes_from_xdr_buf(buf, offset, data, len);
	buf->pages = save_pages;
	if (ret)
		goto out;

	sg_init_one(sg, data, len);

	skcipher_request_set_sync_tfm(req, cipher);
	skcipher_request_set_callback(req, 0, NULL, NULL);
	skcipher_request_set_crypt(req, sg, sg, len, iv);

	if (encrypt)
		ret = crypto_skcipher_encrypt(req);
	else
		ret = crypto_skcipher_decrypt(req);

	skcipher_request_zero(req);

	if (ret)
		goto out;

	ret = write_bytes_to_xdr_buf(buf, offset, data, len);

#if IS_ENABLED(CONFIG_KUNIT)
	/*
	 * CBC-CTS does not define an output IV but RFC 3962 defines it as the
	 * penultimate block of ciphertext, so copy that into the IV buffer
	 * before returning.
	 */
	if (encrypt)
		memcpy(iv, data, crypto_sync_skcipher_ivsize(cipher));
#endif

out:
	kfree(data);
	return ret;
}

/**
 * krb5_cbc_cts_encrypt - encrypt in CBC mode with CTS
 * @cts_tfm: CBC cipher with CTS
 * @cbc_tfm: base CBC cipher
 * @offset: starting byte offset for plaintext
 * @buf: OUT: output buffer
 * @pages: plaintext
 * @iv: output CBC initialization vector, or NULL
 * @ivsize: size of @iv, in octets
 *
 * To provide confidentiality, encrypt using cipher block chaining
 * with ciphertext stealing. Message integrity is handled separately.
 *
 * Return values:
 *   %0: encryption successful
 *   negative errno: encryption could not be completed
 */
VISIBLE_IF_KUNIT
int krb5_cbc_cts_encrypt(struct crypto_sync_skcipher *cts_tfm,
			 struct crypto_sync_skcipher *cbc_tfm,
			 u32 offset, struct xdr_buf *buf, struct page **pages,
			 u8 *iv, unsigned int ivsize)
{
	u32 blocksize, nbytes, nblocks, cbcbytes;
	struct encryptor_desc desc;
	int err;

	blocksize = crypto_sync_skcipher_blocksize(cts_tfm);
	nbytes = buf->len - offset;
	nblocks = (nbytes + blocksize - 1) / blocksize;
	cbcbytes = 0;
	if (nblocks > 2)
		cbcbytes = (nblocks - 2) * blocksize;

	memset(desc.iv, 0, sizeof(desc.iv));

	/* Handle block-sized chunks of plaintext with CBC. */
	if (cbcbytes) {
		SYNC_SKCIPHER_REQUEST_ON_STACK(req, cbc_tfm);

		desc.pos = offset;
		desc.fragno = 0;
		desc.fraglen = 0;
		desc.pages = pages;
		desc.outbuf = buf;
		desc.req = req;

		skcipher_request_set_sync_tfm(req, cbc_tfm);
		skcipher_request_set_callback(req, 0, NULL, NULL);

		sg_init_table(desc.infrags, 4);
		sg_init_table(desc.outfrags, 4);

		err = xdr_process_buf(buf, offset, cbcbytes, encryptor, &desc);
		skcipher_request_zero(req);
		if (err)
			return err;
	}

	/* Remaining plaintext is handled with CBC-CTS. */
	err = gss_krb5_cts_crypt(cts_tfm, buf, offset + cbcbytes,
				 desc.iv, pages, 1);
	if (err)
		return err;

	if (unlikely(iv))
		memcpy(iv, desc.iv, ivsize);
	return 0;
}
EXPORT_SYMBOL_IF_KUNIT(krb5_cbc_cts_encrypt);

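/*
 * Worked example of the CBC/CTS split above, using illustrative
 * numbers: with a 16-octet AES block and a 100-octet plaintext,
 * nblocks = 7, so cbcbytes = (7 - 2) * 16 = 80.  The first 80 octets
 * are encrypted with plain CBC by @cbc_tfm; the remaining 20 octets
 * (one full block plus a 4-octet partial block) are handed to
 * gss_krb5_cts_crypt(), which applies ciphertext stealing so that no
 * padding is ever added to the message.
 */
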
/**
 * krb5_cbc_cts_decrypt - decrypt in CBC mode with CTS
 * @cts_tfm: CBC cipher with CTS
 * @cbc_tfm: base CBC cipher
 * @offset: starting byte offset for plaintext
 * @buf: OUT: output buffer
 *
 * Return values:
 *   %0: decryption successful
 *   negative errno: decryption could not be completed
 */
VISIBLE_IF_KUNIT
int krb5_cbc_cts_decrypt(struct crypto_sync_skcipher *cts_tfm,
			 struct crypto_sync_skcipher *cbc_tfm,
			 u32 offset, struct xdr_buf *buf)
{
	u32 blocksize, nblocks, cbcbytes;
	struct decryptor_desc desc;
	int err;

	blocksize = crypto_sync_skcipher_blocksize(cts_tfm);
	nblocks = (buf->len + blocksize - 1) / blocksize;
	cbcbytes = 0;
	if (nblocks > 2)
		cbcbytes = (nblocks - 2) * blocksize;

	memset(desc.iv, 0, sizeof(desc.iv));

	/* Handle block-sized chunks of plaintext with CBC. */
	if (cbcbytes) {
		SYNC_SKCIPHER_REQUEST_ON_STACK(req, cbc_tfm);

		desc.fragno = 0;
		desc.fraglen = 0;
		desc.req = req;

		skcipher_request_set_sync_tfm(req, cbc_tfm);
		skcipher_request_set_callback(req, 0, NULL, NULL);

		sg_init_table(desc.frags, 4);

		err = xdr_process_buf(buf, 0, cbcbytes, decryptor, &desc);
		skcipher_request_zero(req);
		if (err)
			return err;
	}

	/* Remaining plaintext is handled with CBC-CTS. */
	return gss_krb5_cts_crypt(cts_tfm, buf, cbcbytes, desc.iv, NULL, 0);
}
EXPORT_SYMBOL_IF_KUNIT(krb5_cbc_cts_decrypt);

u32
gss_krb5_aes_encrypt(struct krb5_ctx *kctx, u32 offset,
		     struct xdr_buf *buf, struct page **pages)
{
	u32 err;
	struct xdr_netobj hmac;
	u8 *ecptr;
	struct crypto_sync_skcipher *cipher, *aux_cipher;
	struct crypto_ahash *ahash;
	struct page **save_pages;
	unsigned int conflen;

	if (kctx->initiate) {
		cipher = kctx->initiator_enc;
		aux_cipher = kctx->initiator_enc_aux;
		ahash = kctx->initiator_integ;
	} else {
		cipher = kctx->acceptor_enc;
		aux_cipher = kctx->acceptor_enc_aux;
		ahash = kctx->acceptor_integ;
	}
	conflen = crypto_sync_skcipher_blocksize(cipher);

	/* hide the gss token header and insert the confounder */
	offset += GSS_KRB5_TOK_HDR_LEN;
	if (xdr_extend_head(buf, offset, conflen))
		return GSS_S_FAILURE;
	krb5_make_confounder(buf->head[0].iov_base + offset, conflen);
	offset -= GSS_KRB5_TOK_HDR_LEN;

	if (buf->tail[0].iov_base != NULL) {
		ecptr = buf->tail[0].iov_base + buf->tail[0].iov_len;
	} else {
		buf->tail[0].iov_base = buf->head[0].iov_base
							+ buf->head[0].iov_len;
		buf->tail[0].iov_len = 0;
		ecptr = buf->tail[0].iov_base;
	}

	/* copy plaintext gss token header after filler (if any) */
	memcpy(ecptr, buf->head[0].iov_base + offset, GSS_KRB5_TOK_HDR_LEN);
	buf->tail[0].iov_len += GSS_KRB5_TOK_HDR_LEN;
	buf->len += GSS_KRB5_TOK_HDR_LEN;

	hmac.len = kctx->gk5e->cksumlength;
	hmac.data = buf->tail[0].iov_base + buf->tail[0].iov_len;

	/*
	 * When we are called, pages points to the real page cache
	 * data -- which we can't go and encrypt!  buf->pages points
	 * to scratch pages which we are going to send off to the
	 * client/server.  Swap in the plaintext pages to calculate
	 * the hmac.
	 */
	save_pages = buf->pages;
	buf->pages = pages;

	err = gss_krb5_checksum(ahash, NULL, 0, buf,
				offset + GSS_KRB5_TOK_HDR_LEN, &hmac);
	buf->pages = save_pages;
	if (err)
		return GSS_S_FAILURE;

	err = krb5_cbc_cts_encrypt(cipher, aux_cipher,
				   offset + GSS_KRB5_TOK_HDR_LEN,
				   buf, pages, NULL, 0);
	if (err)
		return GSS_S_FAILURE;

	/* Now update buf to account for HMAC */
	buf->tail[0].iov_len += kctx->gk5e->cksumlength;
	buf->len += kctx->gk5e->cksumlength;

	return GSS_S_COMPLETE;
}

u32
gss_krb5_aes_decrypt(struct krb5_ctx *kctx, u32 offset, u32 len,
		     struct xdr_buf *buf, u32 *headskip, u32 *tailskip)
{
	struct crypto_sync_skcipher *cipher, *aux_cipher;
	struct crypto_ahash *ahash;
	struct xdr_netobj our_hmac_obj;
	u8 our_hmac[GSS_KRB5_MAX_CKSUM_LEN];
	u8 pkt_hmac[GSS_KRB5_MAX_CKSUM_LEN];
	struct xdr_buf subbuf;
	u32 ret = 0;

	if (kctx->initiate) {
		cipher = kctx->acceptor_enc;
		aux_cipher = kctx->acceptor_enc_aux;
		ahash = kctx->acceptor_integ;
	} else {
		cipher = kctx->initiator_enc;
		aux_cipher = kctx->initiator_enc_aux;
		ahash = kctx->initiator_integ;
	}

	/* create a segment skipping the header and leaving out the checksum */
	xdr_buf_subsegment(buf, &subbuf, offset + GSS_KRB5_TOK_HDR_LEN,
				    (len - offset - GSS_KRB5_TOK_HDR_LEN -
				     kctx->gk5e->cksumlength));

	ret = krb5_cbc_cts_decrypt(cipher, aux_cipher, 0, &subbuf);
	if (ret)
		goto out_err;

	our_hmac_obj.len = kctx->gk5e->cksumlength;
	our_hmac_obj.data = our_hmac;
	ret = gss_krb5_checksum(ahash, NULL, 0, &subbuf, 0, &our_hmac_obj);
	if (ret)
		goto out_err;

	/* Get the packet's hmac value */
	ret = read_bytes_from_xdr_buf(buf, len - kctx->gk5e->cksumlength,
				      pkt_hmac, kctx->gk5e->cksumlength);
	if (ret)
		goto out_err;

	if (crypto_memneq(pkt_hmac, our_hmac, kctx->gk5e->cksumlength) != 0) {
		ret = GSS_S_BAD_SIG;
		goto out_err;
	}
	*headskip = crypto_sync_skcipher_blocksize(cipher);
	*tailskip = kctx->gk5e->cksumlength;
out_err:
	if (ret && ret != GSS_S_BAD_SIG)
		ret = GSS_S_FAILURE;
	return ret;
}

/**
 * krb5_etm_checksum - Compute a MAC for a GSS Wrap token
 * @cipher: an initialized cipher transform
 * @tfm: an initialized hash transform
 * @body: xdr_buf containing an RPC message (body.len is the message length)
 * @body_offset: byte offset into @body to start checksumming
 * @cksumout: OUT: a buffer to be filled in with the computed HMAC
 *
 * Usually expressed as H = HMAC(K, IV | ciphertext)[1..h] .
 *
 * Caller provides the truncation length of the output token (h) in
 * cksumout.len.
 *
 * Return values:
 *   %GSS_S_COMPLETE: Digest computed, @cksumout filled in
 *   %GSS_S_FAILURE: Call failed
 */
VISIBLE_IF_KUNIT
u32 krb5_etm_checksum(struct crypto_sync_skcipher *cipher,
		      struct crypto_ahash *tfm, const struct xdr_buf *body,
		      int body_offset, struct xdr_netobj *cksumout)
{
	unsigned int ivsize = crypto_sync_skcipher_ivsize(cipher);
	struct ahash_request *req;
	struct scatterlist sg[1];
	u8 *iv, *checksumdata;
	int err = -ENOMEM;

	checksumdata = kmalloc(crypto_ahash_digestsize(tfm), GFP_KERNEL);
	if (!checksumdata)
		return GSS_S_FAILURE;
	/* For RPCSEC, the "initial cipher state" is always all zeroes. */
	iv = kzalloc(ivsize, GFP_KERNEL);
	if (!iv)
		goto out_free_mem;

	req = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!req)
		goto out_free_mem;
	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP, NULL, NULL);
	err = crypto_ahash_init(req);
	if (err)
		goto out_free_ahash;

	sg_init_one(sg, iv, ivsize);
	ahash_request_set_crypt(req, sg, NULL, ivsize);
	err = crypto_ahash_update(req);
	if (err)
		goto out_free_ahash;
	err = xdr_process_buf(body, body_offset, body->len - body_offset,
			      checksummer, req);
	if (err)
		goto out_free_ahash;

	ahash_request_set_crypt(req, NULL, checksumdata, 0);
	err = crypto_ahash_final(req);
	if (err)
		goto out_free_ahash;
	memcpy(cksumout->data, checksumdata, cksumout->len);

out_free_ahash:
	ahash_request_free(req);
out_free_mem:
	kfree(iv);
	kfree_sensitive(checksumdata);
	return err ? GSS_S_FAILURE : GSS_S_COMPLETE;
}
EXPORT_SYMBOL_IF_KUNIT(krb5_etm_checksum);

/**
 * krb5_etm_encrypt - Encrypt using the RFC 8009 rules
 * @kctx: Kerberos context
 * @offset: starting offset of the payload, in bytes
 * @buf: OUT: send buffer to contain the encrypted payload
 * @pages: plaintext payload
 *
 * The main difference with aes_encrypt is that "The HMAC is
 * calculated over the cipher state concatenated with the AES
 * output, instead of being calculated over the confounder and
 * plaintext.  This allows the message receiver to verify the
 * integrity of the message before decrypting the message."
 *
 * RFC 8009 Section 5:
 *
 * encryption function: as follows, where E() is AES encryption in
 * CBC-CS3 mode, and h is the size of truncated HMAC (128 bits or
 * 192 bits as described above).
 *
 *    N = random value of length 128 bits (the AES block size)
 *    IV = cipher state
 *    C = E(Ke, N | plaintext, IV)
 *    H = HMAC(Ki, IV | C)
 *    ciphertext = C | H[1..h]
 *
 * This encryption formula provides AEAD EtM with key separation.
 *
 * Return values:
 *   %GSS_S_COMPLETE: Encryption successful
 *   %GSS_S_FAILURE: Encryption failed
 */
u32
krb5_etm_encrypt(struct krb5_ctx *kctx, u32 offset,
		 struct xdr_buf *buf, struct page **pages)
{
	struct crypto_sync_skcipher *cipher, *aux_cipher;
	struct crypto_ahash *ahash;
	struct xdr_netobj hmac;
	unsigned int conflen;
	u8 *ecptr;
	u32 err;

	if (kctx->initiate) {
		cipher = kctx->initiator_enc;
		aux_cipher = kctx->initiator_enc_aux;
		ahash = kctx->initiator_integ;
	} else {
		cipher = kctx->acceptor_enc;
		aux_cipher = kctx->acceptor_enc_aux;
		ahash = kctx->acceptor_integ;
	}
	conflen = crypto_sync_skcipher_blocksize(cipher);

	offset += GSS_KRB5_TOK_HDR_LEN;
	if (xdr_extend_head(buf, offset, conflen))
		return GSS_S_FAILURE;
	krb5_make_confounder(buf->head[0].iov_base + offset, conflen);
	offset -= GSS_KRB5_TOK_HDR_LEN;

	if (buf->tail[0].iov_base) {
		ecptr = buf->tail[0].iov_base + buf->tail[0].iov_len;
	} else {
		buf->tail[0].iov_base = buf->head[0].iov_base
							+ buf->head[0].iov_len;
		buf->tail[0].iov_len = 0;
		ecptr = buf->tail[0].iov_base;
	}

	memcpy(ecptr, buf->head[0].iov_base + offset, GSS_KRB5_TOK_HDR_LEN);
	buf->tail[0].iov_len += GSS_KRB5_TOK_HDR_LEN;
	buf->len += GSS_KRB5_TOK_HDR_LEN;

	err = krb5_cbc_cts_encrypt(cipher, aux_cipher,
				   offset + GSS_KRB5_TOK_HDR_LEN,
				   buf, pages, NULL, 0);
	if (err)
		return GSS_S_FAILURE;

	hmac.data = buf->tail[0].iov_base + buf->tail[0].iov_len;
	hmac.len = kctx->gk5e->cksumlength;
	err = krb5_etm_checksum(cipher, ahash,
				buf, offset + GSS_KRB5_TOK_HDR_LEN, &hmac);
	if (err)
		goto out_err;
	buf->tail[0].iov_len += kctx->gk5e->cksumlength;
	buf->len += kctx->gk5e->cksumlength;

	return GSS_S_COMPLETE;

out_err:
	return GSS_S_FAILURE;
}

/**
 * krb5_etm_decrypt - Decrypt using the RFC 8009 rules
 * @kctx: Kerberos context
 * @offset: starting offset of the ciphertext, in bytes
 * @len: byte offset, from the start of @buf, of the end of the Wrap token
 * @buf: IN/OUT: xdr_buf containing the Wrap token to unwrap
 * @headskip: OUT: the enctype's confounder length, in octets
 * @tailskip: OUT: the enctype's HMAC length, in octets
 *
 * RFC 8009 Section 5:
 *
 * decryption function: as follows, where D() is AES decryption in
 * CBC-CS3 mode, and h is the size of truncated HMAC.
 *
 *    (C, H) = ciphertext
 *        (Note: H is the last h bits of the ciphertext.)
 *    IV = cipher state
 *    if H != HMAC(Ki, IV | C)[1..h]
 *        stop, report error
 *    (N, P) = D(Ke, C, IV)
 *
 * Return values:
 *   %GSS_S_COMPLETE: Decryption successful
 *   %GSS_S_BAD_SIG: computed HMAC != received HMAC
 *   %GSS_S_FAILURE: Decryption failed
 */
u32
krb5_etm_decrypt(struct krb5_ctx *kctx, u32 offset, u32 len,
		 struct xdr_buf *buf, u32 *headskip, u32 *tailskip)
{
	struct crypto_sync_skcipher *cipher, *aux_cipher;
	u8 our_hmac[GSS_KRB5_MAX_CKSUM_LEN];
	u8 pkt_hmac[GSS_KRB5_MAX_CKSUM_LEN];
	struct xdr_netobj our_hmac_obj;
	struct crypto_ahash *ahash;
	struct xdr_buf subbuf;
	u32 ret = 0;

	if (kctx->initiate) {
		cipher = kctx->acceptor_enc;
		aux_cipher = kctx->acceptor_enc_aux;
		ahash = kctx->acceptor_integ;
	} else {
		cipher = kctx->initiator_enc;
		aux_cipher = kctx->initiator_enc_aux;
		ahash = kctx->initiator_integ;
	}

	/* Extract the ciphertext into @subbuf. */
	xdr_buf_subsegment(buf, &subbuf, offset + GSS_KRB5_TOK_HDR_LEN,
			   (len - offset - GSS_KRB5_TOK_HDR_LEN -
			    kctx->gk5e->cksumlength));

	our_hmac_obj.data = our_hmac;
	our_hmac_obj.len = kctx->gk5e->cksumlength;
	ret = krb5_etm_checksum(cipher, ahash, &subbuf, 0, &our_hmac_obj);
	if (ret)
		goto out_err;
	ret = read_bytes_from_xdr_buf(buf, len - kctx->gk5e->cksumlength,
				      pkt_hmac, kctx->gk5e->cksumlength);
	if (ret)
		goto out_err;
	if (crypto_memneq(pkt_hmac, our_hmac, kctx->gk5e->cksumlength) != 0) {
		ret = GSS_S_BAD_SIG;
		goto out_err;
	}

	ret = krb5_cbc_cts_decrypt(cipher, aux_cipher, 0, &subbuf);
	if (ret) {
		ret = GSS_S_FAILURE;
		goto out_err;
	}

	*headskip = crypto_sync_skcipher_blocksize(cipher);
	*tailskip = kctx->gk5e->cksumlength;
	return GSS_S_COMPLETE;

out_err:
	if (ret != GSS_S_BAD_SIG)
		ret = GSS_S_FAILURE;
	return ret;
}