/*
 * COPYRIGHT (c) 2008
 * The Regents of the University of Michigan
 * ALL RIGHTS RESERVED
 *
 * Permission is granted to use, copy, create derivative works
 * and redistribute this software and such derivative works
 * for any purpose, so long as the name of The University of
 * Michigan is not used in any advertising or publicity
 * pertaining to the use of distribution of this software
 * without specific, written prior authorization.  If the
 * above copyright notice or any other identification of the
 * University of Michigan is included in any copy of any
 * portion of this software, then the disclaimer below must
 * also be included.
 *
 * THIS SOFTWARE IS PROVIDED AS IS, WITHOUT REPRESENTATION
 * FROM THE UNIVERSITY OF MICHIGAN AS TO ITS FITNESS FOR ANY
 * PURPOSE, AND WITHOUT WARRANTY BY THE UNIVERSITY OF
 * MICHIGAN OF ANY KIND, EITHER EXPRESS OR IMPLIED, INCLUDING
 * WITHOUT LIMITATION THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE
 * REGENTS OF THE UNIVERSITY OF MICHIGAN SHALL NOT BE LIABLE
 * FOR ANY DAMAGES, INCLUDING SPECIAL, INDIRECT, INCIDENTAL, OR
 * CONSEQUENTIAL DAMAGES, WITH RESPECT TO ANY CLAIM ARISING
 * OUT OF OR IN CONNECTION WITH THE USE OF THE SOFTWARE, EVEN
 * IF IT HAS BEEN OR IS HEREAFTER ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGES.
 */

#include <crypto/skcipher.h>
#include <linux/types.h>
#include <linux/jiffies.h>
#include <linux/sunrpc/gss_krb5.h>
#include <linux/random.h>
#include <linux/pagemap.h>

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
# define RPCDBG_FACILITY	RPCDBG_AUTH
#endif

static inline int
gss_krb5_padding(int blocksize, int length)
{
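	/*
	 * The result is always in the range 1..blocksize: a length that
	 * is already a multiple of blocksize still gets a full block of
	 * padding.  E.g. with blocksize 8, a length of 13 yields 3 (so
	 * gss_krb5_add_padding() below writes 0x03 0x03 0x03), while a
	 * length of 16 yields 8 pad bytes.
	 */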
	return blocksize - (length % blocksize);
}

static inline void
gss_krb5_add_padding(struct xdr_buf *buf, int offset, int blocksize)
{
	int padding = gss_krb5_padding(blocksize, buf->len - offset);
	char *p;
	struct kvec *iov;

	if (buf->page_len || buf->tail[0].iov_len)
		iov = &buf->tail[0];
	else
		iov = &buf->head[0];
	p = iov->iov_base + iov->iov_len;
	iov->iov_len += padding;
	buf->len += padding;
	memset(p, padding, padding);
}

static inline int
gss_krb5_remove_padding(struct xdr_buf *buf, int blocksize)
{
	u8 *ptr;
	u8 pad;
	size_t len = buf->len;

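	/*
	 * The final byte of the message holds the pad count; walk len
	 * down through the head, pages, and tail to find it.  For
	 * example, with a 100-byte head, 4000 bytes of page data, and
	 * buf->len == 4150, the pad byte is the 50th byte of the tail.
	 */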
	if (len <= buf->head[0].iov_len) {
		pad = *(u8 *)(buf->head[0].iov_base + len - 1);
		if (pad > buf->head[0].iov_len)
			return -EINVAL;
		buf->head[0].iov_len -= pad;
		goto out;
	} else
		len -= buf->head[0].iov_len;
	if (len <= buf->page_len) {
		unsigned int last = (buf->page_base + len - 1)
					>> PAGE_SHIFT;
		unsigned int offset = (buf->page_base + len - 1)
					& (PAGE_SIZE - 1);
		ptr = kmap_atomic(buf->pages[last]);
		pad = *(ptr + offset);
		kunmap_atomic(ptr);
		goto out;
	} else
		len -= buf->page_len;
	BUG_ON(len > buf->tail[0].iov_len);
	pad = *(u8 *)(buf->tail[0].iov_base + len - 1);
out:
	/* XXX: NOTE: we do not adjust the page lengths--they represent
	 * a range of data in the real filesystem page cache, and we need
	 * to know that range so the xdr code can properly place read data.
	 * However adjusting the head length, as we do above, is harmless.
	 * In the case of a request that fits into a single page, the server
	 * also uses length and head length together to determine the original
	 * start of the request to copy the request for deferral; so it's
	 * easier on the server if we adjust head and tail length in tandem.
	 * It's not really a problem that we don't fool with the page and
	 * tail lengths, though--at worst badly formed xdr might lead the
	 * server to attempt to parse the padding.
	 * XXX: Document all these weird requirements for gss mechanism
	 * wrap/unwrap functions. */
	if (pad > blocksize)
		return -EINVAL;
	if (buf->len > pad)
		buf->len -= pad;
	else
		return -EINVAL;
	return 0;
}

void
gss_krb5_make_confounder(char *p, u32 conflen)
{
	static u64 i = 0;
	u64 *q = (u64 *)p;

	/* rfc1964 claims this should be "random".  But all that's really
	 * necessary is that it be unique.  And not even that is necessary in
	 * our case since our "gssapi" implementation exists only to support
	 * rpcsec_gss, so we know that the only buffers we will ever encrypt
	 * already begin with a unique sequence number.  Just to hedge my bets
	 * I'll make a half-hearted attempt at something unique, but ensuring
	 * uniqueness would mean worrying about atomicity and rollover, and I
	 * don't care enough. */

	/* initialize to random value */
	if (i == 0) {
		i = prandom_u32();
		i = (i << 32) | prandom_u32();
	}

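	/*
	 * Emit one 64-bit counter word per 8 bytes of confounder: a
	 * 16-byte confounder gets two consecutive counter values (note
	 * the fall-through below), an 8-byte confounder gets one.
	 */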
	switch (conflen) {
	case 16:
		*q++ = i++;
		/* fall through */
	case 8:
		*q++ = i++;
		break;
	default:
		BUG();
	}
}

/* Assumptions: the head and tail of inbuf are ours to play with.
 * The pages, however, may be real pages in the page cache and we replace
 * them with scratch pages from **pages before writing to them. */
/* XXX: obviously the above should be documentation of wrap interface,
 * and shouldn't be in this kerberos-specific file. */

/* XXX factor out common code with seal/unseal. */

static u32
gss_wrap_kerberos_v1(struct krb5_ctx *kctx, int offset,
		struct xdr_buf *buf, struct page **pages)
{
	char			cksumdata[GSS_KRB5_MAX_CKSUM_LEN];
	struct xdr_netobj	md5cksum = {.len = sizeof(cksumdata),
					    .data = cksumdata};
	int			blocksize = 0, plainlen;
	unsigned char		*ptr, *msg_start;
	s32			now;
	int			headlen;
	struct page		**tmp_pages;
	u32			seq_send;
	u8			*cksumkey;
	u32			conflen = kctx->gk5e->conflen;

	dprintk("RPC:       %s\n", __func__);

	now = get_seconds();

	blocksize = crypto_skcipher_blocksize(kctx->enc);
	gss_krb5_add_padding(buf, offset, blocksize);
	BUG_ON((buf->len - offset) % blocksize);
	plainlen = conflen + buf->len - offset;

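	/*
	 * headlen is the room we must open up in front of the payload:
	 * the size of the complete token (ASN.1 framing, krb5 header,
	 * checksum, confounder and padded payload) minus the padded
	 * payload bytes already in place.
	 */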
	headlen = g_token_size(&kctx->mech_used,
		GSS_KRB5_TOK_HDR_LEN + kctx->gk5e->cksumlength + plainlen) -
		(buf->len - offset);

	ptr = buf->head[0].iov_base + offset;
	/* shift data to make room for header. */
	xdr_extend_head(buf, offset, headlen);

	/* XXX Would be cleverer to encrypt while copying. */
	BUG_ON((buf->len - offset - headlen) % blocksize);

	g_make_token_header(&kctx->mech_used,
				GSS_KRB5_TOK_HDR_LEN +
				kctx->gk5e->cksumlength + plainlen, &ptr);


	/* ptr now at header described in rfc 1964, section 1.2.1: */
	ptr[0] = (unsigned char) ((KG_TOK_WRAP_MSG >> 8) & 0xff);
	ptr[1] = (unsigned char) (KG_TOK_WRAP_MSG & 0xff);

	msg_start = ptr + GSS_KRB5_TOK_HDR_LEN + kctx->gk5e->cksumlength;

	/*
	 * signalg and sealalg are stored as if they were converted from LE
	 * to host endian, even though they're opaque pairs of bytes according
	 * to the RFC.
	 */
	*(__le16 *)(ptr + 2) = cpu_to_le16(kctx->gk5e->signalg);
	*(__le16 *)(ptr + 4) = cpu_to_le16(kctx->gk5e->sealalg);
	ptr[6] = 0xff;
	ptr[7] = 0xff;
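	/*
	 * The rfc 1964 wrap token header laid down so far is:
	 *
	 *   bytes 0..1   TOK_ID    0x02 0x01 (KG_TOK_WRAP_MSG)
	 *   bytes 2..3   SGN_ALG
	 *   bytes 4..5   SEAL_ALG
	 *   bytes 6..7   filler    0xff 0xff
	 *   bytes 8..15  SND_SEQ   (filled in below)
	 *   bytes 16..   SGN_CKSUM (filled in below)
	 *
	 * followed by the confounder and the encrypted payload.
	 */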

	gss_krb5_make_confounder(msg_start, conflen);

	if (kctx->gk5e->keyed_cksum)
		cksumkey = kctx->cksum;
	else
		cksumkey = NULL;

	/* XXXJBF: UGH!: */
	tmp_pages = buf->pages;
	buf->pages = pages;
	if (make_checksum(kctx, ptr, 8, buf, offset + headlen - conflen,
					cksumkey, KG_USAGE_SEAL, &md5cksum))
		return GSS_S_FAILURE;
	buf->pages = tmp_pages;

	memcpy(ptr + GSS_KRB5_TOK_HDR_LEN, md5cksum.data, md5cksum.len);

	seq_send = atomic_fetch_inc(&kctx->seq_send);

	/* XXX would probably be more efficient to compute checksum
	 * and encrypt at the same time: */
	if ((krb5_make_seq_num(kctx, kctx->seq, kctx->initiate ? 0 : 0xff,
			       seq_send, ptr + GSS_KRB5_TOK_HDR_LEN, ptr + 8)))
		return GSS_S_FAILURE;

	if (kctx->enctype == ENCTYPE_ARCFOUR_HMAC) {
		struct crypto_skcipher *cipher;
		int err;

		cipher = crypto_alloc_skcipher(kctx->gk5e->encrypt_name, 0,
					       CRYPTO_ALG_ASYNC);
		if (IS_ERR(cipher))
			return GSS_S_FAILURE;

		krb5_rc4_setup_enc_key(kctx, cipher, seq_send);

		err = gss_encrypt_xdr_buf(cipher, buf,
					  offset + headlen - conflen, pages);
		crypto_free_skcipher(cipher);
		if (err)
			return GSS_S_FAILURE;
	} else {
		if (gss_encrypt_xdr_buf(kctx->enc, buf,
					offset + headlen - conflen, pages))
			return GSS_S_FAILURE;
	}

	return (kctx->endtime < now) ? GSS_S_CONTEXT_EXPIRED : GSS_S_COMPLETE;
}

static u32
gss_unwrap_kerberos_v1(struct krb5_ctx *kctx, int offset, struct xdr_buf *buf)
{
	int			signalg;
	int			sealalg;
	char			cksumdata[GSS_KRB5_MAX_CKSUM_LEN];
	struct xdr_netobj	md5cksum = {.len = sizeof(cksumdata),
					    .data = cksumdata};
	s32			now;
	int			direction;
	s32			seqnum;
	unsigned char		*ptr;
	int			bodysize;
	void			*data_start, *orig_start;
	int			data_len;
	int			blocksize;
	u32			conflen = kctx->gk5e->conflen;
	int			crypt_offset;
	u8			*cksumkey;

	dprintk("RPC:       gss_unwrap_kerberos\n");

	ptr = (u8 *)buf->head[0].iov_base + offset;
	if (g_verify_token_header(&kctx->mech_used, &bodysize, &ptr,
					buf->len - offset))
		return GSS_S_DEFECTIVE_TOKEN;

	if ((ptr[0] != ((KG_TOK_WRAP_MSG >> 8) & 0xff)) ||
	    (ptr[1] !=  (KG_TOK_WRAP_MSG & 0xff)))
		return GSS_S_DEFECTIVE_TOKEN;

	/* XXX sanity-check bodysize?? */

	/* get the sign and seal algorithms */

	signalg = ptr[2] + (ptr[3] << 8);
	if (signalg != kctx->gk5e->signalg)
		return GSS_S_DEFECTIVE_TOKEN;

	sealalg = ptr[4] + (ptr[5] << 8);
	if (sealalg != kctx->gk5e->sealalg)
		return GSS_S_DEFECTIVE_TOKEN;

	if ((ptr[6] != 0xff) || (ptr[7] != 0xff))
		return GSS_S_DEFECTIVE_TOKEN;

	/*
	 * Data starts after token header and checksum.  ptr points
	 * to the beginning of the token header.
	 */
	crypt_offset = ptr + (GSS_KRB5_TOK_HDR_LEN + kctx->gk5e->cksumlength) -
					(unsigned char *)buf->head[0].iov_base;

	/*
	 * Need plaintext seqnum to derive encryption key for arcfour-hmac
	 */
	if (krb5_get_seq_num(kctx, ptr + GSS_KRB5_TOK_HDR_LEN,
			     ptr + 8, &direction, &seqnum))
		return GSS_S_BAD_SIG;

	if ((kctx->initiate && direction != 0xff) ||
	    (!kctx->initiate && direction != 0))
		return GSS_S_BAD_SIG;

	if (kctx->enctype == ENCTYPE_ARCFOUR_HMAC) {
		struct crypto_skcipher *cipher;
		int err;

		cipher = crypto_alloc_skcipher(kctx->gk5e->encrypt_name, 0,
					       CRYPTO_ALG_ASYNC);
		if (IS_ERR(cipher))
			return GSS_S_FAILURE;

		krb5_rc4_setup_enc_key(kctx, cipher, seqnum);

		err = gss_decrypt_xdr_buf(cipher, buf, crypt_offset);
		crypto_free_skcipher(cipher);
		if (err)
			return GSS_S_DEFECTIVE_TOKEN;
	} else {
		if (gss_decrypt_xdr_buf(kctx->enc, buf, crypt_offset))
			return GSS_S_DEFECTIVE_TOKEN;
	}

	if (kctx->gk5e->keyed_cksum)
		cksumkey = kctx->cksum;
	else
		cksumkey = NULL;

	if (make_checksum(kctx, ptr, 8, buf, crypt_offset,
					cksumkey, KG_USAGE_SEAL, &md5cksum))
		return GSS_S_FAILURE;

	if (memcmp(md5cksum.data, ptr + GSS_KRB5_TOK_HDR_LEN,
						kctx->gk5e->cksumlength))
		return GSS_S_BAD_SIG;

	/* it got through unscathed.  Make sure the context is unexpired */

	now = get_seconds();

	if (now > kctx->endtime)
		return GSS_S_CONTEXT_EXPIRED;

	/* do sequencing checks */

	/* Copy the data back to the right position.  XXX: Would probably be
	 * better to copy and decrypt at the same time. */

	blocksize = crypto_skcipher_blocksize(kctx->enc);
	data_start = ptr + (GSS_KRB5_TOK_HDR_LEN + kctx->gk5e->cksumlength) +
					conflen;
	orig_start = buf->head[0].iov_base + offset;
	data_len = (buf->head[0].iov_base + buf->head[0].iov_len) - data_start;
	memmove(orig_start, data_start, data_len);
	buf->head[0].iov_len -= (data_start - orig_start);
	buf->len -= (data_start - orig_start);

	if (gss_krb5_remove_padding(buf, blocksize))
		return GSS_S_DEFECTIVE_TOKEN;

	return GSS_S_COMPLETE;
}

/*
 * We can shift data by up to LOCAL_BUF_LEN bytes in a pass.  If we need
 * to do more than that, we shift repeatedly.  Kevin Coffman reports
 * seeing 28 bytes as the value used by Microsoft clients and servers
 * with AES, so this constant is chosen to allow handling 28 in one pass
 * without using too much stack space.
 *
 * If that proves to be a problem perhaps we could use a more clever
 * algorithm.
 */
#define LOCAL_BUF_LEN 32u

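/*
 * rotate_buf_a_little() moves the first "shift" bytes of the buffer to
 * the end, sliding the rest down in LOCAL_BUF_LEN-sized chunks; e.g. a
 * left rotation by 3 turns "abcdefgh" into "defghabc".  _rotate_left()
 * applies that repeatedly for shifts larger than LOCAL_BUF_LEN.
 */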
static void rotate_buf_a_little(struct xdr_buf *buf, unsigned int shift)
{
	char head[LOCAL_BUF_LEN];
	char tmp[LOCAL_BUF_LEN];
	unsigned int this_len, i;

	BUG_ON(shift > LOCAL_BUF_LEN);

	read_bytes_from_xdr_buf(buf, 0, head, shift);
	for (i = 0; i + shift < buf->len; i += LOCAL_BUF_LEN) {
		this_len = min(LOCAL_BUF_LEN, buf->len - (i + shift));
		read_bytes_from_xdr_buf(buf, i + shift, tmp, this_len);
		write_bytes_to_xdr_buf(buf, i, tmp, this_len);
	}
	write_bytes_to_xdr_buf(buf, buf->len - shift, head, shift);
}

static void _rotate_left(struct xdr_buf *buf, unsigned int shift)
{
	int shifted = 0;
	int this_shift;

	shift %= buf->len;
	while (shifted < shift) {
		this_shift = min(shift - shifted, LOCAL_BUF_LEN);
		rotate_buf_a_little(buf, this_shift);
		shifted += this_shift;
	}
}

static void rotate_left(u32 base, struct xdr_buf *buf, unsigned int shift)
{
	struct xdr_buf subbuf;

	xdr_buf_subsegment(buf, &subbuf, base, buf->len - base);
	_rotate_left(&subbuf, shift);
}

static u32
gss_wrap_kerberos_v2(struct krb5_ctx *kctx, u32 offset,
		     struct xdr_buf *buf, struct page **pages)
{
	u8		*ptr, *plainhdr;
	s32		now;
	u8		flags = 0x00;
	__be16		*be16ptr;
	__be64		*be64ptr;
	u32		err;

	dprintk("RPC:       %s\n", __func__);

	if (kctx->gk5e->encrypt_v2 == NULL)
		return GSS_S_FAILURE;

	/* make room for gss token header */
	if (xdr_extend_head(buf, offset, GSS_KRB5_TOK_HDR_LEN))
		return GSS_S_FAILURE;

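	/*
	 * The rfc 4121 wrap token header constructed below is:
	 *
	 *   bytes 0..1   TOK_ID   0x05 0x04 (KG2_TOK_WRAP)
	 *   byte  2      Flags
	 *   byte  3      Filler   0xff
	 *   bytes 4..5   EC       (extra count, zero here)
	 *   bytes 6..7   RRC      (right rotation count, zero here)
	 *   bytes 8..15  SND_SEQ
	 */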
	/* construct gss token header */
	ptr = plainhdr = buf->head[0].iov_base + offset;
	*ptr++ = (unsigned char) ((KG2_TOK_WRAP >> 8) & 0xff);
	*ptr++ = (unsigned char) (KG2_TOK_WRAP & 0xff);

	if ((kctx->flags & KRB5_CTX_FLAG_INITIATOR) == 0)
		flags |= KG2_TOKEN_FLAG_SENTBYACCEPTOR;
	if ((kctx->flags & KRB5_CTX_FLAG_ACCEPTOR_SUBKEY) != 0)
		flags |= KG2_TOKEN_FLAG_ACCEPTORSUBKEY;
	/* We always do confidentiality in wrap tokens */
	flags |= KG2_TOKEN_FLAG_SEALED;

	*ptr++ = flags;
	*ptr++ = 0xff;
	be16ptr = (__be16 *)ptr;

	*be16ptr++ = 0;
	/* "inner" token header always uses 0 for RRC */
	*be16ptr++ = 0;

	be64ptr = (__be64 *)be16ptr;
	*be64ptr = cpu_to_be64(atomic64_fetch_inc(&kctx->seq_send64));

	err = (*kctx->gk5e->encrypt_v2)(kctx, offset, buf, pages);
	if (err)
		return err;

	now = get_seconds();
	return (kctx->endtime < now) ? GSS_S_CONTEXT_EXPIRED : GSS_S_COMPLETE;
}

static u32
gss_unwrap_kerberos_v2(struct krb5_ctx *kctx, int offset, struct xdr_buf *buf)
{
	s32		now;
	u8		*ptr;
	u8		flags = 0x00;
	u16		ec, rrc;
	int		err;
	u32		headskip, tailskip;
	u8		decrypted_hdr[GSS_KRB5_TOK_HDR_LEN];
	unsigned int	movelen;

	dprintk("RPC:       %s\n", __func__);

	if (kctx->gk5e->decrypt_v2 == NULL)
		return GSS_S_FAILURE;

	ptr = buf->head[0].iov_base + offset;

	if (be16_to_cpu(*((__be16 *)ptr)) != KG2_TOK_WRAP)
		return GSS_S_DEFECTIVE_TOKEN;

	flags = ptr[2];
	if ((!kctx->initiate && (flags & KG2_TOKEN_FLAG_SENTBYACCEPTOR)) ||
	    (kctx->initiate && !(flags & KG2_TOKEN_FLAG_SENTBYACCEPTOR)))
		return GSS_S_BAD_SIG;

	if ((flags & KG2_TOKEN_FLAG_SEALED) == 0) {
		dprintk("%s: token missing expected sealed flag\n", __func__);
		return GSS_S_DEFECTIVE_TOKEN;
	}

	if (ptr[3] != 0xff)
		return GSS_S_DEFECTIVE_TOKEN;

	ec = be16_to_cpup((__be16 *)(ptr + 4));
	rrc = be16_to_cpup((__be16 *)(ptr + 6));

	/*
	 * NOTE: the sequence number at ptr + 8 is skipped, rpcsec_gss
	 * doesn't want it checked; see page 6 of rfc 2203.
	 */

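	/*
	 * rrc is the count by which the sender right-rotated the data
	 * following the 16-byte header (rfc 4121, section 4.2.5); undo
	 * it with an equal left rotation so the checksum trailer ends
	 * up back at the end of the buffer.  E.g. rrc == 28 means the
	 * final 28 bytes were moved to just after the header.
	 */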
	if (rrc != 0)
		rotate_left(offset + 16, buf, rrc);

	err = (*kctx->gk5e->decrypt_v2)(kctx, offset, buf,
					&headskip, &tailskip);
	if (err)
		return GSS_S_FAILURE;

	/*
	 * Retrieve the decrypted gss token header and verify
	 * it against the original
	 */
	err = read_bytes_from_xdr_buf(buf,
				buf->len - GSS_KRB5_TOK_HDR_LEN - tailskip,
				decrypted_hdr, GSS_KRB5_TOK_HDR_LEN);
	if (err) {
		dprintk("%s: error %u getting decrypted_hdr\n", __func__, err);
		return GSS_S_FAILURE;
	}
	if (memcmp(ptr, decrypted_hdr, 6)
				|| memcmp(ptr + 8, decrypted_hdr + 8, 8)) {
		dprintk("%s: token hdr, plaintext hdr mismatch!\n", __func__);
		return GSS_S_FAILURE;
	}

	/* do sequencing checks */

	/* it got through unscathed.  Make sure the context is unexpired */
	now = get_seconds();
	if (now > kctx->endtime)
		return GSS_S_CONTEXT_EXPIRED;

	/*
	 * Move the head data back to the right position in xdr_buf.
	 * We ignore any "ec" data since it might be in the head or
	 * the tail, and we really don't need to deal with it.
	 * Note that buf->head[0].iov_len may indicate the available
	 * head buffer space rather than that actually occupied.
	 */
	movelen = min_t(unsigned int, buf->head[0].iov_len, buf->len);
	movelen -= offset + GSS_KRB5_TOK_HDR_LEN + headskip;
	BUG_ON(offset + GSS_KRB5_TOK_HDR_LEN + headskip + movelen >
							buf->head[0].iov_len);
	memmove(ptr, ptr + GSS_KRB5_TOK_HDR_LEN + headskip, movelen);
	buf->head[0].iov_len -= GSS_KRB5_TOK_HDR_LEN + headskip;
	buf->len -= GSS_KRB5_TOK_HDR_LEN + headskip;

	/* Trim off the trailing "extra count" and checksum blob */
	xdr_buf_trim(buf, ec + GSS_KRB5_TOK_HDR_LEN + tailskip);
	return GSS_S_COMPLETE;
}

u32
gss_wrap_kerberos(struct gss_ctx *gctx, int offset,
		  struct xdr_buf *buf, struct page **pages)
{
	struct krb5_ctx	*kctx = gctx->internal_ctx_id;

	switch (kctx->enctype) {
	default:
		BUG();
	case ENCTYPE_DES_CBC_RAW:
	case ENCTYPE_DES3_CBC_RAW:
	case ENCTYPE_ARCFOUR_HMAC:
		return gss_wrap_kerberos_v1(kctx, offset, buf, pages);
	case ENCTYPE_AES128_CTS_HMAC_SHA1_96:
	case ENCTYPE_AES256_CTS_HMAC_SHA1_96:
		return gss_wrap_kerberos_v2(kctx, offset, buf, pages);
	}
}

u32
gss_unwrap_kerberos(struct gss_ctx *gctx, int offset, struct xdr_buf *buf)
{
	struct krb5_ctx	*kctx = gctx->internal_ctx_id;

	switch (kctx->enctype) {
	default:
		BUG();
	case ENCTYPE_DES_CBC_RAW:
	case ENCTYPE_DES3_CBC_RAW:
	case ENCTYPE_ARCFOUR_HMAC:
		return gss_unwrap_kerberos_v1(kctx, offset, buf);
	case ENCTYPE_AES128_CTS_HMAC_SHA1_96:
	case ENCTYPE_AES256_CTS_HMAC_SHA1_96:
		return gss_unwrap_kerberos_v2(kctx, offset, buf);
	}
}