/*
 * COPYRIGHT (c) 2008
 * The Regents of the University of Michigan
 * ALL RIGHTS RESERVED
 *
 * Permission is granted to use, copy, create derivative works
 * and redistribute this software and such derivative works
 * for any purpose, so long as the name of The University of
 * Michigan is not used in any advertising or publicity
 * pertaining to the use or distribution of this software
 * without specific, written prior authorization.  If the
 * above copyright notice or any other identification of the
 * University of Michigan is included in any copy of any
 * portion of this software, then the disclaimer below must
 * also be included.
 *
 * THIS SOFTWARE IS PROVIDED AS IS, WITHOUT REPRESENTATION
 * FROM THE UNIVERSITY OF MICHIGAN AS TO ITS FITNESS FOR ANY
 * PURPOSE, AND WITHOUT WARRANTY BY THE UNIVERSITY OF
 * MICHIGAN OF ANY KIND, EITHER EXPRESS OR IMPLIED, INCLUDING
 * WITHOUT LIMITATION THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE
 * REGENTS OF THE UNIVERSITY OF MICHIGAN SHALL NOT BE LIABLE
 * FOR ANY DAMAGES, INCLUDING SPECIAL, INDIRECT, INCIDENTAL, OR
 * CONSEQUENTIAL DAMAGES, WITH RESPECT TO ANY CLAIM ARISING
 * OUT OF OR IN CONNECTION WITH THE USE OF THE SOFTWARE, EVEN
 * IF IT HAS BEEN OR IS HEREAFTER ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGES.
 */

#include <crypto/skcipher.h>
#include <linux/types.h>
#include <linux/jiffies.h>
#include <linux/sunrpc/gss_krb5.h>
#include <linux/pagemap.h>

#include "gss_krb5_internal.h"

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
# define RPCDBG_FACILITY	RPCDBG_AUTH
#endif

#if defined(CONFIG_RPCSEC_GSS_KRB5_SIMPLIFIED)

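/*
 * Number of pad bytes needed to bring @length up to a multiple of
 * @blocksize.  An already-aligned length gets a full block of padding,
 * e.g. gss_krb5_padding(8, 13) == 3 but gss_krb5_padding(8, 16) == 8,
 * so the result is always in the range 1..blocksize.
 */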
static inline int
gss_krb5_padding(int blocksize, int length)
{
	return blocksize - (length % blocksize);
}

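/*
 * Append self-describing padding after the cleartext: each of the
 * @padding pad bytes holds the value @padding (PKCS#5/#7 style), so
 * the receiver can recover the pad length from the last plaintext
 * byte.  The pad bytes land in the tail if there is page data or a
 * tail is already in use, otherwise in the head.
 */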
static inline void
gss_krb5_add_padding(struct xdr_buf *buf, int offset, int blocksize)
{
	int padding = gss_krb5_padding(blocksize, buf->len - offset);
	char *p;
	struct kvec *iov;

	if (buf->page_len || buf->tail[0].iov_len)
		iov = &buf->tail[0];
	else
		iov = &buf->head[0];
	p = iov->iov_base + iov->iov_len;
	iov->iov_len += padding;
	buf->len += padding;
	memset(p, padding, padding);
}

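/*
 * Strip the padding added by gss_krb5_add_padding().  The last
 * plaintext byte, which holds the pad count, may fall in the head,
 * in the page data, or in the tail, so each region is tried in turn.
 * Only buf->len (and, in the head case, the head length) is trimmed;
 * the comment at the end of the function explains why.
 */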
static inline int
gss_krb5_remove_padding(struct xdr_buf *buf, int blocksize)
{
	u8 *ptr;
	u8 pad;
	size_t len = buf->len;

	if (len <= buf->head[0].iov_len) {
		pad = *(u8 *)(buf->head[0].iov_base + len - 1);
		if (pad > buf->head[0].iov_len)
			return -EINVAL;
		buf->head[0].iov_len -= pad;
		goto out;
	} else
		len -= buf->head[0].iov_len;
	if (len <= buf->page_len) {
		unsigned int last = (buf->page_base + len - 1)
					>> PAGE_SHIFT;
		unsigned int offset = (buf->page_base + len - 1)
					& (PAGE_SIZE - 1);
		ptr = kmap_atomic(buf->pages[last]);
		pad = *(ptr + offset);
		kunmap_atomic(ptr);
		goto out;
	} else
		len -= buf->page_len;
	BUG_ON(len > buf->tail[0].iov_len);
	pad = *(u8 *)(buf->tail[0].iov_base + len - 1);
out:
	/* XXX: NOTE: we do not adjust the page lengths--they represent
	 * a range of data in the real filesystem page cache, and we need
	 * to know that range so the xdr code can properly place read data.
	 * However adjusting the head length, as we do above, is harmless.
	 * In the case of a request that fits into a single page, the server
	 * also uses length and head length together to determine the original
	 * start of the request to copy the request for deferral; so it's
	 * easier on the server if we adjust head and tail length in tandem.
	 * It's not really a problem that we don't fool with the page and
	 * tail lengths, though--at worst badly formed xdr might lead the
	 * server to attempt to parse the padding.
	 * XXX: Document all these weird requirements for gss mechanism
	 * wrap/unwrap functions. */
	if (pad > blocksize)
		return -EINVAL;
	if (buf->len > pad)
		buf->len -= pad;
	else
		return -EINVAL;
	return 0;
}


/* Assumptions: the head and tail of inbuf are ours to play with.
 * The pages, however, may be real pages in the page cache and we replace
 * them with scratch pages from **pages before writing to them. */
/* XXX: obviously the above should be documentation of wrap interface,
 * and shouldn't be in this kerberos-specific file. */

/* XXX factor out common code with seal/unseal. */

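/*
 * Wrap the message in an RFC 1964 (krb5 v1) token:
 *
 *	mech framing | 8-octet token header | SGN_CKSUM |
 *	E(confounder | plaintext | pad)
 *
 * The checksum covers the token header and the cleartext message;
 * everything from the confounder onward is then encrypted, with the
 * ciphertext page data landing in the scratch pages supplied by the
 * caller in @pages.
 */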
u32
gss_krb5_wrap_v1(struct krb5_ctx *kctx, int offset,
		 struct xdr_buf *buf, struct page **pages)
{
	char			cksumdata[GSS_KRB5_MAX_CKSUM_LEN];
	struct xdr_netobj	md5cksum = {.len = sizeof(cksumdata),
					    .data = cksumdata};
	int			blocksize = 0, plainlen;
	unsigned char		*ptr, *msg_start;
	time64_t		now;
	int			headlen;
	struct page		**tmp_pages;
	u32			seq_send;
	u8			*cksumkey;
	u32			conflen = crypto_sync_skcipher_blocksize(kctx->enc);

	dprintk("RPC:       %s\n", __func__);

	now = ktime_get_real_seconds();

	blocksize = crypto_sync_skcipher_blocksize(kctx->enc);
	gss_krb5_add_padding(buf, offset, blocksize);
	BUG_ON((buf->len - offset) % blocksize);
	plainlen = conflen + buf->len - offset;

	headlen = g_token_size(&kctx->mech_used,
		GSS_KRB5_TOK_HDR_LEN + kctx->gk5e->cksumlength + plainlen) -
		(buf->len - offset);

	ptr = buf->head[0].iov_base + offset;
	/* shift data to make room for header. */
	xdr_extend_head(buf, offset, headlen);

	/* XXX Would be cleverer to encrypt while copying. */
	BUG_ON((buf->len - offset - headlen) % blocksize);

	g_make_token_header(&kctx->mech_used,
				GSS_KRB5_TOK_HDR_LEN +
				kctx->gk5e->cksumlength + plainlen, &ptr);

	/* ptr now at header described in rfc 1964, section 1.2.1: */
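	/*
	 *    ptr[0..1]   TOK_ID    0x02 0x01 (KG_TOK_WRAP_MSG)
	 *    ptr[2..3]   SGN_ALG   (little endian)
	 *    ptr[4..5]   SEAL_ALG  (little endian)
	 *    ptr[6..7]   filler    0xff 0xff
	 *    ptr[8..15]  SND_SEQ   (filled in below, then encrypted)
	 *    ptr[16..]   SGN_CKSUM (cksumlength octets)
	 */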
	ptr[0] = (unsigned char) ((KG_TOK_WRAP_MSG >> 8) & 0xff);
	ptr[1] = (unsigned char) (KG_TOK_WRAP_MSG & 0xff);

	msg_start = ptr + GSS_KRB5_TOK_HDR_LEN + kctx->gk5e->cksumlength;

	/*
	 * signalg and sealalg are stored as if they were converted from LE
	 * to host endian, even though they're opaque pairs of bytes according
	 * to the RFC.
	 */
	*(__le16 *)(ptr + 2) = cpu_to_le16(kctx->gk5e->signalg);
	*(__le16 *)(ptr + 4) = cpu_to_le16(kctx->gk5e->sealalg);
	ptr[6] = 0xff;
	ptr[7] = 0xff;

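	/*
	 * The confounder is one cipher block of random data prepended
	 * to the plaintext, so that identical messages never encrypt
	 * to identical ciphertext under the same key.
	 */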
	krb5_make_confounder(msg_start, conflen);

	if (kctx->gk5e->keyed_cksum)
		cksumkey = kctx->cksum;
	else
		cksumkey = NULL;

	/* XXXJBF: UGH!: */
	tmp_pages = buf->pages;
	buf->pages = pages;
	if (make_checksum(kctx, ptr, 8, buf, offset + headlen - conflen,
					cksumkey, KG_USAGE_SEAL, &md5cksum))
		return GSS_S_FAILURE;
	buf->pages = tmp_pages;

	memcpy(ptr + GSS_KRB5_TOK_HDR_LEN, md5cksum.data, md5cksum.len);

	seq_send = atomic_fetch_inc(&kctx->seq_send);

	/* XXX would probably be more efficient to compute checksum
	 * and encrypt at the same time: */
	if ((krb5_make_seq_num(kctx, kctx->seq, kctx->initiate ? 0 : 0xff,
			       seq_send, ptr + GSS_KRB5_TOK_HDR_LEN, ptr + 8)))
		return GSS_S_FAILURE;

	if (gss_encrypt_xdr_buf(kctx->enc, buf,
				offset + headlen - conflen, pages))
		return GSS_S_FAILURE;

	return (kctx->endtime < now) ? GSS_S_CONTEXT_EXPIRED : GSS_S_COMPLETE;
}

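/*
 * Unwrap an RFC 1964 token: verify the token header, decrypt in
 * place, check the checksum, sequence number, and direction, then
 * slide the plaintext back down to @offset and strip the krb5
 * padding.  On success, *slack and *align report (in quad words)
 * how much of the buffer the GSS blob consumed.
 */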
u32
gss_krb5_unwrap_v1(struct krb5_ctx *kctx, int offset, int len,
		   struct xdr_buf *buf, unsigned int *slack,
		   unsigned int *align)
{
	int			signalg;
	int			sealalg;
	char			cksumdata[GSS_KRB5_MAX_CKSUM_LEN];
	struct xdr_netobj	md5cksum = {.len = sizeof(cksumdata),
					    .data = cksumdata};
	time64_t		now;
	int			direction;
	s32			seqnum;
	unsigned char		*ptr;
	int			bodysize;
	void			*data_start, *orig_start;
	int			data_len;
	int			blocksize;
	u32			conflen = crypto_sync_skcipher_blocksize(kctx->enc);
	int			crypt_offset;
	u8			*cksumkey;
	unsigned int		saved_len = buf->len;

	dprintk("RPC:       %s\n", __func__);

	ptr = (u8 *)buf->head[0].iov_base + offset;
	if (g_verify_token_header(&kctx->mech_used, &bodysize, &ptr,
					len - offset))
		return GSS_S_DEFECTIVE_TOKEN;

	if ((ptr[0] != ((KG_TOK_WRAP_MSG >> 8) & 0xff)) ||
	    (ptr[1] !=  (KG_TOK_WRAP_MSG & 0xff)))
		return GSS_S_DEFECTIVE_TOKEN;

	/* XXX sanity-check bodysize?? */

	/* get the sign and seal algorithms */

	signalg = ptr[2] + (ptr[3] << 8);
	if (signalg != kctx->gk5e->signalg)
		return GSS_S_DEFECTIVE_TOKEN;

	sealalg = ptr[4] + (ptr[5] << 8);
	if (sealalg != kctx->gk5e->sealalg)
		return GSS_S_DEFECTIVE_TOKEN;

	if ((ptr[6] != 0xff) || (ptr[7] != 0xff))
		return GSS_S_DEFECTIVE_TOKEN;

	/*
	 * Data starts after token header and checksum.  ptr points
	 * to the beginning of the token header.
	 */
	crypt_offset = ptr + (GSS_KRB5_TOK_HDR_LEN + kctx->gk5e->cksumlength) -
					(unsigned char *)buf->head[0].iov_base;

	buf->len = len;
	if (gss_decrypt_xdr_buf(kctx->enc, buf, crypt_offset))
		return GSS_S_DEFECTIVE_TOKEN;

	if (kctx->gk5e->keyed_cksum)
		cksumkey = kctx->cksum;
	else
		cksumkey = NULL;

	if (make_checksum(kctx, ptr, 8, buf, crypt_offset,
					cksumkey, KG_USAGE_SEAL, &md5cksum))
		return GSS_S_FAILURE;

	if (memcmp(md5cksum.data, ptr + GSS_KRB5_TOK_HDR_LEN,
						kctx->gk5e->cksumlength))
		return GSS_S_BAD_SIG;

	/* it got through unscathed.  Make sure the context is unexpired. */

	now = ktime_get_real_seconds();

	if (now > kctx->endtime)
		return GSS_S_CONTEXT_EXPIRED;

	/* do sequencing checks */

	if (krb5_get_seq_num(kctx, ptr + GSS_KRB5_TOK_HDR_LEN,
			     ptr + 8, &direction, &seqnum))
		return GSS_S_BAD_SIG;

	if ((kctx->initiate && direction != 0xff) ||
	    (!kctx->initiate && direction != 0))
		return GSS_S_BAD_SIG;

	/* Copy the data back to the right position.  XXX: Would probably be
	 * better to copy and decrypt at the same time. */

	blocksize = crypto_sync_skcipher_blocksize(kctx->enc);
	data_start = ptr + (GSS_KRB5_TOK_HDR_LEN + kctx->gk5e->cksumlength) +
					conflen;
	orig_start = buf->head[0].iov_base + offset;
	data_len = (buf->head[0].iov_base + buf->head[0].iov_len) - data_start;
	memmove(orig_start, data_start, data_len);
	buf->head[0].iov_len -= (data_start - orig_start);
	buf->len = len - (data_start - orig_start);

	if (gss_krb5_remove_padding(buf, blocksize))
		return GSS_S_DEFECTIVE_TOKEN;

	/* slack must include room for krb5 padding */
	*slack = XDR_QUADLEN(saved_len - buf->len);
	/* The GSS blob always precedes the RPC message payload */
	*align = *slack;
	return GSS_S_COMPLETE;
}

#endif

/*
 * We can shift data by up to LOCAL_BUF_LEN bytes in a pass.  If we need
 * to do more than that, we shift repeatedly.  Kevin Coffman reports
 * seeing 28 bytes as the value used by Microsoft clients and servers
 * with AES, so this constant is chosen to allow handling 28 in one pass
 * without using too much stack space.
 *
 * If that proves to be a problem perhaps we could use a more clever
 * algorithm.
 */
#define LOCAL_BUF_LEN 32u

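/*
 * Rotate the whole buffer left by @shift bytes, where @shift is at
 * most LOCAL_BUF_LEN.  E.g. "abcdefgh" rotated left by 3 becomes
 * "defghabc": the first 3 bytes are saved, the rest is copied down
 * in LOCAL_BUF_LEN-sized chunks, and the saved bytes are written to
 * the end.
 */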
static void rotate_buf_a_little(struct xdr_buf *buf, unsigned int shift)
{
	char head[LOCAL_BUF_LEN];
	char tmp[LOCAL_BUF_LEN];
	unsigned int this_len, i;

	BUG_ON(shift > LOCAL_BUF_LEN);

	read_bytes_from_xdr_buf(buf, 0, head, shift);
	for (i = 0; i + shift < buf->len; i += LOCAL_BUF_LEN) {
		this_len = min(LOCAL_BUF_LEN, buf->len - (i + shift));
		read_bytes_from_xdr_buf(buf, i + shift, tmp, this_len);
		write_bytes_to_xdr_buf(buf, i, tmp, this_len);
	}
	write_bytes_to_xdr_buf(buf, buf->len - shift, head, shift);
}

static void _rotate_left(struct xdr_buf *buf, unsigned int shift)
{
	int shifted = 0;
	int this_shift;

	shift %= buf->len;
	while (shifted < shift) {
		this_shift = min(shift - shifted, LOCAL_BUF_LEN);
		rotate_buf_a_little(buf, this_shift);
		shifted += this_shift;
	}
}

static void rotate_left(u32 base, struct xdr_buf *buf, unsigned int shift)
{
	struct xdr_buf subbuf;

	xdr_buf_subsegment(buf, &subbuf, base, buf->len - base);
	_rotate_left(&subbuf, shift);
}

u32
gss_krb5_wrap_v2(struct krb5_ctx *kctx, int offset,
		 struct xdr_buf *buf, struct page **pages)
{
	u8		*ptr;
	time64_t	now;
	u8		flags = 0x00;
	__be16		*be16ptr;
	__be64		*be64ptr;
	u32		err;

	dprintk("RPC:       %s\n", __func__);

	/* make room for gss token header */
	if (xdr_extend_head(buf, offset, GSS_KRB5_TOK_HDR_LEN))
		return GSS_S_FAILURE;

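	/*
	 * RFC 4121, section 4.2.6.2 wrap token header:
	 *
	 *    octets 0..1    TOK_ID   0x05 0x04 (KG2_TOK_WRAP)
	 *    octet  2       Flags
	 *    octet  3       filler   0xff
	 *    octets 4..5    EC       (big endian)
	 *    octets 6..7    RRC      (big endian)
	 *    octets 8..15   SND_SEQ  (big endian)
	 */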
	/* construct gss token header */
	ptr = buf->head[0].iov_base + offset;
	*ptr++ = (unsigned char) ((KG2_TOK_WRAP >> 8) & 0xff);
	*ptr++ = (unsigned char) (KG2_TOK_WRAP & 0xff);

	if ((kctx->flags & KRB5_CTX_FLAG_INITIATOR) == 0)
		flags |= KG2_TOKEN_FLAG_SENTBYACCEPTOR;
	if ((kctx->flags & KRB5_CTX_FLAG_ACCEPTOR_SUBKEY) != 0)
		flags |= KG2_TOKEN_FLAG_ACCEPTORSUBKEY;
	/* We always do confidentiality in wrap tokens */
	flags |= KG2_TOKEN_FLAG_SEALED;

	*ptr++ = flags;
	*ptr++ = 0xff;
	be16ptr = (__be16 *)ptr;

	/* EC ("extra count") is always zero here */
	*be16ptr++ = 0;
	/* "inner" token header always uses 0 for RRC */
	*be16ptr++ = 0;

	be64ptr = (__be64 *)be16ptr;
	*be64ptr = cpu_to_be64(atomic64_fetch_inc(&kctx->seq_send64));

	err = (*kctx->gk5e->encrypt)(kctx, offset, buf, pages);
	if (err)
		return err;

	now = ktime_get_real_seconds();
	return (kctx->endtime < now) ? GSS_S_CONTEXT_EXPIRED : GSS_S_COMPLETE;
}

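/*
 * Unwrap an RFC 4121 token: verify the header and flags, undo any
 * right-rotation (RRC), decrypt, and confirm that the copy of the
 * header carried inside the ciphertext matches the one on the wire;
 * then move the plaintext back down to @offset and trim the EC bytes
 * and checksum blob from the tail.
 */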
u32
gss_krb5_unwrap_v2(struct krb5_ctx *kctx, int offset, int len,
		   struct xdr_buf *buf, unsigned int *slack,
		   unsigned int *align)
{
	time64_t	now;
	u8		*ptr;
	u8		flags = 0x00;
	u16		ec, rrc;
	int		err;
	u32		headskip, tailskip;
	u8		decrypted_hdr[GSS_KRB5_TOK_HDR_LEN];
	unsigned int	movelen;

	dprintk("RPC:       %s\n", __func__);

	ptr = buf->head[0].iov_base + offset;

	if (be16_to_cpu(*((__be16 *)ptr)) != KG2_TOK_WRAP)
		return GSS_S_DEFECTIVE_TOKEN;

	flags = ptr[2];
	if ((!kctx->initiate && (flags & KG2_TOKEN_FLAG_SENTBYACCEPTOR)) ||
	    (kctx->initiate && !(flags & KG2_TOKEN_FLAG_SENTBYACCEPTOR)))
		return GSS_S_BAD_SIG;

	if ((flags & KG2_TOKEN_FLAG_SEALED) == 0) {
		dprintk("%s: token missing expected sealed flag\n", __func__);
		return GSS_S_DEFECTIVE_TOKEN;
	}

	if (ptr[3] != 0xff)
		return GSS_S_DEFECTIVE_TOKEN;

	ec = be16_to_cpup((__be16 *)(ptr + 4));
	rrc = be16_to_cpup((__be16 *)(ptr + 6));

	/*
	 * NOTE: the sequence number at ptr + 8 is skipped, rpcsec_gss
	 * doesn't want it checked; see page 6 of rfc 2203.
	 */

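	/*
	 * RFC 4121, section 4.2.5: the sender may rotate the token body
	 * right by RRC octets, typically so that its trailer can be
	 * written without extra buffer space; rotating left by the same
	 * count restores the original layout before decryption.
	 */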
	if (rrc != 0)
		rotate_left(offset + 16, buf, rrc);

	err = (*kctx->gk5e->decrypt)(kctx, offset, len, buf,
				     &headskip, &tailskip);
	if (err)
		return GSS_S_FAILURE;

	/*
	 * Retrieve the decrypted gss token header and verify
	 * it against the original.
	 */
	err = read_bytes_from_xdr_buf(buf,
				len - GSS_KRB5_TOK_HDR_LEN - tailskip,
				decrypted_hdr, GSS_KRB5_TOK_HDR_LEN);
	if (err) {
		dprintk("%s: error %u getting decrypted_hdr\n", __func__, err);
		return GSS_S_FAILURE;
	}
	if (memcmp(ptr, decrypted_hdr, 6) ||
	    memcmp(ptr + 8, decrypted_hdr + 8, 8)) {
		dprintk("%s: token hdr, plaintext hdr mismatch!\n", __func__);
		return GSS_S_FAILURE;
	}

	/* it got through unscathed.  Make sure the context is unexpired. */
	now = ktime_get_real_seconds();
	if (now > kctx->endtime)
		return GSS_S_CONTEXT_EXPIRED;

	/*
	 * Move the head data back to the right position in xdr_buf.
	 * We ignore any "ec" data since it might be in the head or
	 * the tail, and we really don't need to deal with it.
	 * Note that buf->head[0].iov_len may indicate the available
	 * head buffer space rather than that actually occupied.
	 */
	movelen = min_t(unsigned int, buf->head[0].iov_len, len);
	movelen -= offset + GSS_KRB5_TOK_HDR_LEN + headskip;
	BUG_ON(offset + GSS_KRB5_TOK_HDR_LEN + headskip + movelen >
							buf->head[0].iov_len);
	memmove(ptr, ptr + GSS_KRB5_TOK_HDR_LEN + headskip, movelen);
	buf->head[0].iov_len -= GSS_KRB5_TOK_HDR_LEN + headskip;
	buf->len = len - (GSS_KRB5_TOK_HDR_LEN + headskip);

	/* Trim off the trailing "extra count" and checksum blob */
	xdr_buf_trim(buf, ec + GSS_KRB5_TOK_HDR_LEN + tailskip);

	*align = XDR_QUADLEN(GSS_KRB5_TOK_HDR_LEN + headskip);
	*slack = *align + XDR_QUADLEN(ec + GSS_KRB5_TOK_HDR_LEN + tailskip);
	return GSS_S_COMPLETE;
}