/*
 * COPYRIGHT (c) 2008
 * The Regents of the University of Michigan
 * ALL RIGHTS RESERVED
 *
 * Permission is granted to use, copy, create derivative works
 * and redistribute this software and such derivative works
 * for any purpose, so long as the name of The University of
 * Michigan is not used in any advertising or publicity
 * pertaining to the use or distribution of this software
 * without specific, written prior authorization.  If the
 * above copyright notice or any other identification of the
 * University of Michigan is included in any copy of any
 * portion of this software, then the disclaimer below must
 * also be included.
 *
 * THIS SOFTWARE IS PROVIDED AS IS, WITHOUT REPRESENTATION
 * FROM THE UNIVERSITY OF MICHIGAN AS TO ITS FITNESS FOR ANY
 * PURPOSE, AND WITHOUT WARRANTY BY THE UNIVERSITY OF
 * MICHIGAN OF ANY KIND, EITHER EXPRESS OR IMPLIED, INCLUDING
 * WITHOUT LIMITATION THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE
 * REGENTS OF THE UNIVERSITY OF MICHIGAN SHALL NOT BE LIABLE
 * FOR ANY DAMAGES, INCLUDING SPECIAL, INDIRECT, INCIDENTAL, OR
 * CONSEQUENTIAL DAMAGES, WITH RESPECT TO ANY CLAIM ARISING
 * OUT OF OR IN CONNECTION WITH THE USE OF THE SOFTWARE, EVEN
 * IF IT HAS BEEN OR IS HEREAFTER ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGES.
 */

#include <crypto/skcipher.h>
#include <linux/types.h>
#include <linux/jiffies.h>
#include <linux/sunrpc/gss_krb5.h>
#include <linux/pagemap.h>

#include "gss_krb5_internal.h"

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
# define RPCDBG_FACILITY	RPCDBG_AUTH
#endif

/*
 * We can shift data by up to LOCAL_BUF_LEN bytes in a pass.  If we need
 * to do more than that, we shift repeatedly.  Kevin Coffman reports
 * seeing 28 bytes as the value used by Microsoft clients and servers
 * with AES, so this constant is chosen to allow handling 28 in one pass
 * without using too much stack space.
 *
 * If that proves to be a problem, perhaps we could use a more clever
 * algorithm.
 */
#define LOCAL_BUF_LEN 32u

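/*
 * Rotate the contents of @buf left by @shift bytes, where @shift is at
 * most LOCAL_BUF_LEN: stash the first @shift bytes, slide the remainder
 * toward the front of the buffer in LOCAL_BUF_LEN-sized chunks, then
 * write the stashed bytes back at the tail.
 */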
static void rotate_buf_a_little(struct xdr_buf *buf, unsigned int shift)
{
	char head[LOCAL_BUF_LEN];
	char tmp[LOCAL_BUF_LEN];
	unsigned int this_len, i;

	BUG_ON(shift > LOCAL_BUF_LEN);

	read_bytes_from_xdr_buf(buf, 0, head, shift);
	for (i = 0; i + shift < buf->len; i += LOCAL_BUF_LEN) {
		this_len = min(LOCAL_BUF_LEN, buf->len - (i + shift));
		read_bytes_from_xdr_buf(buf, i+shift, tmp, this_len);
		write_bytes_to_xdr_buf(buf, i, tmp, this_len);
	}
	write_bytes_to_xdr_buf(buf, buf->len - shift, head, shift);
}

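/*
 * Rotate @buf left by an arbitrary @shift (reduced modulo the buffer
 * length) by applying rotate_buf_a_little() repeatedly, at most
 * LOCAL_BUF_LEN bytes per pass.
 */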
static void _rotate_left(struct xdr_buf *buf, unsigned int shift)
{
	int shifted = 0;
	int this_shift;

	shift %= buf->len;
	while (shifted < shift) {
		this_shift = min(shift - shifted, LOCAL_BUF_LEN);
		rotate_buf_a_little(buf, this_shift);
		shifted += this_shift;
	}
}

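/*
 * Rotate only the region of @buf that starts at byte offset @base,
 * leaving the first @base bytes in place.
 */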
static void rotate_left(u32 base, struct xdr_buf *buf, unsigned int shift)
{
	struct xdr_buf subbuf;

	xdr_buf_subsegment(buf, &subbuf, base, buf->len - base);
	_rotate_left(&subbuf, shift);
}

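/*
 * Wrap an RPCSEC_GSS message in an RFC 4121 Wrap token: build the
 * 16-octet token header in front of the cleartext at @offset in @buf,
 * fill in the flags and the 64-bit sequence number, then hand the
 * result to the enctype's encrypt method.  Returns a GSS_S_* major
 * status code.
 */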
u32
gss_krb5_wrap_v2(struct krb5_ctx *kctx, int offset,
		 struct xdr_buf *buf, struct page **pages)
{
	u8		*ptr;
	time64_t	now;
	u8		flags = 0x00;
	__be16		*be16ptr;
	__be64		*be64ptr;
	u32		err;

	dprintk("RPC:       %s\n", __func__);

	/* make room for gss token header */
	if (xdr_extend_head(buf, offset, GSS_KRB5_TOK_HDR_LEN))
		return GSS_S_FAILURE;

	/* construct gss token header */
	ptr = buf->head[0].iov_base + offset;
	*ptr++ = (unsigned char) ((KG2_TOK_WRAP>>8) & 0xff);
	*ptr++ = (unsigned char) (KG2_TOK_WRAP & 0xff);

	if ((kctx->flags & KRB5_CTX_FLAG_INITIATOR) == 0)
		flags |= KG2_TOKEN_FLAG_SENTBYACCEPTOR;
	if ((kctx->flags & KRB5_CTX_FLAG_ACCEPTOR_SUBKEY) != 0)
		flags |= KG2_TOKEN_FLAG_ACCEPTORSUBKEY;
	/* We always do confidentiality in wrap tokens */
	flags |= KG2_TOKEN_FLAG_SEALED;

	*ptr++ = flags;
	*ptr++ = 0xff;
	be16ptr = (__be16 *)ptr;

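	/* EC ("extra count"), octets 4-5 of the RFC 4121 token header */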
	*be16ptr++ = 0;
	/* "inner" token header always uses 0 for RRC */
	*be16ptr++ = 0;

	be64ptr = (__be64 *)be16ptr;
	*be64ptr = cpu_to_be64(atomic64_fetch_inc(&kctx->seq_send64));

	err = (*kctx->gk5e->encrypt)(kctx, offset, buf, pages);
	if (err)
		return err;

	now = ktime_get_real_seconds();
	return (kctx->endtime < now) ? GSS_S_CONTEXT_EXPIRED : GSS_S_COMPLETE;
}

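/*
 * Unwrap an RFC 4121 Wrap token received at @offset in @buf: sanity
 * check the token header, undo any right rotation indicated by the RRC
 * field, decrypt via the enctype's decrypt method, verify that the
 * header protected inside the ciphertext matches the one sent in the
 * clear, and then slide the cleartext into place.  @slack and @align
 * report, in XDR quad units, how much space the krb5 overhead consumed.
 * Returns a GSS_S_* major status code.
 */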
u32
gss_krb5_unwrap_v2(struct krb5_ctx *kctx, int offset, int len,
		   struct xdr_buf *buf, unsigned int *slack,
		   unsigned int *align)
{
	time64_t	now;
	u8		*ptr;
	u8		flags = 0x00;
	u16		ec, rrc;
	int		err;
	u32		headskip, tailskip;
	u8		decrypted_hdr[GSS_KRB5_TOK_HDR_LEN];
	unsigned int	movelen;


	dprintk("RPC:       %s\n", __func__);

	ptr = buf->head[0].iov_base + offset;

	if (be16_to_cpu(*((__be16 *)ptr)) != KG2_TOK_WRAP)
		return GSS_S_DEFECTIVE_TOKEN;

	flags = ptr[2];
	if ((!kctx->initiate && (flags & KG2_TOKEN_FLAG_SENTBYACCEPTOR)) ||
	    (kctx->initiate && !(flags & KG2_TOKEN_FLAG_SENTBYACCEPTOR)))
		return GSS_S_BAD_SIG;

	if ((flags & KG2_TOKEN_FLAG_SEALED) == 0) {
		dprintk("%s: token missing expected sealed flag\n", __func__);
		return GSS_S_DEFECTIVE_TOKEN;
	}

	if (ptr[3] != 0xff)
		return GSS_S_DEFECTIVE_TOKEN;

	ec = be16_to_cpup((__be16 *)(ptr + 4));
	rrc = be16_to_cpup((__be16 *)(ptr + 6));

	/*
	 * NOTE: the sequence number at ptr + 8 is skipped, rpcsec_gss
	 * doesn't want it checked; see page 6 of rfc 2203.
	 */

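	/*
	 * The RRC field records how many octets the peer right-rotated
	 * the encrypted data within the token (RFC 4121, section 4.2.5).
	 * Undo that rotation so the ciphertext is contiguous again
	 * before decrypting.
	 */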
	if (rrc != 0)
		rotate_left(offset + 16, buf, rrc);

	err = (*kctx->gk5e->decrypt)(kctx, offset, len, buf,
				     &headskip, &tailskip);
	if (err)
		return GSS_S_FAILURE;

	/*
	 * Retrieve the decrypted gss token header and verify
	 * it against the original
	 */
	err = read_bytes_from_xdr_buf(buf,
				len - GSS_KRB5_TOK_HDR_LEN - tailskip,
				decrypted_hdr, GSS_KRB5_TOK_HDR_LEN);
	if (err) {
		dprintk("%s: error %u getting decrypted_hdr\n", __func__, err);
		return GSS_S_FAILURE;
	}
	if (memcmp(ptr, decrypted_hdr, 6)
				|| memcmp(ptr + 8, decrypted_hdr + 8, 8)) {
		dprintk("%s: token hdr, plaintext hdr mismatch!\n", __func__);
		return GSS_S_FAILURE;
	}

	/* do sequencing checks */

	/* it got through unscathed.  Make sure the context is unexpired */
	now = ktime_get_real_seconds();
	if (now > kctx->endtime)
		return GSS_S_CONTEXT_EXPIRED;

	/*
	 * Move the head data back to the right position in xdr_buf.
	 * We ignore any "ec" data since it might be in the head or
	 * the tail, and we really don't need to deal with it.
	 * Note that buf->head[0].iov_len may indicate the available
	 * head buffer space rather than that actually occupied.
	 */
	movelen = min_t(unsigned int, buf->head[0].iov_len, len);
	movelen -= offset + GSS_KRB5_TOK_HDR_LEN + headskip;
	BUG_ON(offset + GSS_KRB5_TOK_HDR_LEN + headskip + movelen >
							buf->head[0].iov_len);
	memmove(ptr, ptr + GSS_KRB5_TOK_HDR_LEN + headskip, movelen);
	buf->head[0].iov_len -= GSS_KRB5_TOK_HDR_LEN + headskip;
	buf->len = len - (GSS_KRB5_TOK_HDR_LEN + headskip);

	/* Trim off the trailing "extra count" and checksum blob */
	xdr_buf_trim(buf, ec + GSS_KRB5_TOK_HDR_LEN + tailskip);

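	/*
	 * Tell the caller, in XDR quad (4-byte) units, how much of the
	 * received data was krb5 overhead: @align covers the token
	 * header and confounder stripped from the front; @slack
	 * additionally covers the EC filler, inner token header, and
	 * checksum trimmed from the tail.
	 */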
	*align = XDR_QUADLEN(GSS_KRB5_TOK_HDR_LEN + headskip);
	*slack = *align + XDR_QUADLEN(ec + GSS_KRB5_TOK_HDR_LEN + tailskip);
	return GSS_S_COMPLETE;
}