/*
 *  linux/net/sunrpc/gss_krb5_crypto.c
 *
 *  Copyright (c) 2000 The Regents of the University of Michigan.
 *  All rights reserved.
 *
 *  Andy Adamson   <andros@umich.edu>
 *  Bruce Fields   <bfields@umich.edu>
 */

/*
 * Copyright (C) 1998 by the FundsXpress, INC.
 *
 * All rights reserved.
 *
 * Export of this software from the United States of America may require
 * a specific license from the United States Government.  It is the
 * responsibility of any person or organization contemplating export to
 * obtain such a license before exporting.
 *
 * WITHIN THAT CONSTRAINT, permission to use, copy, modify, and
 * distribute this software and its documentation for any purpose and
 * without fee is hereby granted, provided that the above copyright
 * notice appear in all copies and that both that copyright notice and
 * this permission notice appear in supporting documentation, and that
 * the name of FundsXpress. not be used in advertising or publicity pertaining
 * to distribution of the software without specific, written prior
 * permission.  FundsXpress makes no representations about the suitability of
 * this software for any purpose.  It is provided "as is" without express
 * or implied warranty.
 *
 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
 * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
 */

#include <linux/err.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/scatterlist.h>
#include <linux/crypto.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/sunrpc/gss_krb5.h>
#include <linux/sunrpc/xdr.h>

#ifdef RPC_DEBUG
# define RPCDBG_FACILITY        RPCDBG_AUTH
#endif
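
/*
 * krb5_encrypt() and krb5_decrypt() run a single contiguous buffer
 * through the given block cipher: the input is copied to "out" and then
 * transformed in place.  "length" must be a whole number of cipher
 * blocks, and a NULL "iv" is treated as an all-zero IV.
 */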
u32
krb5_encrypt(
	struct crypto_blkcipher *tfm,
	void *iv,
	void *in,
	void *out,
	int length)
{
	u32 ret = -EINVAL;
	struct scatterlist sg[1];
	u8 local_iv[16] = {0};
	struct blkcipher_desc desc = { .tfm = tfm, .info = local_iv };

	if (length % crypto_blkcipher_blocksize(tfm) != 0)
		goto out;

	if (crypto_blkcipher_ivsize(tfm) > 16) {
		dprintk("RPC:       gss_k5encrypt: tfm iv size too large %d\n",
			crypto_blkcipher_ivsize(tfm));
		goto out;
	}

	if (iv)
		memcpy(local_iv, iv, crypto_blkcipher_ivsize(tfm));

	memcpy(out, in, length);
	sg_init_one(sg, out, length);

	ret = crypto_blkcipher_encrypt_iv(&desc, sg, sg, length);
out:
	dprintk("RPC:       krb5_encrypt returns %d\n", ret);
	return ret;
}
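
/*
 * Illustrative sketch only (nothing in this file uses it this way):
 * encrypting one cipher block with krb5_encrypt() and reversing it with
 * krb5_decrypt().  The "cbc(des)" algorithm name and the 8-byte key and
 * block sizes are assumptions made for the example, and "key" and
 * "plain" are assumed to have been filled in; real callers pass a
 * cipher handle that was already allocated and keyed from the Kerberos
 * context.
 *
 *	struct crypto_blkcipher *tfm;
 *	u8 key[8], plain[8], cipher[8], check[8];
 *	u32 err;
 *
 *	tfm = crypto_alloc_blkcipher("cbc(des)", 0, CRYPTO_ALG_ASYNC);
 *	if (IS_ERR(tfm))
 *		return GSS_S_FAILURE;
 *	err = crypto_blkcipher_setkey(tfm, key, sizeof(key));
 *	if (err == 0)
 *		err = krb5_encrypt(tfm, NULL, plain, cipher, sizeof(plain));
 *	if (err == 0)
 *		err = krb5_decrypt(tfm, NULL, cipher, check, sizeof(cipher));
 *	crypto_free_blkcipher(tfm);
 */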

u32
krb5_decrypt(
	struct crypto_blkcipher *tfm,
	void *iv,
	void *in,
	void *out,
	int length)
{
	u32 ret = -EINVAL;
	struct scatterlist sg[1];
	u8 local_iv[16] = {0};
	struct blkcipher_desc desc = { .tfm = tfm, .info = local_iv };

	if (length % crypto_blkcipher_blocksize(tfm) != 0)
		goto out;

	if (crypto_blkcipher_ivsize(tfm) > 16) {
		dprintk("RPC:       gss_k5decrypt: tfm iv size too large %d\n",
			crypto_blkcipher_ivsize(tfm));
		goto out;
	}
	if (iv)
		memcpy(local_iv, iv, crypto_blkcipher_ivsize(tfm));

	memcpy(out, in, length);
	sg_init_one(sg, out, length);

	ret = crypto_blkcipher_decrypt_iv(&desc, sg, sg, length);
out:
	dprintk("RPC:       gss_k5decrypt returns %d\n", ret);
	return ret;
}

static int
checksummer(struct scatterlist *sg, void *data)
{
	struct hash_desc *desc = data;

	return crypto_hash_update(desc, sg, sg->length);
}

/* checksum the plaintext data and hdrlen bytes of the token header */
s32
make_checksum(char *cksumname, char *header, int hdrlen, struct xdr_buf *body,
		   int body_offset, struct xdr_netobj *cksum)
{
	struct hash_desc                desc; /* XXX add to ctx? */
	struct scatterlist              sg[1];
	int err;

	desc.tfm = crypto_alloc_hash(cksumname, 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(desc.tfm))
		return GSS_S_FAILURE;
	cksum->len = crypto_hash_digestsize(desc.tfm);
	desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;

	err = crypto_hash_init(&desc);
	if (err)
		goto out;
	sg_init_one(sg, header, hdrlen);
	err = crypto_hash_update(&desc, sg, hdrlen);
	if (err)
		goto out;
	err = xdr_process_buf(body, body_offset, body->len - body_offset,
			      checksummer, &desc);
	if (err)
		goto out;
	err = crypto_hash_final(&desc, cksum->data);

out:
	crypto_free_hash(desc.tfm);
	return err ? GSS_S_FAILURE : 0;
}
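
/*
 * Illustrative sketch only: computing a checksum over an xdr_buf with
 * make_checksum().  The "md5" digest name, the 8-byte token header and
 * the 16-byte digest buffer are assumptions for the example; the real
 * callers pass the checksum type required by the Kerberos token being
 * built or verified, and "header" and "body" are assumed to have been
 * set up already.
 *
 *	struct xdr_netobj cksum;
 *	u8 cksumdata[16];
 *	char header[8];
 *	struct xdr_buf *body;
 *
 *	cksum.data = cksumdata;
 *	if (make_checksum("md5", header, sizeof(header), body, 0, &cksum))
 *		return GSS_S_FAILURE;
 *
 * On success, cksum.len holds the digest length and the digest itself
 * has been written into cksumdata.
 */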

struct encryptor_desc {
	u8 iv[8]; /* XXX hard-coded blocksize */
	struct blkcipher_desc desc;
	int pos;
	struct xdr_buf *outbuf;
	struct page **pages;
	struct scatterlist infrags[4];
	struct scatterlist outfrags[4];
	int fragno;
	int fraglen;
};
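
/*
 * Callback passed to xdr_process_buf() by gss_encrypt_xdr_buf().  It
 * gathers up to four scatterlist fragments (head, the end of one page,
 * the start of the next, tail), encrypts every complete cipher block
 * gathered so far, and carries any sub-block remainder over into the
 * next call so that cipher-block boundaries are preserved across
 * fragments.  When the caller supplied separate plaintext pages, those
 * are read as the encryption input while the ciphertext is written into
 * the xdr_buf's own pages, leaving the result in place in "outbuf".
 */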
static int
encryptor(struct scatterlist *sg, void *data)
{
	struct encryptor_desc *desc = data;
	struct xdr_buf *outbuf = desc->outbuf;
	struct page *in_page;
	int thislen = desc->fraglen + sg->length;
	int fraglen, ret;
	int page_pos;

	/* Worst case is 4 fragments: head, end of page 1, start
	 * of page 2, tail.  Anything more is a bug. */
	BUG_ON(desc->fragno > 3);

	page_pos = desc->pos - outbuf->head[0].iov_len;
	if (page_pos >= 0 && page_pos < outbuf->page_len) {
		/* pages are not in place: */
		int i = (page_pos + outbuf->page_base) >> PAGE_CACHE_SHIFT;
		in_page = desc->pages[i];
	} else {
		in_page = sg_page(sg);
	}
	sg_set_page(&desc->infrags[desc->fragno], in_page, sg->length,
		    sg->offset);
	sg_set_page(&desc->outfrags[desc->fragno], sg_page(sg), sg->length,
		    sg->offset);
	desc->fragno++;
	desc->fraglen += sg->length;
	desc->pos += sg->length;

	fraglen = thislen & 7; /* XXX hardcoded blocksize */
	thislen -= fraglen;

	if (thislen == 0)
		return 0;

	sg_mark_end(&desc->infrags[desc->fragno - 1]);
	sg_mark_end(&desc->outfrags[desc->fragno - 1]);

	ret = crypto_blkcipher_encrypt_iv(&desc->desc, desc->outfrags,
					  desc->infrags, thislen);
	if (ret)
		return ret;

	sg_init_table(desc->infrags, 4);
	sg_init_table(desc->outfrags, 4);

	if (fraglen) {
		sg_set_page(&desc->outfrags[0], sg_page(sg), fraglen,
				sg->offset + sg->length - fraglen);
		desc->infrags[0] = desc->outfrags[0];
		sg_assign_page(&desc->infrags[0], in_page);
		desc->fragno = 1;
		desc->fraglen = fraglen;
	} else {
		desc->fragno = 0;
		desc->fraglen = 0;
	}
	return 0;
}

int
gss_encrypt_xdr_buf(struct crypto_blkcipher *tfm, struct xdr_buf *buf,
		    int offset, struct page **pages)
{
	int ret;
	struct encryptor_desc desc;

	BUG_ON((buf->len - offset) % crypto_blkcipher_blocksize(tfm) != 0);

	memset(desc.iv, 0, sizeof(desc.iv));
	desc.desc.tfm = tfm;
	desc.desc.info = desc.iv;
	desc.desc.flags = 0;
	desc.pos = offset;
	desc.outbuf = buf;
	desc.pages = pages;
	desc.fragno = 0;
	desc.fraglen = 0;

	sg_init_table(desc.infrags, 4);
	sg_init_table(desc.outfrags, 4);

	ret = xdr_process_buf(buf, offset, buf->len - offset, encryptor, &desc);
	return ret;
}
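
/*
 * Illustrative sketch only: encrypting the payload of an xdr_buf in
 * place with gss_encrypt_xdr_buf().  The variable names below are
 * assumptions for the example; "tfm" is assumed to be already allocated
 * and keyed, the region from "offset" to the end of the buffer must
 * already be padded to a whole number of cipher blocks, and "in_pages"
 * holds the plaintext page data while the ciphertext is written into
 * the buffer's own pages.
 *
 *	struct crypto_blkcipher *tfm;
 *	struct xdr_buf *buf;
 *	struct page **in_pages;
 *	int offset, err;
 *
 *	err = gss_encrypt_xdr_buf(tfm, buf, offset, in_pages);
 *	if (err)
 *		return GSS_S_FAILURE;
 */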

struct decryptor_desc {
	u8 iv[8]; /* XXX hard-coded blocksize */
	struct blkcipher_desc desc;
	struct scatterlist frags[4];
	int fragno;
	int fraglen;
};

static int
decryptor(struct scatterlist *sg, void *data)
{
	struct decryptor_desc *desc = data;
	int thislen = desc->fraglen + sg->length;
	int fraglen, ret;

	/* Worst case is 4 fragments: head, end of page 1, start
	 * of page 2, tail.  Anything more is a bug. */
	BUG_ON(desc->fragno > 3);
	sg_set_page(&desc->frags[desc->fragno], sg_page(sg), sg->length,
		    sg->offset);
	desc->fragno++;
	desc->fraglen += sg->length;

	fraglen = thislen & 7; /* XXX hardcoded blocksize */
	thislen -= fraglen;

	if (thislen == 0)
		return 0;

	sg_mark_end(&desc->frags[desc->fragno - 1]);

	ret = crypto_blkcipher_decrypt_iv(&desc->desc, desc->frags,
					  desc->frags, thislen);
	if (ret)
		return ret;

	sg_init_table(desc->frags, 4);

	if (fraglen) {
		sg_set_page(&desc->frags[0], sg_page(sg), fraglen,
				sg->offset + sg->length - fraglen);
		desc->fragno = 1;
		desc->fraglen = fraglen;
	} else {
		desc->fragno = 0;
		desc->fraglen = 0;
	}
	return 0;
}

int
gss_decrypt_xdr_buf(struct crypto_blkcipher *tfm, struct xdr_buf *buf,
		    int offset)
{
	struct decryptor_desc desc;

	/* XXXJBF: */
	BUG_ON((buf->len - offset) % crypto_blkcipher_blocksize(tfm) != 0);

	memset(desc.iv, 0, sizeof(desc.iv));
	desc.desc.tfm = tfm;
	desc.desc.info = desc.iv;
	desc.desc.flags = 0;
	desc.fragno = 0;
	desc.fraglen = 0;

	sg_init_table(desc.frags, 4);

	return xdr_process_buf(buf, offset, buf->len - offset, decryptor, &desc);
}
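
/*
 * Illustrative sketch only: decrypting the wrapped portion of a received
 * xdr_buf in place with gss_decrypt_xdr_buf().  As with encryption, the
 * region from "offset" to the end of the buffer must be a whole number
 * of cipher blocks; the variable names and the choice of error code
 * below are assumptions for the example.
 *
 *	struct crypto_blkcipher *tfm;
 *	struct xdr_buf *buf;
 *	int offset;
 *
 *	if (gss_decrypt_xdr_buf(tfm, buf, offset))
 *		return GSS_S_DEFECTIVE_TOKEN;
 */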