xref: /openbmc/linux/drivers/crypto/omap-aes-gcm.c (revision b877ad1a)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Cryptographic API.
 *
 * Support for OMAP AES GCM HW acceleration.
 *
 * Copyright (c) 2016 Texas Instruments Incorporated
 */

#include <linux/errno.h>
#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/omap-dma.h>
#include <linux/interrupt.h>
#include <crypto/aes.h>
#include <crypto/gcm.h>
#include <crypto/scatterwalk.h>
#include <crypto/skcipher.h>
#include <crypto/internal/aead.h>

#include "omap-crypto.h"
#include "omap-aes.h"

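/*
 * Usage sketch (illustrative only, not part of this driver): these
 * routines are reached through the generic AEAD API once omap-aes.c has
 * registered the "gcm(aes)" and "rfc4106(gcm(aes))" algorithms, e.g.:
 *
 *	struct crypto_aead *tfm = crypto_alloc_aead("gcm(aes)", 0, 0);
 *
 *	crypto_aead_setkey(tfm, key, 16);
 *	crypto_aead_setauthsize(tfm, 16);
 *	aead_request_set_crypt(req, src, dst, cryptlen, iv);
 *	aead_request_set_ad(req, assoclen);
 *	crypto_aead_encrypt(req);
 */
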
static int omap_aes_gcm_handle_queue(struct omap_aes_dev *dd,
				     struct aead_request *req);

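/* Drop the BUSY flag and complete the request back to the caller. */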
static void omap_aes_gcm_finish_req(struct omap_aes_dev *dd, int ret)
{
	struct aead_request *req = dd->aead_req;

	dd->flags &= ~FLAGS_BUSY;
	dd->in_sg = NULL;
	dd->out_sg = NULL;

	req->base.complete(&req->base, ret);
}

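/*
 * DMA completion path: tear down the mappings set up in
 * omap_aes_gcm_copy_buffers(), copy the computed tag out for encryption,
 * and verify it for decryption.
 */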
static void omap_aes_gcm_done_task(struct omap_aes_dev *dd)
{
	u8 *tag;
	int alen, clen, i, ret = 0, nsg;
	struct omap_aes_reqctx *rctx;

	alen = ALIGN(dd->assoc_len, AES_BLOCK_SIZE);
	clen = ALIGN(dd->total, AES_BLOCK_SIZE);
	rctx = aead_request_ctx(dd->aead_req);

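	/* Mirrors the in_sgl[] layout set up in omap_aes_gcm_copy_buffers(). */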
	nsg = !!(dd->assoc_len && dd->total);

	dma_sync_sg_for_device(dd->dev, dd->out_sg, dd->out_sg_len,
			       DMA_FROM_DEVICE);
	dma_unmap_sg(dd->dev, dd->in_sg, dd->in_sg_len, DMA_TO_DEVICE);
	dma_unmap_sg(dd->dev, dd->out_sg, dd->out_sg_len, DMA_FROM_DEVICE);
	omap_aes_crypt_dma_stop(dd);

	omap_crypto_cleanup(dd->out_sg, dd->orig_out,
			    dd->aead_req->assoclen, dd->total,
			    FLAGS_OUT_DATA_ST_SHIFT, dd->flags);

	if (dd->flags & FLAGS_ENCRYPT)
		scatterwalk_map_and_copy(rctx->auth_tag,
					 dd->aead_req->dst,
					 dd->total + dd->aead_req->assoclen,
					 dd->authsize, 1);

	omap_crypto_cleanup(&dd->in_sgl[0], NULL, 0, alen,
			    FLAGS_ASSOC_DATA_ST_SHIFT, dd->flags);

	omap_crypto_cleanup(&dd->in_sgl[nsg], NULL, 0, clen,
			    FLAGS_IN_DATA_ST_SHIFT, dd->flags);

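	/*
	 * For decryption, rctx->auth_tag now holds computed_tag XOR
	 * received_tag (see omap_aes_gcm_dma_out_callback()), so any
	 * nonzero byte means the message failed authentication.
	 */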
	if (!(dd->flags & FLAGS_ENCRYPT)) {
		tag = (u8 *)rctx->auth_tag;
		for (i = 0; i < dd->authsize; i++) {
			if (tag[i]) {
				dev_err(dd->dev, "GCM decryption: Tag verification failed\n");
				ret = -EBADMSG;
			}
		}
	}

	omap_aes_gcm_finish_req(dd, ret);
	omap_aes_gcm_handle_queue(dd, NULL);
}

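/*
 * The AES engine consumes block-aligned scatterlists: assoc data and
 * payload are each padded up to AES_BLOCK_SIZE and, when unaligned or
 * fragmented, bounced into driver-owned buffers via omap_crypto_align_sg().
 */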
static int omap_aes_gcm_copy_buffers(struct omap_aes_dev *dd,
				     struct aead_request *req)
{
	int alen, clen, cryptlen, assoclen, ret;
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	unsigned int authlen = crypto_aead_authsize(aead);
	struct scatterlist *tmp, sg_arr[2];
	int nsg;
	u16 flags;

	assoclen = req->assoclen;
	cryptlen = req->cryptlen;

	if (dd->flags & FLAGS_RFC4106_GCM)
		assoclen -= 8;

	if (!(dd->flags & FLAGS_ENCRYPT))
		cryptlen -= authlen;

	alen = ALIGN(assoclen, AES_BLOCK_SIZE);
	clen = ALIGN(cryptlen, AES_BLOCK_SIZE);

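	/*
	 * in_sgl[] uses one entry when only assoc data or only payload is
	 * present, two when both are; the payload always sits at in_sgl[nsg].
	 */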
	nsg = !!(assoclen && cryptlen);

	omap_aes_clear_copy_flags(dd);

	sg_init_table(dd->in_sgl, nsg + 1);
	if (assoclen) {
		tmp = req->src;
		ret = omap_crypto_align_sg(&tmp, assoclen,
					   AES_BLOCK_SIZE, dd->in_sgl,
					   OMAP_CRYPTO_COPY_DATA |
					   OMAP_CRYPTO_ZERO_BUF |
					   OMAP_CRYPTO_FORCE_SINGLE_ENTRY,
					   FLAGS_ASSOC_DATA_ST_SHIFT,
					   &dd->flags);
		if (ret)
			return ret;
	}

	if (cryptlen) {
		tmp = scatterwalk_ffwd(sg_arr, req->src, req->assoclen);

		ret = omap_crypto_align_sg(&tmp, cryptlen,
					   AES_BLOCK_SIZE, &dd->in_sgl[nsg],
					   OMAP_CRYPTO_COPY_DATA |
					   OMAP_CRYPTO_ZERO_BUF |
					   OMAP_CRYPTO_FORCE_SINGLE_ENTRY,
					   FLAGS_IN_DATA_ST_SHIFT,
					   &dd->flags);
		if (ret)
			return ret;
	}

	dd->in_sg = dd->in_sgl;
	dd->total = cryptlen;
	dd->assoc_len = assoclen;
	dd->authsize = authlen;

	dd->out_sg = req->dst;
	dd->orig_out = req->dst;

	dd->out_sg = scatterwalk_ffwd(sg_arr, req->dst, assoclen);

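	/*
	 * Force a bounce buffer for in-place operation, or when
	 * scatterwalk_ffwd() had to build the forwarded list in the
	 * on-stack sg_arr[], which will not outlive this function.
	 */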
	flags = 0;
	if (req->src == req->dst || dd->out_sg == sg_arr)
		flags |= OMAP_CRYPTO_FORCE_COPY;

	if (cryptlen) {
		ret = omap_crypto_align_sg(&dd->out_sg, cryptlen,
					   AES_BLOCK_SIZE, &dd->out_sgl,
					   flags,
					   FLAGS_OUT_DATA_ST_SHIFT, &dd->flags);
		if (ret)
			return ret;
	}

	dd->in_sg_len = sg_nents_for_len(dd->in_sg, alen + clen);
	dd->out_sg_len = sg_nents_for_len(dd->out_sg, clen);

	return 0;
}

static void omap_aes_gcm_complete(struct crypto_async_request *req, int err)
{
	struct omap_aes_gcm_result *res = req->data;

	if (err == -EINPROGRESS)
		return;

	res->err = err;
	complete(&res->completion);
}

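/*
 * Pre-compute E(K, J0), the encrypted initial counter block that GCM
 * XORs with the GHASH output to form the final tag.  For a 96-bit IV,
 * J0 is simply:
 *
 *	J0 = IV[0..11] || 0x00000001
 *
 * The block is encrypted synchronously through the separate skcipher
 * handle in ctx->ctr and the result is left in @tag.
 */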
static int do_encrypt_iv(struct aead_request *req, u32 *tag, u32 *iv)
{
	struct scatterlist iv_sg, tag_sg;
	struct skcipher_request *sk_req;
	struct omap_aes_gcm_result result;
	struct omap_aes_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
	int ret = 0;

	sk_req = skcipher_request_alloc(ctx->ctr, GFP_KERNEL);
	if (!sk_req) {
		pr_err("skcipher: Failed to allocate request\n");
		return -ENOMEM;
	}

	init_completion(&result.completion);

	sg_init_one(&iv_sg, iv, AES_BLOCK_SIZE);
	sg_init_one(&tag_sg, tag, AES_BLOCK_SIZE);
	skcipher_request_set_callback(sk_req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				      omap_aes_gcm_complete, &result);
	ret = crypto_skcipher_setkey(ctx->ctr, (u8 *)ctx->key, ctx->keylen);
	if (ret)
		goto out;
	skcipher_request_set_crypt(sk_req, &iv_sg, &tag_sg, AES_BLOCK_SIZE,
				   NULL);
	ret = crypto_skcipher_encrypt(sk_req);
	switch (ret) {
	case 0:
		break;
	case -EINPROGRESS:
	case -EBUSY:
		ret = wait_for_completion_interruptible(&result.completion);
		if (!ret) {
			ret = result.err;
			if (!ret) {
				reinit_completion(&result.completion);
				break;
			}
		}
		/* fall through */
	default:
		pr_err("Encryption of IV failed for GCM mode\n");
		break;
	}

out:
	skcipher_request_free(sk_req);
	return ret;
}

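/*
 * Called when the output DMA transfer completes: fold the hardware tag
 * registers into the final authentication tag, then finish the request.
 */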
void omap_aes_gcm_dma_out_callback(void *data)
{
	struct omap_aes_dev *dd = data;
	struct omap_aes_reqctx *rctx;
	int i, val;
	u32 *auth_tag, tag[4];

	if (!(dd->flags & FLAGS_ENCRYPT))
		scatterwalk_map_and_copy(tag, dd->aead_req->src,
					 dd->total + dd->aead_req->assoclen,
					 dd->authsize, 0);

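	/*
	 * rctx->auth_tag already holds E(K, J0) from do_encrypt_iv(); XOR
	 * in the engine's TAG registers to obtain the final tag.  For
	 * decryption, additionally XOR in the received tag so that the
	 * result is all-zero exactly when authentication succeeds (checked
	 * in omap_aes_gcm_done_task()).
	 */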
	rctx = aead_request_ctx(dd->aead_req);
	auth_tag = (u32 *)rctx->auth_tag;
	for (i = 0; i < 4; i++) {
		val = omap_aes_read(dd, AES_REG_TAG_N(dd, i));
		auth_tag[i] = val ^ auth_tag[i];
		if (!(dd->flags & FLAGS_ENCRYPT))
			auth_tag[i] = auth_tag[i] ^ tag[i];
	}

	omap_aes_gcm_done_task(dd);
}

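/*
 * Enqueue @req (when non-NULL) and, unless the engine is already busy,
 * pull the next request off the queue, program the hardware and kick off
 * the DMA transfers.  Also called with a NULL @req from the completion
 * path to drain the queue.
 */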
static int omap_aes_gcm_handle_queue(struct omap_aes_dev *dd,
				     struct aead_request *req)
{
	struct omap_aes_ctx *ctx;
	struct aead_request *backlog;
	struct omap_aes_reqctx *rctx;
	unsigned long flags;
	int err, ret = 0;

	spin_lock_irqsave(&dd->lock, flags);
	if (req)
		ret = aead_enqueue_request(&dd->aead_queue, req);
	if (dd->flags & FLAGS_BUSY) {
		spin_unlock_irqrestore(&dd->lock, flags);
		return ret;
	}

	backlog = aead_get_backlog(&dd->aead_queue);
	req = aead_dequeue_request(&dd->aead_queue);
	if (req)
		dd->flags |= FLAGS_BUSY;
	spin_unlock_irqrestore(&dd->lock, flags);

	if (!req)
		return ret;

	if (backlog)
		backlog->base.complete(&backlog->base, -EINPROGRESS);

	ctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
	rctx = aead_request_ctx(req);

	dd->ctx = ctx;
	rctx->dd = dd;
	dd->aead_req = req;

	rctx->mode &= FLAGS_MODE_MASK;
	dd->flags = (dd->flags & ~FLAGS_MODE_MASK) | rctx->mode;

	err = omap_aes_gcm_copy_buffers(dd, req);
	if (err)
		return err;

	err = omap_aes_write_ctrl(dd);
	if (!err) {
		if (dd->in_sg_len && dd->out_sg_len)
			err = omap_aes_crypt_dma_start(dd);
		else
			omap_aes_gcm_dma_out_callback(dd);
	}

	if (err) {
		omap_aes_gcm_finish_req(dd, err);
		omap_aes_gcm_handle_queue(dd, NULL);
	}

	return ret;
}

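/*
 * Common entry point for all four GCM variants: build the J0 counter
 * block (IV with a 32-bit counter of 1 appended), pre-encrypt it, and
 * queue the request on the hardware.
 */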
static int omap_aes_gcm_crypt(struct aead_request *req, unsigned long mode)
{
	struct omap_aes_reqctx *rctx = aead_request_ctx(req);
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	unsigned int authlen = crypto_aead_authsize(aead);
	struct omap_aes_dev *dd;
	__be32 counter = cpu_to_be32(1);
	int err, assoclen;

	memset(rctx->auth_tag, 0, sizeof(rctx->auth_tag));
	memcpy(rctx->iv + GCM_AES_IV_SIZE, &counter, 4);

	err = do_encrypt_iv(req, (u32 *)rctx->auth_tag, (u32 *)rctx->iv);
	if (err)
		return err;

	if (mode & FLAGS_RFC4106_GCM)
		assoclen = req->assoclen - 8;
	else
		assoclen = req->assoclen;
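
	/*
	 * With no assoc data and no payload, GHASH of the empty input is
	 * zero, so the tag is exactly E(K, J0) as computed above; skip the
	 * hardware entirely.
	 */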
	if (assoclen + req->cryptlen == 0) {
		scatterwalk_map_and_copy(rctx->auth_tag, req->dst, 0, authlen,
					 1);
		return 0;
	}

	dd = omap_aes_find_dev(rctx);
	if (!dd)
		return -ENODEV;
	rctx->mode = mode;

	return omap_aes_gcm_handle_queue(dd, req);
}

int omap_aes_gcm_encrypt(struct aead_request *req)
{
	struct omap_aes_reqctx *rctx = aead_request_ctx(req);

	memcpy(rctx->iv, req->iv, GCM_AES_IV_SIZE);
	return omap_aes_gcm_crypt(req, FLAGS_ENCRYPT | FLAGS_GCM);
}

int omap_aes_gcm_decrypt(struct aead_request *req)
{
	struct omap_aes_reqctx *rctx = aead_request_ctx(req);

	memcpy(rctx->iv, req->iv, GCM_AES_IV_SIZE);
	return omap_aes_gcm_crypt(req, FLAGS_GCM);
}

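/*
 * RFC4106 (GCM for ESP): the 12-byte GCM IV is the 4-byte nonce (salt)
 * stored at setkey time followed by the 8-byte per-request IV carried in
 * the packet, and the assoclen is restricted to the values ESP can
 * produce.
 */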
int omap_aes_4106gcm_encrypt(struct aead_request *req)
{
	struct omap_aes_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
	struct omap_aes_reqctx *rctx = aead_request_ctx(req);

	memcpy(rctx->iv, ctx->nonce, 4);
	memcpy(rctx->iv + 4, req->iv, 8);
	return crypto_ipsec_check_assoclen(req->assoclen) ?:
	       omap_aes_gcm_crypt(req, FLAGS_ENCRYPT | FLAGS_GCM |
				  FLAGS_RFC4106_GCM);
}

int omap_aes_4106gcm_decrypt(struct aead_request *req)
{
	struct omap_aes_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
	struct omap_aes_reqctx *rctx = aead_request_ctx(req);

	memcpy(rctx->iv, ctx->nonce, 4);
	memcpy(rctx->iv + 4, req->iv, 8);
	return crypto_ipsec_check_assoclen(req->assoclen) ?:
	       omap_aes_gcm_crypt(req, FLAGS_GCM | FLAGS_RFC4106_GCM);
}

int omap_aes_gcm_setkey(struct crypto_aead *tfm, const u8 *key,
			unsigned int keylen)
{
	struct omap_aes_ctx *ctx = crypto_aead_ctx(tfm);

	if (keylen != AES_KEYSIZE_128 && keylen != AES_KEYSIZE_192 &&
	    keylen != AES_KEYSIZE_256)
		return -EINVAL;

	memcpy(ctx->key, key, keylen);
	ctx->keylen = keylen;

	return 0;
}

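/*
 * RFC4106 key material is the AES key followed by a 4-byte nonce (salt);
 * split the two here and keep the nonce for IV construction.
 */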
int omap_aes_4106gcm_setkey(struct crypto_aead *tfm, const u8 *key,
			    unsigned int keylen)
{
	struct omap_aes_ctx *ctx = crypto_aead_ctx(tfm);

	if (keylen < 4)
		return -EINVAL;

	keylen -= 4;
	if (keylen != AES_KEYSIZE_128 && keylen != AES_KEYSIZE_192 &&
	    keylen != AES_KEYSIZE_256)
		return -EINVAL;

	memcpy(ctx->key, key, keylen);
	memcpy(ctx->nonce, key + keylen, 4);
	ctx->keylen = keylen;

	return 0;
}

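/* Tag lengths are validated by the generic GCM/RFC4106 helpers. */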
int omap_aes_gcm_setauthsize(struct crypto_aead *tfm, unsigned int authsize)
{
	return crypto_gcm_check_authsize(authsize);
}

int omap_aes_4106gcm_setauthsize(struct crypto_aead *parent,
				 unsigned int authsize)
{
	return crypto_rfc4106_check_authsize(authsize);
}
429