// SPDX-License-Identifier: GPL-2.0-only
/*
 * Cryptographic API.
 *
 * Support for OMAP AES GCM HW acceleration.
 *
 * Copyright (c) 2016 Texas Instruments Incorporated
 */

#include <linux/errno.h>
#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/omap-dma.h>
#include <linux/interrupt.h>
#include <crypto/aes.h>
#include <crypto/gcm.h>
#include <crypto/scatterwalk.h>
#include <crypto/skcipher.h>
#include <crypto/internal/aead.h>

#include "omap-crypto.h"
#include "omap-aes.h"

static int omap_aes_gcm_handle_queue(struct omap_aes_dev *dd,
				     struct aead_request *req);

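/*
 * Complete the current AEAD request: clear the BUSY flag, drop the
 * scatterlist references and report the result back to the caller.
 */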
static void omap_aes_gcm_finish_req(struct omap_aes_dev *dd, int ret)
{
	struct aead_request *req = dd->aead_req;

	dd->flags &= ~FLAGS_BUSY;
	dd->in_sg = NULL;
	dd->out_sg = NULL;

	req->base.complete(&req->base, ret);
}

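/*
 * Post-DMA completion work: sync and unmap the DMA buffers, clean up the
 * driver-local aligned copies, append the computed tag to the destination
 * on encryption, and on decryption check that the folded tag is all-zero
 * (see the XOR folding in omap_aes_gcm_dma_out_callback()).
 */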
static void omap_aes_gcm_done_task(struct omap_aes_dev *dd)
{
	u8 *tag;
	int alen, clen, i, ret = 0, nsg;
	struct omap_aes_reqctx *rctx;

	alen = ALIGN(dd->assoc_len, AES_BLOCK_SIZE);
	clen = ALIGN(dd->total, AES_BLOCK_SIZE);
	rctx = aead_request_ctx(dd->aead_req);

	nsg = !!(dd->assoc_len && dd->total);

	dma_sync_sg_for_device(dd->dev, dd->out_sg, dd->out_sg_len,
			       DMA_FROM_DEVICE);
	dma_unmap_sg(dd->dev, dd->in_sg, dd->in_sg_len, DMA_TO_DEVICE);
	dma_unmap_sg(dd->dev, dd->out_sg, dd->out_sg_len, DMA_FROM_DEVICE);
	omap_aes_crypt_dma_stop(dd);

	omap_crypto_cleanup(dd->out_sg, dd->orig_out,
			    dd->aead_req->assoclen, dd->total,
			    FLAGS_OUT_DATA_ST_SHIFT, dd->flags);

	if (dd->flags & FLAGS_ENCRYPT)
		scatterwalk_map_and_copy(rctx->auth_tag,
					 dd->aead_req->dst,
					 dd->total + dd->aead_req->assoclen,
					 dd->authsize, 1);

	omap_crypto_cleanup(&dd->in_sgl[0], NULL, 0, alen,
			    FLAGS_ASSOC_DATA_ST_SHIFT, dd->flags);

	omap_crypto_cleanup(&dd->in_sgl[nsg], NULL, 0, clen,
			    FLAGS_IN_DATA_ST_SHIFT, dd->flags);

	if (!(dd->flags & FLAGS_ENCRYPT)) {
		tag = (u8 *)rctx->auth_tag;
		for (i = 0; i < dd->authsize; i++) {
			if (tag[i]) {
				dev_err(dd->dev, "GCM decryption: tag verification failed\n");
				ret = -EBADMSG;
			}
		}
	}

	omap_aes_gcm_finish_req(dd, ret);
	omap_aes_gcm_handle_queue(dd, NULL);
}

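/*
 * Prepare the scatterlists for the hardware: pad the assoc data and the
 * payload out to AES block size and, where needed, copy them into the
 * driver-local lists (dd->in_sgl/dd->out_sgl) as single, zero-padded
 * entries suitable for DMA.
 */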
static int omap_aes_gcm_copy_buffers(struct omap_aes_dev *dd,
				     struct aead_request *req)
{
	int alen, clen, cryptlen, assoclen, ret;
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	unsigned int authlen = crypto_aead_authsize(aead);
	struct scatterlist *tmp, sg_arr[2];
	int nsg;
	u16 flags;

	assoclen = req->assoclen;
	cryptlen = req->cryptlen;

	if (dd->flags & FLAGS_RFC4106_GCM)
		assoclen -= 8;

	if (!(dd->flags & FLAGS_ENCRYPT))
		cryptlen -= authlen;

	alen = ALIGN(assoclen, AES_BLOCK_SIZE);
	clen = ALIGN(cryptlen, AES_BLOCK_SIZE);

	nsg = !!(assoclen && cryptlen);

	omap_aes_clear_copy_flags(dd);

	sg_init_table(dd->in_sgl, nsg + 1);
	if (assoclen) {
		tmp = req->src;
		ret = omap_crypto_align_sg(&tmp, assoclen,
					   AES_BLOCK_SIZE, dd->in_sgl,
					   OMAP_CRYPTO_COPY_DATA |
					   OMAP_CRYPTO_ZERO_BUF |
					   OMAP_CRYPTO_FORCE_SINGLE_ENTRY,
					   FLAGS_ASSOC_DATA_ST_SHIFT,
					   &dd->flags);
		if (ret)
			return ret;
	}

	if (cryptlen) {
		tmp = scatterwalk_ffwd(sg_arr, req->src, req->assoclen);

		ret = omap_crypto_align_sg(&tmp, cryptlen,
					   AES_BLOCK_SIZE, &dd->in_sgl[nsg],
					   OMAP_CRYPTO_COPY_DATA |
					   OMAP_CRYPTO_ZERO_BUF |
					   OMAP_CRYPTO_FORCE_SINGLE_ENTRY,
					   FLAGS_IN_DATA_ST_SHIFT,
					   &dd->flags);
		if (ret)
			return ret;
	}

	dd->in_sg = dd->in_sgl;
	dd->total = cryptlen;
	dd->assoc_len = assoclen;
	dd->authsize = authlen;

	dd->orig_out = req->dst;
	dd->out_sg = scatterwalk_ffwd(sg_arr, req->dst, assoclen);

	flags = 0;
	if (req->src == req->dst || dd->out_sg == sg_arr)
		flags |= OMAP_CRYPTO_FORCE_COPY;

	ret = omap_crypto_align_sg(&dd->out_sg, cryptlen,
				   AES_BLOCK_SIZE, &dd->out_sgl,
				   flags,
				   FLAGS_OUT_DATA_ST_SHIFT, &dd->flags);
	if (ret)
		return ret;

	dd->in_sg_len = sg_nents_for_len(dd->in_sg, alen + clen);
	dd->out_sg_len = sg_nents_for_len(dd->out_sg, clen);

	return 0;
}

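/* Completion callback for the fallback skcipher used by do_encrypt_iv(). */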
static void omap_aes_gcm_complete(struct crypto_async_request *req, int err)
{
	struct omap_aes_gcm_result *res = req->data;

	if (err == -EINPROGRESS)
		return;

	res->err = err;
	complete(&res->completion);
}

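/*
 * Synchronously encrypt the initial counter block J0 (IV || 0x00000001)
 * with the fallback skcipher held in ctx->ctr. The result E(K, J0) is
 * written to @tag and later XORed with the value read back from the
 * hardware tag registers to form (or verify) the authentication tag.
 */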
static int do_encrypt_iv(struct aead_request *req, u32 *tag, u32 *iv)
{
	struct scatterlist iv_sg, tag_sg;
	struct skcipher_request *sk_req;
	struct omap_aes_gcm_result result;
	struct omap_aes_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
	int ret = 0;

	sk_req = skcipher_request_alloc(ctx->ctr, GFP_KERNEL);
	if (!sk_req) {
		pr_err("skcipher: Failed to allocate request\n");
		return -ENOMEM;
	}

	init_completion(&result.completion);

	sg_init_one(&iv_sg, iv, AES_BLOCK_SIZE);
	sg_init_one(&tag_sg, tag, AES_BLOCK_SIZE);
	skcipher_request_set_callback(sk_req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				      omap_aes_gcm_complete, &result);
	ret = crypto_skcipher_setkey(ctx->ctr, (u8 *)ctx->key, ctx->keylen);
	if (ret) {
		skcipher_request_free(sk_req);
		return ret;
	}

	skcipher_request_set_crypt(sk_req, &iv_sg, &tag_sg, AES_BLOCK_SIZE,
				   NULL);
	ret = crypto_skcipher_encrypt(sk_req);
	switch (ret) {
	case 0:
		break;
	case -EINPROGRESS:
	case -EBUSY:
		ret = wait_for_completion_interruptible(&result.completion);
		if (!ret) {
			ret = result.err;
			if (!ret) {
				reinit_completion(&result.completion);
				break;
			}
		}
		/* fall through */
	default:
		pr_err("Encryption of IV failed for GCM mode\n");
		break;
	}

	skcipher_request_free(sk_req);
	return ret;
}

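/*
 * DMA completion callback for the output channel. Folds E(K, J0) (and, on
 * decryption, the tag received with the ciphertext) into the value read
 * back from the AES_REG_TAG registers, then finishes the request.
 */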
void omap_aes_gcm_dma_out_callback(void *data)
{
	struct omap_aes_dev *dd = data;
	struct omap_aes_reqctx *rctx;
	int i, val;
	u32 *auth_tag, tag[4];

	if (!(dd->flags & FLAGS_ENCRYPT))
		scatterwalk_map_and_copy(tag, dd->aead_req->src,
					 dd->total + dd->aead_req->assoclen,
					 dd->authsize, 0);

	rctx = aead_request_ctx(dd->aead_req);
	auth_tag = (u32 *)rctx->auth_tag;
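	/*
	 * auth_tag currently holds E(K, J0) from do_encrypt_iv(). XORing it
	 * with the hardware tag registers yields the final tag on
	 * encryption; on decryption the received tag is XORed in as well,
	 * so a successful verification leaves auth_tag all-zero (checked in
	 * omap_aes_gcm_done_task()).
	 */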
	for (i = 0; i < 4; i++) {
		val = omap_aes_read(dd, AES_REG_TAG_N(dd, i));
		auth_tag[i] = val ^ auth_tag[i];
		if (!(dd->flags & FLAGS_ENCRYPT))
			auth_tag[i] = auth_tag[i] ^ tag[i];
	}

	omap_aes_gcm_done_task(dd);
}

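/*
 * Queue handling: enqueue the new request (if any) and, unless the device
 * is already busy, dequeue the next request and start its DMA transfer.
 * Called with a NULL @req from the completion path to keep the queue
 * draining.
 */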
static int omap_aes_gcm_handle_queue(struct omap_aes_dev *dd,
				     struct aead_request *req)
{
	struct omap_aes_ctx *ctx;
	struct aead_request *backlog;
	struct omap_aes_reqctx *rctx;
	unsigned long flags;
	int err, ret = 0;

	spin_lock_irqsave(&dd->lock, flags);
	if (req)
		ret = aead_enqueue_request(&dd->aead_queue, req);
	if (dd->flags & FLAGS_BUSY) {
		spin_unlock_irqrestore(&dd->lock, flags);
		return ret;
	}

	backlog = aead_get_backlog(&dd->aead_queue);
	req = aead_dequeue_request(&dd->aead_queue);
	if (req)
		dd->flags |= FLAGS_BUSY;
	spin_unlock_irqrestore(&dd->lock, flags);

	if (!req)
		return ret;

	if (backlog)
		backlog->base.complete(&backlog->base, -EINPROGRESS);

	ctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
	rctx = aead_request_ctx(req);

	dd->ctx = ctx;
	rctx->dd = dd;
	dd->aead_req = req;

	rctx->mode &= FLAGS_MODE_MASK;
	dd->flags = (dd->flags & ~FLAGS_MODE_MASK) | rctx->mode;

	err = omap_aes_gcm_copy_buffers(dd, req);

	if (!err)
		err = omap_aes_write_ctrl(dd);

	if (!err)
		err = omap_aes_crypt_dma_start(dd);

	if (err) {
		omap_aes_gcm_finish_req(dd, err);
		omap_aes_gcm_handle_queue(dd, NULL);
	}

	return ret;
}

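/*
 * Common entry point for all four GCM variants. Precomputes E(K, J0) in
 * software, short-circuits zero-length requests (tag only), then hands
 * the request to the device queue.
 */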
static int omap_aes_gcm_crypt(struct aead_request *req, unsigned long mode)
{
	struct omap_aes_reqctx *rctx = aead_request_ctx(req);
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	unsigned int authlen = crypto_aead_authsize(aead);
	struct omap_aes_dev *dd;
	__be32 counter = cpu_to_be32(1);
	int err, assoclen;

	memset(rctx->auth_tag, 0, sizeof(rctx->auth_tag));
	memcpy(rctx->iv + GCM_AES_IV_SIZE, &counter, 4);

	err = do_encrypt_iv(req, (u32 *)rctx->auth_tag, (u32 *)rctx->iv);
	if (err)
		return err;

	if (mode & FLAGS_RFC4106_GCM)
		assoclen = req->assoclen - 8;
	else
		assoclen = req->assoclen;
	if (assoclen + req->cryptlen == 0) {
		scatterwalk_map_and_copy(rctx->auth_tag, req->dst, 0, authlen,
					 1);
		return 0;
	}

	dd = omap_aes_find_dev(rctx);
	if (!dd)
		return -ENODEV;
	rctx->mode = mode;

	return omap_aes_gcm_handle_queue(dd, req);
}

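/* Plain GCM: the caller supplies the full 12-byte IV. */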
int omap_aes_gcm_encrypt(struct aead_request *req)
{
	struct omap_aes_reqctx *rctx = aead_request_ctx(req);

	memcpy(rctx->iv, req->iv, GCM_AES_IV_SIZE);
	return omap_aes_gcm_crypt(req, FLAGS_ENCRYPT | FLAGS_GCM);
}

int omap_aes_gcm_decrypt(struct aead_request *req)
{
	struct omap_aes_reqctx *rctx = aead_request_ctx(req);

	memcpy(rctx->iv, req->iv, GCM_AES_IV_SIZE);
	return omap_aes_gcm_crypt(req, FLAGS_GCM);
}

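/* RFC4106: IV = 4-byte nonce from setkey || 8-byte per-request IV. */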
int omap_aes_4106gcm_encrypt(struct aead_request *req)
{
	struct omap_aes_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
	struct omap_aes_reqctx *rctx = aead_request_ctx(req);

	memcpy(rctx->iv, ctx->nonce, 4);
	memcpy(rctx->iv + 4, req->iv, 8);
	return omap_aes_gcm_crypt(req, FLAGS_ENCRYPT | FLAGS_GCM |
				  FLAGS_RFC4106_GCM);
}

int omap_aes_4106gcm_decrypt(struct aead_request *req)
{
	struct omap_aes_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
	struct omap_aes_reqctx *rctx = aead_request_ctx(req);

	memcpy(rctx->iv, ctx->nonce, 4);
	memcpy(rctx->iv + 4, req->iv, 8);
	return omap_aes_gcm_crypt(req, FLAGS_GCM | FLAGS_RFC4106_GCM);
}

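/*
 * The hardware handles the GCM key schedule itself, so setkey only
 * validates the length and caches the raw key for later programming.
 */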
int omap_aes_gcm_setkey(struct crypto_aead *tfm, const u8 *key,
			unsigned int keylen)
{
	struct omap_aes_ctx *ctx = crypto_aead_ctx(tfm);

	if (keylen != AES_KEYSIZE_128 && keylen != AES_KEYSIZE_192 &&
	    keylen != AES_KEYSIZE_256)
		return -EINVAL;

	memcpy(ctx->key, key, keylen);
	ctx->keylen = keylen;

	return 0;
}

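/*
 * RFC4106 keys carry a 4-byte nonce appended to the AES key; split it
 * off and store it for IV construction in the crypt paths.
 */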
int omap_aes_4106gcm_setkey(struct crypto_aead *tfm, const u8 *key,
			    unsigned int keylen)
{
	struct omap_aes_ctx *ctx = crypto_aead_ctx(tfm);

	if (keylen < 4)
		return -EINVAL;

	keylen -= 4;
	if (keylen != AES_KEYSIZE_128 && keylen != AES_KEYSIZE_192 &&
	    keylen != AES_KEYSIZE_256)
		return -EINVAL;

	memcpy(ctx->key, key, keylen);
	memcpy(ctx->nonce, key + keylen, 4);
	ctx->keylen = keylen;

	return 0;
}