/*
 * Cryptographic API.
 *
 * Support for OMAP AES GCM HW acceleration.
 *
 * Copyright (c) 2016 Texas Instruments Incorporated
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 *
 */

#include <linux/errno.h>
#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/omap-dma.h>
#include <linux/interrupt.h>
#include <crypto/aes.h>
#include <crypto/scatterwalk.h>
#include <crypto/skcipher.h>
#include <crypto/internal/aead.h>

#include "omap-crypto.h"
#include "omap-aes.h"

static int omap_aes_gcm_handle_queue(struct omap_aes_dev *dd,
				     struct aead_request *req);

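/*
 * Finalize the current AEAD request: clear the device BUSY flag, drop
 * the scatterlist pointers and complete the request towards the crypto
 * core with the given return code.
 */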
static void omap_aes_gcm_finish_req(struct omap_aes_dev *dd, int ret)
{
	struct aead_request *req = dd->aead_req;

	dd->flags &= ~FLAGS_BUSY;
	dd->in_sg = NULL;
	dd->out_sg = NULL;

	req->base.complete(&req->base, ret);
}

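/*
 * DMA done path: sync and unmap the DMA buffers, undo the alignment
 * copies, publish the computed tag on encryption, and on decryption
 * verify that the XOR-folded tag (see omap_aes_gcm_dma_out_callback())
 * came out all-zero. Finally complete the request and kick the queue.
 */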
static void omap_aes_gcm_done_task(struct omap_aes_dev *dd)
{
	u8 *tag;
	int alen, clen, i, ret = 0, nsg;
	struct omap_aes_reqctx *rctx;

	alen = ALIGN(dd->assoc_len, AES_BLOCK_SIZE);
	clen = ALIGN(dd->total, AES_BLOCK_SIZE);
	rctx = aead_request_ctx(dd->aead_req);

	nsg = !!(dd->assoc_len && dd->total);

	dma_sync_sg_for_device(dd->dev, dd->out_sg, dd->out_sg_len,
			       DMA_FROM_DEVICE);
	dma_unmap_sg(dd->dev, dd->in_sg, dd->in_sg_len, DMA_TO_DEVICE);
	dma_unmap_sg(dd->dev, dd->out_sg, dd->out_sg_len, DMA_FROM_DEVICE);
	omap_aes_crypt_dma_stop(dd);

	omap_crypto_cleanup(dd->out_sg, dd->orig_out,
			    dd->aead_req->assoclen, dd->total,
			    FLAGS_OUT_DATA_ST_SHIFT, dd->flags);

	if (dd->flags & FLAGS_ENCRYPT)
		scatterwalk_map_and_copy(rctx->auth_tag,
					 dd->aead_req->dst,
					 dd->total + dd->aead_req->assoclen,
					 dd->authsize, 1);

	omap_crypto_cleanup(&dd->in_sgl[0], NULL, 0, alen,
			    FLAGS_ASSOC_DATA_ST_SHIFT, dd->flags);

	omap_crypto_cleanup(&dd->in_sgl[nsg], NULL, 0, clen,
			    FLAGS_IN_DATA_ST_SHIFT, dd->flags);

	if (!(dd->flags & FLAGS_ENCRYPT)) {
		tag = (u8 *)rctx->auth_tag;
		for (i = 0; i < dd->authsize; i++) {
			if (tag[i]) {
				dev_err(dd->dev, "GCM decryption: authentication tag mismatch\n");
				ret = -EBADMSG;
			}
		}
	}

	omap_aes_gcm_finish_req(dd, ret);
	omap_aes_gcm_handle_queue(dd, NULL);
}

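/*
 * Prepare the scatterlists for the hardware: split the associated data
 * and the payload out of the request, pad both to AES_BLOCK_SIZE via
 * the omap-crypto alignment helpers and record the resulting lengths
 * in the device context.
 */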
static int omap_aes_gcm_copy_buffers(struct omap_aes_dev *dd,
				     struct aead_request *req)
{
	int alen, clen, cryptlen, assoclen, ret;
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	unsigned int authlen = crypto_aead_authsize(aead);
	struct scatterlist *tmp, sg_arr[2];
	int nsg;
	u16 flags;

	assoclen = req->assoclen;
	cryptlen = req->cryptlen;

	if (dd->flags & FLAGS_RFC4106_GCM)
		assoclen -= 8;

	if (!(dd->flags & FLAGS_ENCRYPT))
		cryptlen -= authlen;

	alen = ALIGN(assoclen, AES_BLOCK_SIZE);
	clen = ALIGN(cryptlen, AES_BLOCK_SIZE);

	nsg = !!(assoclen && cryptlen);

	omap_aes_clear_copy_flags(dd);

	sg_init_table(dd->in_sgl, nsg + 1);
	if (assoclen) {
		tmp = req->src;
		ret = omap_crypto_align_sg(&tmp, assoclen,
					   AES_BLOCK_SIZE, dd->in_sgl,
					   OMAP_CRYPTO_COPY_DATA |
					   OMAP_CRYPTO_ZERO_BUF |
					   OMAP_CRYPTO_FORCE_SINGLE_ENTRY,
					   FLAGS_ASSOC_DATA_ST_SHIFT,
					   &dd->flags);
		if (ret)
			return ret;
	}

	if (cryptlen) {
		tmp = scatterwalk_ffwd(sg_arr, req->src, req->assoclen);

		ret = omap_crypto_align_sg(&tmp, cryptlen,
					   AES_BLOCK_SIZE, &dd->in_sgl[nsg],
					   OMAP_CRYPTO_COPY_DATA |
					   OMAP_CRYPTO_ZERO_BUF |
					   OMAP_CRYPTO_FORCE_SINGLE_ENTRY,
					   FLAGS_IN_DATA_ST_SHIFT,
					   &dd->flags);
		if (ret)
			return ret;
	}

	dd->in_sg = dd->in_sgl;
	dd->total = cryptlen;
	dd->assoc_len = assoclen;
	dd->authsize = authlen;

	dd->out_sg = req->dst;
	dd->orig_out = req->dst;

	dd->out_sg = scatterwalk_ffwd(sg_arr, req->dst, assoclen);

	flags = 0;
	if (req->src == req->dst || dd->out_sg == sg_arr)
		flags |= OMAP_CRYPTO_FORCE_COPY;

	ret = omap_crypto_align_sg(&dd->out_sg, cryptlen,
				   AES_BLOCK_SIZE, &dd->out_sgl,
				   flags,
				   FLAGS_OUT_DATA_ST_SHIFT, &dd->flags);
	if (ret)
		return ret;

	dd->in_sg_len = sg_nents_for_len(dd->in_sg, alen + clen);
	dd->out_sg_len = sg_nents_for_len(dd->out_sg, clen);

	return 0;
}

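/* Completion callback for the fallback skcipher used by do_encrypt_iv(). */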
static void omap_aes_gcm_complete(struct crypto_async_request *req, int err)
{
	struct omap_aes_gcm_result *res = req->data;

	if (err == -EINPROGRESS)
		return;

	res->err = err;
	complete(&res->completion);
}

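/*
 * Encrypt the initial counter block in @iv with the fallback CTR
 * skcipher; the result is written to @tag and is later XORed with the
 * hardware TAG registers to produce the final authentication tag.
 */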
static int do_encrypt_iv(struct aead_request *req, u32 *tag, u32 *iv)
{
	struct scatterlist iv_sg, tag_sg;
	struct skcipher_request *sk_req;
	struct omap_aes_gcm_result result;
	struct omap_aes_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
	int ret = 0;

	sk_req = skcipher_request_alloc(ctx->ctr, GFP_KERNEL);
	if (!sk_req) {
		pr_err("skcipher: Failed to allocate request\n");
		return -ENOMEM;
	}

	init_completion(&result.completion);

	sg_init_one(&iv_sg, iv, AES_BLOCK_SIZE);
	sg_init_one(&tag_sg, tag, AES_BLOCK_SIZE);
	skcipher_request_set_callback(sk_req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				      omap_aes_gcm_complete, &result);
	ret = crypto_skcipher_setkey(ctx->ctr, (u8 *)ctx->key, ctx->keylen);
	if (ret) {
		/* do not start the encryption if the key was rejected */
		skcipher_request_free(sk_req);
		return ret;
	}
	skcipher_request_set_crypt(sk_req, &iv_sg, &tag_sg, AES_BLOCK_SIZE,
				   NULL);
	ret = crypto_skcipher_encrypt(sk_req);
	switch (ret) {
	case 0:
		break;
	case -EINPROGRESS:
	case -EBUSY:
		ret = wait_for_completion_interruptible(&result.completion);
		if (!ret) {
			ret = result.err;
			if (!ret) {
				reinit_completion(&result.completion);
				break;
			}
		}
		/* fall through */
	default:
		pr_err("Encryption of IV failed for GCM mode\n");
		break;
	}

	skcipher_request_free(sk_req);
	return ret;
}

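/*
 * DMA completion callback for the output transfer: XOR the hardware
 * TAG registers into the pre-encrypted counter block; on decryption
 * the received tag is XORed in as well, so a valid tag leaves
 * rctx->auth_tag all zero for omap_aes_gcm_done_task() to verify.
 */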
void omap_aes_gcm_dma_out_callback(void *data)
{
	struct omap_aes_dev *dd = data;
	struct omap_aes_reqctx *rctx;
	int i, val;
	u32 *auth_tag, tag[4];

	if (!(dd->flags & FLAGS_ENCRYPT))
		scatterwalk_map_and_copy(tag, dd->aead_req->src,
					 dd->total + dd->aead_req->assoclen,
					 dd->authsize, 0);

	rctx = aead_request_ctx(dd->aead_req);
	auth_tag = (u32 *)rctx->auth_tag;
	for (i = 0; i < 4; i++) {
		val = omap_aes_read(dd, AES_REG_TAG_N(dd, i));
		auth_tag[i] = val ^ auth_tag[i];
		if (!(dd->flags & FLAGS_ENCRYPT))
			auth_tag[i] = auth_tag[i] ^ tag[i];
	}

	omap_aes_gcm_done_task(dd);
}

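/*
 * Queue handling: enqueue @req (NULL just kicks the queue) and, if the
 * device is idle, pull the next request, program the hardware and
 * start the DMA transfer. Called from the crypt entry points and from
 * the completion path.
 */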
static int omap_aes_gcm_handle_queue(struct omap_aes_dev *dd,
				     struct aead_request *req)
{
	struct omap_aes_ctx *ctx;
	struct aead_request *backlog;
	struct omap_aes_reqctx *rctx;
	unsigned long flags;
	int err, ret = 0;

	spin_lock_irqsave(&dd->lock, flags);
	if (req)
		ret = aead_enqueue_request(&dd->aead_queue, req);
	if (dd->flags & FLAGS_BUSY) {
		spin_unlock_irqrestore(&dd->lock, flags);
		return ret;
	}

	backlog = aead_get_backlog(&dd->aead_queue);
	req = aead_dequeue_request(&dd->aead_queue);
	if (req)
		dd->flags |= FLAGS_BUSY;
	spin_unlock_irqrestore(&dd->lock, flags);

	if (!req)
		return ret;

	if (backlog)
		backlog->base.complete(&backlog->base, -EINPROGRESS);

	ctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
	rctx = aead_request_ctx(req);

	dd->ctx = ctx;
	rctx->dd = dd;
	dd->aead_req = req;

	rctx->mode &= FLAGS_MODE_MASK;
	dd->flags = (dd->flags & ~FLAGS_MODE_MASK) | rctx->mode;

	/*
	 * On any failure fall through to the common error path below so
	 * that FLAGS_BUSY is cleared and the queue is restarted.
	 */
	err = omap_aes_gcm_copy_buffers(dd, req);
	if (!err)
		err = omap_aes_write_ctrl(dd);
	if (!err)
		err = omap_aes_crypt_dma_start(dd);

	if (err) {
		omap_aes_gcm_finish_req(dd, err);
		omap_aes_gcm_handle_queue(dd, NULL);
	}

	return ret;
}

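/*
 * Common GCM encrypt/decrypt path: pre-encrypt the counter block,
 * complete zero-length (tag-only) requests inline and queue everything
 * else for the hardware.
 */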
static int omap_aes_gcm_crypt(struct aead_request *req, unsigned long mode)
{
	struct omap_aes_reqctx *rctx = aead_request_ctx(req);
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	unsigned int authlen = crypto_aead_authsize(aead);
	struct omap_aes_dev *dd;
	__be32 counter = cpu_to_be32(1);
	int err, assoclen;

	memset(rctx->auth_tag, 0, sizeof(rctx->auth_tag));
	memcpy(rctx->iv + 12, &counter, 4);

	err = do_encrypt_iv(req, (u32 *)rctx->auth_tag, (u32 *)rctx->iv);
	if (err)
		return err;

	if (mode & FLAGS_RFC4106_GCM)
		assoclen = req->assoclen - 8;
	else
		assoclen = req->assoclen;
	if (assoclen + req->cryptlen == 0) {
		scatterwalk_map_and_copy(rctx->auth_tag, req->dst, 0, authlen,
					 1);
		return 0;
	}

	dd = omap_aes_find_dev(rctx);
	if (!dd)
		return -ENODEV;
	rctx->mode = mode;

	return omap_aes_gcm_handle_queue(dd, req);
}

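/*
 * AEAD entry points; presumably wired up as the .encrypt/.decrypt
 * callbacks when the GCM algorithms are registered elsewhere in the
 * driver.
 */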
int omap_aes_gcm_encrypt(struct aead_request *req)
{
	struct omap_aes_reqctx *rctx = aead_request_ctx(req);

	memcpy(rctx->iv, req->iv, 12);
	return omap_aes_gcm_crypt(req, FLAGS_ENCRYPT | FLAGS_GCM);
}

int omap_aes_gcm_decrypt(struct aead_request *req)
{
	struct omap_aes_reqctx *rctx = aead_request_ctx(req);

	memcpy(rctx->iv, req->iv, 12);
	return omap_aes_gcm_crypt(req, FLAGS_GCM);
}

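/*
 * RFC4106 variants: the 96-bit IV is built from the 4-byte nonce
 * stored at setkey time followed by the 8-byte IV carried in the
 * request.
 */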
int omap_aes_4106gcm_encrypt(struct aead_request *req)
{
	struct omap_aes_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
	struct omap_aes_reqctx *rctx = aead_request_ctx(req);

	memcpy(rctx->iv, ctx->nonce, 4);
	memcpy(rctx->iv + 4, req->iv, 8);
	return omap_aes_gcm_crypt(req, FLAGS_ENCRYPT | FLAGS_GCM |
				  FLAGS_RFC4106_GCM);
}

int omap_aes_4106gcm_decrypt(struct aead_request *req)
{
	struct omap_aes_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
	struct omap_aes_reqctx *rctx = aead_request_ctx(req);

	memcpy(rctx->iv, ctx->nonce, 4);
	memcpy(rctx->iv + 4, req->iv, 8);
	return omap_aes_gcm_crypt(req, FLAGS_GCM | FLAGS_RFC4106_GCM);
}

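/* Plain GCM setkey: accept 128-, 192- and 256-bit AES keys. */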
int omap_aes_gcm_setkey(struct crypto_aead *tfm, const u8 *key,
			unsigned int keylen)
{
	struct omap_aes_ctx *ctx = crypto_aead_ctx(tfm);

	if (keylen != AES_KEYSIZE_128 && keylen != AES_KEYSIZE_192 &&
	    keylen != AES_KEYSIZE_256)
		return -EINVAL;

	memcpy(ctx->key, key, keylen);
	ctx->keylen = keylen;

	return 0;
}

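/*
 * RFC4106 setkey: the last 4 bytes of the key material are the nonce
 * (salt); the remainder is the AES key proper.
 */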
int omap_aes_4106gcm_setkey(struct crypto_aead *tfm, const u8 *key,
			    unsigned int keylen)
{
	struct omap_aes_ctx *ctx = crypto_aead_ctx(tfm);

	if (keylen < 4)
		return -EINVAL;

	keylen -= 4;
	if (keylen != AES_KEYSIZE_128 && keylen != AES_KEYSIZE_192 &&
	    keylen != AES_KEYSIZE_256)
		return -EINVAL;

	memcpy(ctx->key, key, keylen);
	memcpy(ctx->nonce, key + keylen, 4);
	ctx->keylen = keylen;

	return 0;
}
409