xref: /openbmc/linux/drivers/crypto/omap-aes-gcm.c (revision 2359ccdd)
/*
 * Cryptographic API.
 *
 * Support for OMAP AES GCM HW acceleration.
 *
 * Copyright (c) 2016 Texas Instruments Incorporated
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 *
 */

#include <linux/errno.h>
#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/omap-dma.h>
#include <linux/interrupt.h>
#include <crypto/aes.h>
#include <crypto/gcm.h>
#include <crypto/scatterwalk.h>
#include <crypto/skcipher.h>
#include <crypto/internal/aead.h>

#include "omap-crypto.h"
#include "omap-aes.h"

static int omap_aes_gcm_handle_queue(struct omap_aes_dev *dd,
				     struct aead_request *req);

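/*
 * Release the device for the next queued request (clear FLAGS_BUSY) and
 * complete the original AEAD request with the given return code.
 */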
static void omap_aes_gcm_finish_req(struct omap_aes_dev *dd, int ret)
{
	struct aead_request *req = dd->aead_req;

	dd->flags &= ~FLAGS_BUSY;
	dd->in_sg = NULL;
	dd->out_sg = NULL;

	req->base.complete(&req->base, ret);
}

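/*
 * DMA completion path: unmap and clean up the scatterlists, copy the
 * computed tag out to the destination buffer on encryption, and on
 * decryption check the XOR-combined tag left behind by
 * omap_aes_gcm_dma_out_callback() (any nonzero byte means mismatch).
 */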
static void omap_aes_gcm_done_task(struct omap_aes_dev *dd)
{
	u8 *tag;
	int alen, clen, i, ret = 0, nsg;
	struct omap_aes_reqctx *rctx;

	alen = ALIGN(dd->assoc_len, AES_BLOCK_SIZE);
	clen = ALIGN(dd->total, AES_BLOCK_SIZE);
	rctx = aead_request_ctx(dd->aead_req);

	nsg = !!(dd->assoc_len && dd->total);

	dma_sync_sg_for_device(dd->dev, dd->out_sg, dd->out_sg_len,
			       DMA_FROM_DEVICE);
	dma_unmap_sg(dd->dev, dd->in_sg, dd->in_sg_len, DMA_TO_DEVICE);
	dma_unmap_sg(dd->dev, dd->out_sg, dd->out_sg_len, DMA_FROM_DEVICE);
	omap_aes_crypt_dma_stop(dd);

	omap_crypto_cleanup(dd->out_sg, dd->orig_out,
			    dd->aead_req->assoclen, dd->total,
			    FLAGS_OUT_DATA_ST_SHIFT, dd->flags);

	if (dd->flags & FLAGS_ENCRYPT)
		scatterwalk_map_and_copy(rctx->auth_tag,
					 dd->aead_req->dst,
					 dd->total + dd->aead_req->assoclen,
					 dd->authsize, 1);

	omap_crypto_cleanup(&dd->in_sgl[0], NULL, 0, alen,
			    FLAGS_ASSOC_DATA_ST_SHIFT, dd->flags);

	omap_crypto_cleanup(&dd->in_sgl[nsg], NULL, 0, clen,
			    FLAGS_IN_DATA_ST_SHIFT, dd->flags);

	if (!(dd->flags & FLAGS_ENCRYPT)) {
		tag = (u8 *)rctx->auth_tag;
		for (i = 0; i < dd->authsize; i++) {
			if (tag[i]) {
				dev_err(dd->dev, "GCM decryption: authentication tag mismatch\n");
				ret = -EBADMSG;
			}
		}
	}

	omap_aes_gcm_finish_req(dd, ret);
	omap_aes_gcm_handle_queue(dd, NULL);
}

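/*
 * Prepare the scatterlists for DMA: pad the associated data and the
 * payload to AES_BLOCK_SIZE boundaries (copying into aligned buffers
 * where needed) and set up dd->in_sg/dd->out_sg accordingly.
 */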
static int omap_aes_gcm_copy_buffers(struct omap_aes_dev *dd,
				     struct aead_request *req)
{
	int alen, clen, cryptlen, assoclen, ret;
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	unsigned int authlen = crypto_aead_authsize(aead);
	struct scatterlist *tmp, sg_arr[2];
	int nsg;
	u16 flags;

	assoclen = req->assoclen;
	cryptlen = req->cryptlen;

	if (dd->flags & FLAGS_RFC4106_GCM)
		assoclen -= 8;

	if (!(dd->flags & FLAGS_ENCRYPT))
		cryptlen -= authlen;

	alen = ALIGN(assoclen, AES_BLOCK_SIZE);
	clen = ALIGN(cryptlen, AES_BLOCK_SIZE);

	nsg = !!(assoclen && cryptlen);

	omap_aes_clear_copy_flags(dd);

	sg_init_table(dd->in_sgl, nsg + 1);
	if (assoclen) {
		tmp = req->src;
		ret = omap_crypto_align_sg(&tmp, assoclen,
					   AES_BLOCK_SIZE, dd->in_sgl,
					   OMAP_CRYPTO_COPY_DATA |
					   OMAP_CRYPTO_ZERO_BUF |
					   OMAP_CRYPTO_FORCE_SINGLE_ENTRY,
					   FLAGS_ASSOC_DATA_ST_SHIFT,
					   &dd->flags);
		if (ret)
			return ret;
	}

	if (cryptlen) {
		tmp = scatterwalk_ffwd(sg_arr, req->src, req->assoclen);

		ret = omap_crypto_align_sg(&tmp, cryptlen,
					   AES_BLOCK_SIZE, &dd->in_sgl[nsg],
					   OMAP_CRYPTO_COPY_DATA |
					   OMAP_CRYPTO_ZERO_BUF |
					   OMAP_CRYPTO_FORCE_SINGLE_ENTRY,
					   FLAGS_IN_DATA_ST_SHIFT,
					   &dd->flags);
		if (ret)
			return ret;
	}

	dd->in_sg = dd->in_sgl;
	dd->total = cryptlen;
	dd->assoc_len = assoclen;
	dd->authsize = authlen;

	dd->out_sg = req->dst;
	dd->orig_out = req->dst;

	dd->out_sg = scatterwalk_ffwd(sg_arr, req->dst, req->assoclen);

	flags = 0;
	if (req->src == req->dst || dd->out_sg == sg_arr)
		flags |= OMAP_CRYPTO_FORCE_COPY;

	ret = omap_crypto_align_sg(&dd->out_sg, cryptlen,
				   AES_BLOCK_SIZE, &dd->out_sgl,
				   flags,
				   FLAGS_OUT_DATA_ST_SHIFT, &dd->flags);
	if (ret)
		return ret;

	dd->in_sg_len = sg_nents_for_len(dd->in_sg, alen + clen);
	dd->out_sg_len = sg_nents_for_len(dd->out_sg, clen);

	return 0;
}

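/*
 * Completion callback for the asynchronous IV encryption below: record
 * the result and wake up the waiter in do_encrypt_iv().
 */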
static void omap_aes_gcm_complete(struct crypto_async_request *req, int err)
{
	struct omap_aes_gcm_result *res = req->data;

	if (err == -EINPROGRESS)
		return;

	res->err = err;
	complete(&res->completion);
}

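/*
 * Encrypt the initial counter block J0 (12-byte nonce || 32-bit counter
 * of 1) with the software skcipher held in ctx->ctr. The result,
 * E(K, J0), is later XORed with the hardware-computed GHASH in
 * omap_aes_gcm_dma_out_callback() to form the final tag.
 */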
static int do_encrypt_iv(struct aead_request *req, u32 *tag, u32 *iv)
{
	struct scatterlist iv_sg, tag_sg;
	struct skcipher_request *sk_req;
	struct omap_aes_gcm_result result;
	struct omap_aes_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
	int ret = 0;

	sk_req = skcipher_request_alloc(ctx->ctr, GFP_KERNEL);
	if (!sk_req) {
		pr_err("skcipher: Failed to allocate request\n");
		return -ENOMEM;
	}

	init_completion(&result.completion);

	sg_init_one(&iv_sg, iv, AES_BLOCK_SIZE);
	sg_init_one(&tag_sg, tag, AES_BLOCK_SIZE);
	skcipher_request_set_callback(sk_req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				      omap_aes_gcm_complete, &result);
	ret = crypto_skcipher_setkey(ctx->ctr, (u8 *)ctx->key, ctx->keylen);
	if (ret) {
		pr_err("skcipher: Failed to set key\n");
		goto out;
	}

	skcipher_request_set_crypt(sk_req, &iv_sg, &tag_sg, AES_BLOCK_SIZE,
				   NULL);
	ret = crypto_skcipher_encrypt(sk_req);
	switch (ret) {
	case 0:
		break;
	case -EINPROGRESS:
	case -EBUSY:
		ret = wait_for_completion_interruptible(&result.completion);
		if (!ret) {
			ret = result.err;
			if (!ret) {
				reinit_completion(&result.completion);
				break;
			}
		}
		/* fall through */
	default:
		pr_err("Encryption of IV failed for GCM mode\n");
		break;
	}

out:
	skcipher_request_free(sk_req);
	return ret;
}

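/*
 * DMA-out completion callback: read the GHASH result from the TAG
 * registers and XOR it with the precomputed E(K, J0) to produce the
 * tag. For decryption, additionally XOR in the tag taken from the
 * source buffer, so that a matching tag leaves all-zero bytes for the
 * check in omap_aes_gcm_done_task().
 */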
void omap_aes_gcm_dma_out_callback(void *data)
{
	struct omap_aes_dev *dd = data;
	struct omap_aes_reqctx *rctx;
	int i, val;
	u32 *auth_tag, tag[4];

	if (!(dd->flags & FLAGS_ENCRYPT))
		scatterwalk_map_and_copy(tag, dd->aead_req->src,
					 dd->total + dd->aead_req->assoclen,
					 dd->authsize, 0);

	rctx = aead_request_ctx(dd->aead_req);
	auth_tag = (u32 *)rctx->auth_tag;
	for (i = 0; i < 4; i++) {
		val = omap_aes_read(dd, AES_REG_TAG_N(dd, i));
		auth_tag[i] = val ^ auth_tag[i];
		if (!(dd->flags & FLAGS_ENCRYPT))
			auth_tag[i] = auth_tag[i] ^ tag[i];
	}

	omap_aes_gcm_done_task(dd);
}

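/*
 * Enqueue a request and, if the device is idle, dequeue the next one
 * and start processing it. Called with req == NULL from the completion
 * path to kick the queue. dd->lock protects the queue and FLAGS_BUSY.
 */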
static int omap_aes_gcm_handle_queue(struct omap_aes_dev *dd,
				     struct aead_request *req)
{
	struct omap_aes_ctx *ctx;
	struct aead_request *backlog;
	struct omap_aes_reqctx *rctx;
	unsigned long flags;
	int err, ret = 0;

	spin_lock_irqsave(&dd->lock, flags);
	if (req)
		ret = aead_enqueue_request(&dd->aead_queue, req);
	if (dd->flags & FLAGS_BUSY) {
		spin_unlock_irqrestore(&dd->lock, flags);
		return ret;
	}

	backlog = aead_get_backlog(&dd->aead_queue);
	req = aead_dequeue_request(&dd->aead_queue);
	if (req)
		dd->flags |= FLAGS_BUSY;
	spin_unlock_irqrestore(&dd->lock, flags);

	if (!req)
		return ret;

	if (backlog)
		backlog->base.complete(&backlog->base, -EINPROGRESS);

	ctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
	rctx = aead_request_ctx(req);

	dd->ctx = ctx;
	rctx->dd = dd;
	dd->aead_req = req;

	rctx->mode &= FLAGS_MODE_MASK;
	dd->flags = (dd->flags & ~FLAGS_MODE_MASK) | rctx->mode;

	err = omap_aes_gcm_copy_buffers(dd, req);

	if (!err) {
		err = omap_aes_write_ctrl(dd);
		if (!err)
			err = omap_aes_crypt_dma_start(dd);
	}

	if (err) {
		/* Release FLAGS_BUSY and complete the request on failure */
		omap_aes_gcm_finish_req(dd, err);
		omap_aes_gcm_handle_queue(dd, NULL);
	}

	return ret;
}

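/*
 * Common entry point for all GCM variants: build J0 from the nonce and
 * a 32-bit counter of 1, precompute E(K, J0) into the request context,
 * and queue the request. The zero-length case (no AAD, no payload) is
 * handled entirely in software, since its tag is exactly E(K, J0).
 */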
static int omap_aes_gcm_crypt(struct aead_request *req, unsigned long mode)
{
	struct omap_aes_reqctx *rctx = aead_request_ctx(req);
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	unsigned int authlen = crypto_aead_authsize(aead);
	struct omap_aes_dev *dd;
	__be32 counter = cpu_to_be32(1);
	int err, assoclen;

	memset(rctx->auth_tag, 0, sizeof(rctx->auth_tag));
	memcpy(rctx->iv + GCM_AES_IV_SIZE, &counter, 4);

	err = do_encrypt_iv(req, (u32 *)rctx->auth_tag, (u32 *)rctx->iv);
	if (err)
		return err;

	if (mode & FLAGS_RFC4106_GCM)
		assoclen = req->assoclen - 8;
	else
		assoclen = req->assoclen;
	if (assoclen + req->cryptlen == 0) {
		scatterwalk_map_and_copy(rctx->auth_tag, req->dst, 0, authlen,
					 1);
		return 0;
	}

	dd = omap_aes_find_dev(rctx);
	if (!dd)
		return -ENODEV;
	rctx->mode = mode;

	return omap_aes_gcm_handle_queue(dd, req);
}

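/*
 * AEAD entry points: plain GCM takes a 12-byte IV from the request;
 * the RFC4106 variants build the nonce from the 4 salt bytes stored at
 * setkey time plus the 8-byte per-request IV.
 */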
int omap_aes_gcm_encrypt(struct aead_request *req)
{
	struct omap_aes_reqctx *rctx = aead_request_ctx(req);

	memcpy(rctx->iv, req->iv, GCM_AES_IV_SIZE);
	return omap_aes_gcm_crypt(req, FLAGS_ENCRYPT | FLAGS_GCM);
}

int omap_aes_gcm_decrypt(struct aead_request *req)
{
	struct omap_aes_reqctx *rctx = aead_request_ctx(req);

	memcpy(rctx->iv, req->iv, GCM_AES_IV_SIZE);
	return omap_aes_gcm_crypt(req, FLAGS_GCM);
}

int omap_aes_4106gcm_encrypt(struct aead_request *req)
{
	struct omap_aes_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
	struct omap_aes_reqctx *rctx = aead_request_ctx(req);

	memcpy(rctx->iv, ctx->nonce, 4);
	memcpy(rctx->iv + 4, req->iv, 8);
	return omap_aes_gcm_crypt(req, FLAGS_ENCRYPT | FLAGS_GCM |
				  FLAGS_RFC4106_GCM);
}

int omap_aes_4106gcm_decrypt(struct aead_request *req)
{
	struct omap_aes_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
	struct omap_aes_reqctx *rctx = aead_request_ctx(req);

	memcpy(rctx->iv, ctx->nonce, 4);
	memcpy(rctx->iv + 4, req->iv, 8);
	return omap_aes_gcm_crypt(req, FLAGS_GCM | FLAGS_RFC4106_GCM);
}

int omap_aes_gcm_setkey(struct crypto_aead *tfm, const u8 *key,
			unsigned int keylen)
{
	struct omap_aes_ctx *ctx = crypto_aead_ctx(tfm);

	if (keylen != AES_KEYSIZE_128 && keylen != AES_KEYSIZE_192 &&
	    keylen != AES_KEYSIZE_256)
		return -EINVAL;

	memcpy(ctx->key, key, keylen);
	ctx->keylen = keylen;

	return 0;
}

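/*
 * RFC4106 key material carries a 4-byte nonce (salt) appended to the
 * AES key; split it off before validating the key length.
 */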
int omap_aes_4106gcm_setkey(struct crypto_aead *tfm, const u8 *key,
			    unsigned int keylen)
{
	struct omap_aes_ctx *ctx = crypto_aead_ctx(tfm);

	if (keylen < 4)
		return -EINVAL;

	keylen -= 4;
	if (keylen != AES_KEYSIZE_128 && keylen != AES_KEYSIZE_192 &&
	    keylen != AES_KEYSIZE_256)
		return -EINVAL;

	memcpy(ctx->key, key, keylen);
	memcpy(ctx->nonce, key + keylen, 4);
	ctx->keylen = keylen;

	return 0;
}