// SPDX-License-Identifier: GPL-2.0+
/*
 * caam - Freescale FSL CAAM support for ahash functions of crypto API
 *
 * Copyright 2011 Freescale Semiconductor, Inc.
 * Copyright 2018-2019, 2023 NXP
 *
 * Based on caamalg.c crypto API driver.
 *
 * relationship of digest job descriptor or first job descriptor after init to
 * shared descriptors:
 *
 * ---------------               ---------------
 * | JobDesc #1  |-------------->|  ShareDesc  |
 * | *(packet 1) |               |  (hashKey)  |
 * ---------------               | (operation) |
 *                               ---------------
 *
 * relationship of subsequent job descriptors to shared descriptors:
 *
 * ---------------               ---------------
 * | JobDesc #2  |-------------->|  ShareDesc  |
 * | *(packet 2) |      |------->|  (hashKey)  |
 * ---------------      |    |-->| (operation) |
 *       .              |    |   | (load ctx2) |
 *       .              |    |   ---------------
 * ---------------      |    |
 * | JobDesc #3  |------|    |
 * | *(packet 3) |           |
 * ---------------           |
 *       .                   |
 *       .                   |
 * ---------------           |
 * | JobDesc #4  |------------
 * | *(packet 4) |
 * ---------------
 *
 * The SharedDesc never changes for a connection unless rekeyed, but
 * each packet will likely be in a different place. So all we need
 * to know to process the packet is where the input is, where the
 * output goes, and what context we want to process with. Context is
 * in the SharedDesc, packet references in the JobDesc.
 *
 * So, a job desc looks like:
 *
 * ---------------------
 * | Header            |
 * | ShareDesc Pointer |
 * | SEQ_OUT_PTR       |
 * | (output buffer)   |
 * | (output length)   |
 * | SEQ_IN_PTR        |
 * | (input buffer)    |
 * | (input length)    |
 * ---------------------
 */

#include "compat.h"

#include "regs.h"
#include "intern.h"
#include "desc_constr.h"
#include "jr.h"
#include "error.h"
#include "sg_sw_sec4.h"
#include "key_gen.h"
#include "caamhash_desc.h"
#include <crypto/internal/engine.h>
#include <crypto/internal/hash.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/string.h>

#define CAAM_CRA_PRIORITY		3000

/* max hash key is max split key size */
#define CAAM_MAX_HASH_KEY_SIZE		(SHA512_DIGEST_SIZE * 2)

#define CAAM_MAX_HASH_BLOCK_SIZE	SHA512_BLOCK_SIZE
#define CAAM_MAX_HASH_DIGEST_SIZE	SHA512_DIGEST_SIZE

#define DESC_HASH_MAX_USED_BYTES	(DESC_AHASH_FINAL_LEN + \
					 CAAM_MAX_HASH_KEY_SIZE)
#define DESC_HASH_MAX_USED_LEN		(DESC_HASH_MAX_USED_BYTES / CAAM_CMD_SZ)

/* caam context sizes for hashes: running digest + 8 */
#define HASH_MSG_LEN			8
#define MAX_CTX_LEN			(HASH_MSG_LEN + SHA512_DIGEST_SIZE)

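/*
 * List of caam_hash_alg instances registered with the crypto API, kept so
 * they can be unregistered again on driver removal (registration happens
 * outside this section).
 */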
static struct list_head hash_list;

/* ahash per-session context */
struct caam_hash_ctx {
	u32 sh_desc_update[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
	u32 sh_desc_update_first[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
	u32 sh_desc_fin[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
	u32 sh_desc_digest[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
	u8 key[CAAM_MAX_HASH_KEY_SIZE] ____cacheline_aligned;
	dma_addr_t sh_desc_update_dma ____cacheline_aligned;
	dma_addr_t sh_desc_update_first_dma;
	dma_addr_t sh_desc_fin_dma;
	dma_addr_t sh_desc_digest_dma;
	enum dma_data_direction dir;
	enum dma_data_direction key_dir;
	struct device *jrdev;
	int ctx_len;
	struct alginfo adata;
};

/* ahash state */
struct caam_hash_state {
	dma_addr_t buf_dma;
	dma_addr_t ctx_dma;
	int ctx_dma_len;
	u8 buf[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
	int buflen;
	int next_buflen;
	u8 caam_ctx[MAX_CTX_LEN] ____cacheline_aligned;
	int (*update)(struct ahash_request *req) ____cacheline_aligned;
	int (*final)(struct ahash_request *req);
	int (*finup)(struct ahash_request *req);
	struct ahash_edesc *edesc;
	void (*ahash_op_done)(struct device *jrdev, u32 *desc, u32 err,
			      void *context);
};

struct caam_export_state {
	u8 buf[CAAM_MAX_HASH_BLOCK_SIZE];
	u8 caam_ctx[MAX_CTX_LEN];
	int buflen;
	int (*update)(struct ahash_request *req);
	int (*final)(struct ahash_request *req);
	int (*finup)(struct ahash_request *req);
};

static inline bool is_cmac_aes(u32 algtype)
{
	return (algtype & (OP_ALG_ALGSEL_MASK | OP_ALG_AAI_MASK)) ==
	       (OP_ALG_ALGSEL_AES | OP_ALG_AAI_CMAC);
}

/* Common job descriptor seq in/out ptr routines */

/* Map state->caam_ctx, and append seq_out_ptr command that points to it */
static inline int map_seq_out_ptr_ctx(u32 *desc, struct device *jrdev,
				      struct caam_hash_state *state,
				      int ctx_len)
{
	state->ctx_dma_len = ctx_len;
	state->ctx_dma = dma_map_single(jrdev, state->caam_ctx,
					ctx_len, DMA_FROM_DEVICE);
	if (dma_mapping_error(jrdev, state->ctx_dma)) {
		dev_err(jrdev, "unable to map ctx\n");
		state->ctx_dma = 0;
		return -ENOMEM;
	}

	append_seq_out_ptr(desc, state->ctx_dma, ctx_len, 0);

	return 0;
}

/* Map current buffer in state (if length > 0) and put it in link table */
static inline int buf_map_to_sec4_sg(struct device *jrdev,
				     struct sec4_sg_entry *sec4_sg,
				     struct caam_hash_state *state)
{
	int buflen = state->buflen;

	if (!buflen)
		return 0;

	state->buf_dma = dma_map_single(jrdev, state->buf, buflen,
					DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, state->buf_dma)) {
		dev_err(jrdev, "unable to map buf\n");
		state->buf_dma = 0;
		return -ENOMEM;
	}

	dma_to_sec4_sg_one(sec4_sg, state->buf_dma, buflen, 0);

	return 0;
}

/* Map state->caam_ctx, and add it to link table */
static inline int ctx_map_to_sec4_sg(struct device *jrdev,
				     struct caam_hash_state *state, int ctx_len,
				     struct sec4_sg_entry *sec4_sg, u32 flag)
{
	state->ctx_dma_len = ctx_len;
	state->ctx_dma = dma_map_single(jrdev, state->caam_ctx, ctx_len, flag);
	if (dma_mapping_error(jrdev, state->ctx_dma)) {
		dev_err(jrdev, "unable to map ctx\n");
		state->ctx_dma = 0;
		return -ENOMEM;
	}

	dma_to_sec4_sg_one(sec4_sg, state->ctx_dma, ctx_len, 0);

	return 0;
}

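/*
 * Construct the four shared descriptors used by an MDHA-based (MD5/SHA*)
 * hash tfm -- update, update_first, final and digest -- and sync them to
 * the device. Called from cra_init() for unkeyed hashes and again from
 * setkey() once the (split) key is known.
 */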
static int ahash_set_sh_desc(struct crypto_ahash *ahash)
{
	struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
	int digestsize = crypto_ahash_digestsize(ahash);
	struct device *jrdev = ctx->jrdev;
	struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent);
	u32 *desc;

	ctx->adata.key_virt = ctx->key;

	/* ahash_update shared descriptor */
	desc = ctx->sh_desc_update;
	cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_UPDATE, ctx->ctx_len,
			  ctx->ctx_len, true, ctrlpriv->era);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_update_dma,
				   desc_bytes(desc), ctx->dir);

	print_hex_dump_debug("ahash update shdesc@"__stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
			     1);

	/* ahash_update_first shared descriptor */
	desc = ctx->sh_desc_update_first;
	cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_INIT, ctx->ctx_len,
			  ctx->ctx_len, false, ctrlpriv->era);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_update_first_dma,
				   desc_bytes(desc), ctx->dir);
	print_hex_dump_debug("ahash update first shdesc@"__stringify(__LINE__)
			     ": ", DUMP_PREFIX_ADDRESS, 16, 4, desc,
			     desc_bytes(desc), 1);

	/* ahash_final shared descriptor */
	desc = ctx->sh_desc_fin;
	cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_FINALIZE, digestsize,
			  ctx->ctx_len, true, ctrlpriv->era);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_fin_dma,
				   desc_bytes(desc), ctx->dir);

	print_hex_dump_debug("ahash final shdesc@"__stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, desc,
			     desc_bytes(desc), 1);

	/* ahash_digest shared descriptor */
	desc = ctx->sh_desc_digest;
	cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_INITFINAL, digestsize,
			  ctx->ctx_len, false, ctrlpriv->era);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_digest_dma,
				   desc_bytes(desc), ctx->dir);

	print_hex_dump_debug("ahash digest shdesc@"__stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, desc,
			     desc_bytes(desc), 1);

	return 0;
}

static int axcbc_set_sh_desc(struct crypto_ahash *ahash)
{
	struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
	int digestsize = crypto_ahash_digestsize(ahash);
	struct device *jrdev = ctx->jrdev;
	u32 *desc;

	/* shared descriptor for ahash_update */
	desc = ctx->sh_desc_update;
	cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_UPDATE,
			    ctx->ctx_len, ctx->ctx_len);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_update_dma,
				   desc_bytes(desc), ctx->dir);
	print_hex_dump_debug("axcbc update shdesc@" __stringify(__LINE__)" : ",
			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
			     1);

	/* shared descriptor for ahash_{final,finup} */
	desc = ctx->sh_desc_fin;
	cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_FINALIZE,
			    digestsize, ctx->ctx_len);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_fin_dma,
				   desc_bytes(desc), ctx->dir);
	print_hex_dump_debug("axcbc finup shdesc@" __stringify(__LINE__)" : ",
			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
			     1);

	/* key is immediate data for INIT and INITFINAL states */
	ctx->adata.key_virt = ctx->key;

	/* shared descriptor for first invocation of ahash_update */
	desc = ctx->sh_desc_update_first;
	cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_INIT, ctx->ctx_len,
			    ctx->ctx_len);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_update_first_dma,
				   desc_bytes(desc), ctx->dir);
	print_hex_dump_debug("axcbc update first shdesc@" __stringify(__LINE__)
			     " : ", DUMP_PREFIX_ADDRESS, 16, 4, desc,
			     desc_bytes(desc), 1);

	/* shared descriptor for ahash_digest */
	desc = ctx->sh_desc_digest;
	cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_INITFINAL,
			    digestsize, ctx->ctx_len);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_digest_dma,
				   desc_bytes(desc), ctx->dir);
	print_hex_dump_debug("axcbc digest shdesc@" __stringify(__LINE__)" : ",
			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
			     1);
	return 0;
}

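/*
 * Unlike XCBC, the CMAC key is always referenced as immediate data in the
 * shared descriptors (ctx->adata.key_virt is set in acmac_setkey()), so no
 * DMA mapping of the key is needed here.
 */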
static int acmac_set_sh_desc(struct crypto_ahash *ahash)
{
	struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
	int digestsize = crypto_ahash_digestsize(ahash);
	struct device *jrdev = ctx->jrdev;
	u32 *desc;

	/* shared descriptor for ahash_update */
	desc = ctx->sh_desc_update;
	cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_UPDATE,
			    ctx->ctx_len, ctx->ctx_len);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_update_dma,
				   desc_bytes(desc), ctx->dir);
	print_hex_dump_debug("acmac update shdesc@" __stringify(__LINE__)" : ",
			     DUMP_PREFIX_ADDRESS, 16, 4, desc,
			     desc_bytes(desc), 1);

	/* shared descriptor for ahash_{final,finup} */
	desc = ctx->sh_desc_fin;
	cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_FINALIZE,
			    digestsize, ctx->ctx_len);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_fin_dma,
				   desc_bytes(desc), ctx->dir);
	print_hex_dump_debug("acmac finup shdesc@" __stringify(__LINE__)" : ",
			     DUMP_PREFIX_ADDRESS, 16, 4, desc,
			     desc_bytes(desc), 1);

	/* shared descriptor for first invocation of ahash_update */
	desc = ctx->sh_desc_update_first;
	cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_INIT, ctx->ctx_len,
			    ctx->ctx_len);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_update_first_dma,
				   desc_bytes(desc), ctx->dir);
	print_hex_dump_debug("acmac update first shdesc@" __stringify(__LINE__)
			     " : ", DUMP_PREFIX_ADDRESS, 16, 4, desc,
			     desc_bytes(desc), 1);

	/* shared descriptor for ahash_digest */
	desc = ctx->sh_desc_digest;
	cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_INITFINAL,
			    digestsize, ctx->ctx_len);
	dma_sync_single_for_device(jrdev, ctx->sh_desc_digest_dma,
				   desc_bytes(desc), ctx->dir);
	print_hex_dump_debug("acmac digest shdesc@" __stringify(__LINE__)" : ",
			     DUMP_PREFIX_ADDRESS, 16, 4, desc,
			     desc_bytes(desc), 1);

	return 0;
}

/* Digest the key if it is longer than the hash block size */
static int hash_digest_key(struct caam_hash_ctx *ctx, u32 *keylen, u8 *key,
			   u32 digestsize)
{
	struct device *jrdev = ctx->jrdev;
	u32 *desc;
	struct split_key_result result;
	dma_addr_t key_dma;
	int ret;

	desc = kmalloc(CAAM_CMD_SZ * 8 + CAAM_PTR_SZ * 2, GFP_KERNEL);
	if (!desc)
		return -ENOMEM;

	init_job_desc(desc, 0);

	key_dma = dma_map_single(jrdev, key, *keylen, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(jrdev, key_dma)) {
		dev_err(jrdev, "unable to map key memory\n");
		kfree(desc);
		return -ENOMEM;
	}

	/* Job descriptor to perform unkeyed hash on key_in */
	append_operation(desc, ctx->adata.algtype | OP_ALG_ENCRYPT |
			 OP_ALG_AS_INITFINAL);
	append_seq_in_ptr(desc, key_dma, *keylen, 0);
	append_seq_fifo_load(desc, *keylen, FIFOLD_CLASS_CLASS2 |
			     FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_MSG);
	append_seq_out_ptr(desc, key_dma, digestsize, 0);
	append_seq_store(desc, digestsize, LDST_CLASS_2_CCB |
			 LDST_SRCDST_BYTE_CONTEXT);

	print_hex_dump_debug("key_in@"__stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, key, *keylen, 1);
	print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
			     1);

	result.err = 0;
	init_completion(&result.completion);

	ret = caam_jr_enqueue(jrdev, desc, split_key_done, &result);
	if (ret == -EINPROGRESS) {
		/* in progress */
		wait_for_completion(&result.completion);
		ret = result.err;

		print_hex_dump_debug("digested key@"__stringify(__LINE__)": ",
				     DUMP_PREFIX_ADDRESS, 16, 4, key,
				     digestsize, 1);
	}
	dma_unmap_single(jrdev, key_dma, *keylen, DMA_BIDIRECTIONAL);

	*keylen = digestsize;

	kfree(desc);

	return ret;
}

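/*
 * HMAC setkey: keys longer than the block size are first digested down to
 * digestsize (as the HMAC spec requires), then either copied into ctx->key
 * for the DKP-based descriptors (Era >= 6) or turned into a split key in
 * software via gen_split_key().
 */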
static int ahash_setkey(struct crypto_ahash *ahash,
			const u8 *key, unsigned int keylen)
{
	struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
	struct device *jrdev = ctx->jrdev;
	int blocksize = crypto_tfm_alg_blocksize(&ahash->base);
	int digestsize = crypto_ahash_digestsize(ahash);
	struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctx->jrdev->parent);
	int ret;
	u8 *hashed_key = NULL;

	dev_dbg(jrdev, "keylen %d\n", keylen);

	if (keylen > blocksize) {
		unsigned int aligned_len =
			ALIGN(keylen, dma_get_cache_alignment());

		if (aligned_len < keylen)
			return -EOVERFLOW;

		hashed_key = kmemdup(key, keylen, GFP_KERNEL);
		if (!hashed_key)
			return -ENOMEM;
		ret = hash_digest_key(ctx, &keylen, hashed_key, digestsize);
		if (ret)
			goto bad_free_key;
		key = hashed_key;
	}

	/*
	 * If DKP is supported, use it in the shared descriptor to generate
	 * the split key.
	 */
	if (ctrlpriv->era >= 6) {
		ctx->adata.key_inline = true;
		ctx->adata.keylen = keylen;
		ctx->adata.keylen_pad = split_key_len(ctx->adata.algtype &
						      OP_ALG_ALGSEL_MASK);

		if (ctx->adata.keylen_pad > CAAM_MAX_HASH_KEY_SIZE)
			goto bad_free_key;

		memcpy(ctx->key, key, keylen);

		/*
		 * In case |user key| > |derived key|, using DKP<imm,imm>
		 * would result in invalid opcodes (last bytes of user key) in
		 * the resulting descriptor. Use DKP<ptr,imm> instead => both
		 * virtual and dma key addresses are needed.
		 */
		if (keylen > ctx->adata.keylen_pad)
			dma_sync_single_for_device(ctx->jrdev,
						   ctx->adata.key_dma,
						   ctx->adata.keylen_pad,
						   DMA_TO_DEVICE);
	} else {
		ret = gen_split_key(ctx->jrdev, ctx->key, &ctx->adata, key,
				    keylen, CAAM_MAX_HASH_KEY_SIZE);
		if (ret)
			goto bad_free_key;
	}

	kfree(hashed_key);
	return ahash_set_sh_desc(ahash);
 bad_free_key:
	kfree(hashed_key);
	return -EINVAL;
}

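/*
 * XCBC requires exactly one 128-bit AES key. The key lives in ctx->key and
 * is synced to the device because, unlike CMAC, the update/final shared
 * descriptors reference it by DMA address; only the INIT/INITFINAL states
 * take it as immediate data (see axcbc_set_sh_desc()).
 */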
static int axcbc_setkey(struct crypto_ahash *ahash, const u8 *key,
			unsigned int keylen)
{
	struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
	struct device *jrdev = ctx->jrdev;

	if (keylen != AES_KEYSIZE_128)
		return -EINVAL;

	memcpy(ctx->key, key, keylen);
	dma_sync_single_for_device(jrdev, ctx->adata.key_dma, keylen,
				   DMA_TO_DEVICE);
	ctx->adata.keylen = keylen;

	print_hex_dump_debug("axcbc ctx.key@" __stringify(__LINE__)" : ",
			     DUMP_PREFIX_ADDRESS, 16, 4, ctx->key, keylen, 1);

	return axcbc_set_sh_desc(ahash);
}

static int acmac_setkey(struct crypto_ahash *ahash, const u8 *key,
			unsigned int keylen)
{
	struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
	int err;

	err = aes_check_keylen(keylen);
	if (err)
		return err;

	/* key is immediate data for all cmac shared descriptors */
	ctx->adata.key_virt = key;
	ctx->adata.keylen = keylen;

	print_hex_dump_debug("acmac ctx.key@" __stringify(__LINE__)" : ",
			     DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);

	return acmac_set_sh_desc(ahash);
}

/*
 * ahash_edesc - s/w-extended ahash descriptor
 * @sec4_sg_dma: physical mapped address of h/w link table
 * @src_nents: number of segments in input scatterlist
 * @sec4_sg_bytes: length of dma mapped sec4_sg space
 * @bklog: stored to determine if the request needs backlog
 * @hw_desc: the h/w job descriptor followed by any referenced link tables
 * @sec4_sg: h/w link table
 */
struct ahash_edesc {
	dma_addr_t sec4_sg_dma;
	int src_nents;
	int sec4_sg_bytes;
	bool bklog;
	u32 hw_desc[DESC_JOB_IO_LEN_MAX / sizeof(u32)] ____cacheline_aligned;
	struct sec4_sg_entry sec4_sg[];
};

static inline void ahash_unmap(struct device *dev,
			       struct ahash_edesc *edesc,
			       struct ahash_request *req, int dst_len)
{
	struct caam_hash_state *state = ahash_request_ctx_dma(req);

	if (edesc->src_nents)
		dma_unmap_sg(dev, req->src, edesc->src_nents, DMA_TO_DEVICE);

	if (edesc->sec4_sg_bytes)
		dma_unmap_single(dev, edesc->sec4_sg_dma,
				 edesc->sec4_sg_bytes, DMA_TO_DEVICE);

	if (state->buf_dma) {
		dma_unmap_single(dev, state->buf_dma, state->buflen,
				 DMA_TO_DEVICE);
		state->buf_dma = 0;
	}
}

static inline void ahash_unmap_ctx(struct device *dev,
				   struct ahash_edesc *edesc,
				   struct ahash_request *req, int dst_len,
				   u32 flag)
{
	struct caam_hash_state *state = ahash_request_ctx_dma(req);

	if (state->ctx_dma) {
		dma_unmap_single(dev, state->ctx_dma, state->ctx_dma_len, flag);
		state->ctx_dma = 0;
	}
	ahash_unmap(dev, edesc, req, dst_len);
}

static inline void ahash_done_cpy(struct device *jrdev, u32 *desc, u32 err,
				  void *context, enum dma_data_direction dir)
{
	struct ahash_request *req = context;
	struct caam_drv_private_jr *jrp = dev_get_drvdata(jrdev);
	struct ahash_edesc *edesc;
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	int digestsize = crypto_ahash_digestsize(ahash);
	struct caam_hash_state *state = ahash_request_ctx_dma(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
	int ecode = 0;
	bool has_bklog;

	dev_dbg(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);

	edesc = state->edesc;
	has_bklog = edesc->bklog;

	if (err)
		ecode = caam_jr_strstatus(jrdev, err);

	ahash_unmap_ctx(jrdev, edesc, req, digestsize, dir);
	memcpy(req->result, state->caam_ctx, digestsize);
	kfree(edesc);

	print_hex_dump_debug("ctx@"__stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
			     ctx->ctx_len, 1);

	/*
	 * If no backlog flag, the completion of the request is done
	 * by CAAM, not crypto engine.
	 */
	if (!has_bklog)
		ahash_request_complete(req, ecode);
	else
		crypto_finalize_hash_request(jrp->engine, req, ecode);
}

static void ahash_done(struct device *jrdev, u32 *desc, u32 err,
		       void *context)
{
	ahash_done_cpy(jrdev, desc, err, context, DMA_FROM_DEVICE);
}

static void ahash_done_ctx_src(struct device *jrdev, u32 *desc, u32 err,
			       void *context)
{
	ahash_done_cpy(jrdev, desc, err, context, DMA_BIDIRECTIONAL);
}

static inline void ahash_done_switch(struct device *jrdev, u32 *desc, u32 err,
				     void *context, enum dma_data_direction dir)
{
	struct ahash_request *req = context;
	struct caam_drv_private_jr *jrp = dev_get_drvdata(jrdev);
	struct ahash_edesc *edesc;
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
	struct caam_hash_state *state = ahash_request_ctx_dma(req);
	int digestsize = crypto_ahash_digestsize(ahash);
	int ecode = 0;
	bool has_bklog;

	dev_dbg(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);

	edesc = state->edesc;
	has_bklog = edesc->bklog;
	if (err)
		ecode = caam_jr_strstatus(jrdev, err);

	ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, dir);
	kfree(edesc);

	scatterwalk_map_and_copy(state->buf, req->src,
				 req->nbytes - state->next_buflen,
				 state->next_buflen, 0);
	state->buflen = state->next_buflen;

	print_hex_dump_debug("buf@" __stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, state->buf,
			     state->buflen, 1);

	print_hex_dump_debug("ctx@"__stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
			     ctx->ctx_len, 1);
	if (req->result)
		print_hex_dump_debug("result@"__stringify(__LINE__)": ",
				     DUMP_PREFIX_ADDRESS, 16, 4, req->result,
				     digestsize, 1);

	/*
	 * If no backlog flag, the completion of the request is done
	 * by CAAM, not crypto engine.
	 */
	if (!has_bklog)
		ahash_request_complete(req, ecode);
	else
		crypto_finalize_hash_request(jrp->engine, req, ecode);
}

static void ahash_done_bi(struct device *jrdev, u32 *desc, u32 err,
			  void *context)
{
	ahash_done_switch(jrdev, desc, err, context, DMA_BIDIRECTIONAL);
}

static void ahash_done_ctx_dst(struct device *jrdev, u32 *desc, u32 err,
			       void *context)
{
	ahash_done_switch(jrdev, desc, err, context, DMA_FROM_DEVICE);
}

/*
 * Allocate an enhanced descriptor, which contains the hardware descriptor
 * and space for hardware scatter table containing sg_num entries.
 */
static struct ahash_edesc *ahash_edesc_alloc(struct ahash_request *req,
					     int sg_num, u32 *sh_desc,
					     dma_addr_t sh_desc_dma)
{
	struct caam_hash_state *state = ahash_request_ctx_dma(req);
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		       GFP_KERNEL : GFP_ATOMIC;
	struct ahash_edesc *edesc;

	sg_num = pad_sg_nents(sg_num);
	edesc = kzalloc(struct_size(edesc, sec4_sg, sg_num), flags);
	if (!edesc)
		return NULL;

	state->edesc = edesc;

	init_job_desc_shared(edesc->hw_desc, sh_desc_dma, desc_len(sh_desc),
			     HDR_SHARE_DEFER | HDR_REVERSE);

	return edesc;
}

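/*
 * Append the SEQ IN PTR command for the request source: a single mapped
 * segment is referenced directly, while multiple segments (or an extra
 * leading entry such as ctx/buf) go through a DMA-mapped sec4 S/G table.
 */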
static int ahash_edesc_add_src(struct caam_hash_ctx *ctx,
			       struct ahash_edesc *edesc,
			       struct ahash_request *req, int nents,
			       unsigned int first_sg,
			       unsigned int first_bytes, size_t to_hash)
{
	dma_addr_t src_dma;
	u32 options;

	if (nents > 1 || first_sg) {
		struct sec4_sg_entry *sg = edesc->sec4_sg;
		unsigned int sgsize = sizeof(*sg) *
				      pad_sg_nents(first_sg + nents);

		sg_to_sec4_sg_last(req->src, to_hash, sg + first_sg, 0);

		src_dma = dma_map_single(ctx->jrdev, sg, sgsize, DMA_TO_DEVICE);
		if (dma_mapping_error(ctx->jrdev, src_dma)) {
			dev_err(ctx->jrdev, "unable to map S/G table\n");
			return -ENOMEM;
		}

		edesc->sec4_sg_bytes = sgsize;
		edesc->sec4_sg_dma = src_dma;
		options = LDST_SGF;
	} else {
		src_dma = sg_dma_address(req->src);
		options = 0;
	}

	append_seq_in_ptr(edesc->hw_desc, src_dma, first_bytes + to_hash,
			  options);

	return 0;
}

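/*
 * crypto_engine do_one_request() callback: submits requests that were
 * queued to the engine because the caller asked for backlogging; their
 * completion is then finalized through the engine in ahash_done_*().
 */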
static int ahash_do_one_req(struct crypto_engine *engine, void *areq)
{
	struct ahash_request *req = ahash_request_cast(areq);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(crypto_ahash_reqtfm(req));
	struct caam_hash_state *state = ahash_request_ctx_dma(req);
	struct device *jrdev = ctx->jrdev;
	u32 *desc = state->edesc->hw_desc;
	int ret;

	state->edesc->bklog = true;

	ret = caam_jr_enqueue(jrdev, desc, state->ahash_op_done, req);

	if (ret == -ENOSPC && engine->retry_support)
		return ret;

	if (ret != -EINPROGRESS) {
		ahash_unmap(jrdev, state->edesc, req, 0);
		kfree(state->edesc);
	} else {
		ret = 0;
	}

	return ret;
}

static int ahash_enqueue_req(struct device *jrdev,
			     void (*cbk)(struct device *jrdev, u32 *desc,
					 u32 err, void *context),
			     struct ahash_request *req,
			     int dst_len, enum dma_data_direction dir)
{
	struct caam_drv_private_jr *jrpriv = dev_get_drvdata(jrdev);
	struct caam_hash_state *state = ahash_request_ctx_dma(req);
	struct ahash_edesc *edesc = state->edesc;
	u32 *desc = edesc->hw_desc;
	int ret;

	state->ahash_op_done = cbk;

	/*
	 * Only backlog requests are sent to the crypto engine, since the
	 * others can be handled by CAAM, if free, especially since the JR
	 * has up to 1024 entries (more than the 10 entries of the crypto
	 * engine).
	 */
	if (req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)
		ret = crypto_transfer_hash_request_to_engine(jrpriv->engine,
							     req);
	else
		ret = caam_jr_enqueue(jrdev, desc, cbk, req);

	if ((ret != -EINPROGRESS) && (ret != -EBUSY)) {
		ahash_unmap_ctx(jrdev, edesc, req, dst_len, dir);
		kfree(edesc);
	}

	return ret;
}

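/*
 * Update operations hash only whole blocks: the trailing partial block of
 * (buflen + req->nbytes) is carried over in state->buf for the next call.
 * For XCBC/CMAC a full trailing block is also held back, since finalization
 * treats the last block specially.
 */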
/* submit update job descriptor */
static int ahash_update_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
	struct caam_hash_state *state = ahash_request_ctx_dma(req);
	struct device *jrdev = ctx->jrdev;
	u8 *buf = state->buf;
	int *buflen = &state->buflen;
	int *next_buflen = &state->next_buflen;
	int blocksize = crypto_ahash_blocksize(ahash);
	int in_len = *buflen + req->nbytes, to_hash;
	u32 *desc;
	int src_nents, mapped_nents, sec4_sg_bytes, sec4_sg_src_index;
	struct ahash_edesc *edesc;
	int ret = 0;

	*next_buflen = in_len & (blocksize - 1);
	to_hash = in_len - *next_buflen;

	/*
	 * For XCBC and CMAC, if to_hash is multiple of block size,
	 * keep last block in internal buffer
	 */
	if ((is_xcbc_aes(ctx->adata.algtype) ||
	     is_cmac_aes(ctx->adata.algtype)) && to_hash >= blocksize &&
	    (*next_buflen == 0)) {
		*next_buflen = blocksize;
		to_hash -= blocksize;
	}

	if (to_hash) {
		int pad_nents;
		int src_len = req->nbytes - *next_buflen;

		src_nents = sg_nents_for_len(req->src, src_len);
		if (src_nents < 0) {
			dev_err(jrdev, "Invalid number of src SG.\n");
			return src_nents;
		}

		if (src_nents) {
			mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
						  DMA_TO_DEVICE);
			if (!mapped_nents) {
				dev_err(jrdev, "unable to DMA map source\n");
				return -ENOMEM;
			}
		} else {
			mapped_nents = 0;
		}

		sec4_sg_src_index = 1 + (*buflen ? 1 : 0);
		pad_nents = pad_sg_nents(sec4_sg_src_index + mapped_nents);
		sec4_sg_bytes = pad_nents * sizeof(struct sec4_sg_entry);

		/*
		 * allocate space for base edesc and hw desc commands,
		 * link tables
		 */
		edesc = ahash_edesc_alloc(req, pad_nents, ctx->sh_desc_update,
					  ctx->sh_desc_update_dma);
		if (!edesc) {
			dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
			return -ENOMEM;
		}

		edesc->src_nents = src_nents;
		edesc->sec4_sg_bytes = sec4_sg_bytes;

		ret = ctx_map_to_sec4_sg(jrdev, state, ctx->ctx_len,
					 edesc->sec4_sg, DMA_BIDIRECTIONAL);
		if (ret)
			goto unmap_ctx;

		ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1, state);
		if (ret)
			goto unmap_ctx;

		if (mapped_nents)
			sg_to_sec4_sg_last(req->src, src_len,
					   edesc->sec4_sg + sec4_sg_src_index,
					   0);
		else
			sg_to_sec4_set_last(edesc->sec4_sg + sec4_sg_src_index -
					    1);

		desc = edesc->hw_desc;

		edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
						    sec4_sg_bytes,
						    DMA_TO_DEVICE);
		if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
			dev_err(jrdev, "unable to map S/G table\n");
			ret = -ENOMEM;
			goto unmap_ctx;
		}

		append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len +
				  to_hash, LDST_SGF);

		append_seq_out_ptr(desc, state->ctx_dma, ctx->ctx_len, 0);

		print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ",
				     DUMP_PREFIX_ADDRESS, 16, 4, desc,
				     desc_bytes(desc), 1);

		ret = ahash_enqueue_req(jrdev, ahash_done_bi, req,
					ctx->ctx_len, DMA_BIDIRECTIONAL);
	} else if (*next_buflen) {
		scatterwalk_map_and_copy(buf + *buflen, req->src, 0,
					 req->nbytes, 0);
		*buflen = *next_buflen;

		print_hex_dump_debug("buf@" __stringify(__LINE__)": ",
				     DUMP_PREFIX_ADDRESS, 16, 4, buf,
				     *buflen, 1);
	}

	return ret;
 unmap_ctx:
	ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_BIDIRECTIONAL);
	kfree(edesc);
	return ret;
}

static int ahash_final_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
	struct caam_hash_state *state = ahash_request_ctx_dma(req);
	struct device *jrdev = ctx->jrdev;
	int buflen = state->buflen;
	u32 *desc;
	int sec4_sg_bytes;
	int digestsize = crypto_ahash_digestsize(ahash);
	struct ahash_edesc *edesc;
	int ret;

	sec4_sg_bytes = pad_sg_nents(1 + (buflen ? 1 : 0)) *
			sizeof(struct sec4_sg_entry);

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = ahash_edesc_alloc(req, 4, ctx->sh_desc_fin,
				  ctx->sh_desc_fin_dma);
	if (!edesc)
		return -ENOMEM;

	desc = edesc->hw_desc;

	edesc->sec4_sg_bytes = sec4_sg_bytes;

	ret = ctx_map_to_sec4_sg(jrdev, state, ctx->ctx_len,
				 edesc->sec4_sg, DMA_BIDIRECTIONAL);
	if (ret)
		goto unmap_ctx;

	ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1, state);
	if (ret)
		goto unmap_ctx;

	sg_to_sec4_set_last(edesc->sec4_sg + (buflen ? 1 : 0));

	edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
					    sec4_sg_bytes, DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
		dev_err(jrdev, "unable to map S/G table\n");
		ret = -ENOMEM;
		goto unmap_ctx;
	}

	append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len + buflen,
			  LDST_SGF);
	append_seq_out_ptr(desc, state->ctx_dma, digestsize, 0);

	print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
			     1);

	return ahash_enqueue_req(jrdev, ahash_done_ctx_src, req,
				 digestsize, DMA_BIDIRECTIONAL);
 unmap_ctx:
	ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_BIDIRECTIONAL);
	kfree(edesc);
	return ret;
}

static int ahash_finup_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
	struct caam_hash_state *state = ahash_request_ctx_dma(req);
	struct device *jrdev = ctx->jrdev;
	int buflen = state->buflen;
	u32 *desc;
	int sec4_sg_src_index;
	int src_nents, mapped_nents;
	int digestsize = crypto_ahash_digestsize(ahash);
	struct ahash_edesc *edesc;
	int ret;

	src_nents = sg_nents_for_len(req->src, req->nbytes);
	if (src_nents < 0) {
		dev_err(jrdev, "Invalid number of src SG.\n");
		return src_nents;
	}

	if (src_nents) {
		mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
					  DMA_TO_DEVICE);
		if (!mapped_nents) {
			dev_err(jrdev, "unable to DMA map source\n");
			return -ENOMEM;
		}
	} else {
		mapped_nents = 0;
	}

	sec4_sg_src_index = 1 + (buflen ? 1 : 0);

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = ahash_edesc_alloc(req, sec4_sg_src_index + mapped_nents,
				  ctx->sh_desc_fin, ctx->sh_desc_fin_dma);
	if (!edesc) {
		dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
		return -ENOMEM;
	}

	desc = edesc->hw_desc;

	edesc->src_nents = src_nents;

	ret = ctx_map_to_sec4_sg(jrdev, state, ctx->ctx_len,
				 edesc->sec4_sg, DMA_BIDIRECTIONAL);
	if (ret)
		goto unmap_ctx;

	ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1, state);
	if (ret)
		goto unmap_ctx;

	ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents,
				  sec4_sg_src_index, ctx->ctx_len + buflen,
				  req->nbytes);
	if (ret)
		goto unmap_ctx;

	append_seq_out_ptr(desc, state->ctx_dma, digestsize, 0);

	print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
			     1);

	return ahash_enqueue_req(jrdev, ahash_done_ctx_src, req,
				 digestsize, DMA_BIDIRECTIONAL);
 unmap_ctx:
	ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_BIDIRECTIONAL);
	kfree(edesc);
	return ret;
}

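/*
 * One-shot digest: the whole message is processed with the INITFINAL
 * shared descriptor, so no running context is carried between jobs -- the
 * result is DMA'd straight into state->caam_ctx and copied to req->result
 * on completion (see ahash_done_cpy()).
 */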
static int ahash_digest(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
	struct caam_hash_state *state = ahash_request_ctx_dma(req);
	struct device *jrdev = ctx->jrdev;
	u32 *desc;
	int digestsize = crypto_ahash_digestsize(ahash);
	int src_nents, mapped_nents;
	struct ahash_edesc *edesc;
	int ret;

	state->buf_dma = 0;

	src_nents = sg_nents_for_len(req->src, req->nbytes);
	if (src_nents < 0) {
		dev_err(jrdev, "Invalid number of src SG.\n");
		return src_nents;
	}

	if (src_nents) {
		mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
					  DMA_TO_DEVICE);
		if (!mapped_nents) {
			dev_err(jrdev, "unable to map source for DMA\n");
			return -ENOMEM;
		}
	} else {
		mapped_nents = 0;
	}

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = ahash_edesc_alloc(req, mapped_nents > 1 ? mapped_nents : 0,
				  ctx->sh_desc_digest, ctx->sh_desc_digest_dma);
	if (!edesc) {
		dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
		return -ENOMEM;
	}

	edesc->src_nents = src_nents;

	ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents, 0, 0,
				  req->nbytes);
	if (ret) {
		ahash_unmap(jrdev, edesc, req, digestsize);
		kfree(edesc);
		return ret;
	}

	desc = edesc->hw_desc;

	ret = map_seq_out_ptr_ctx(desc, jrdev, state, digestsize);
	if (ret) {
		ahash_unmap(jrdev, edesc, req, digestsize);
		kfree(edesc);
		return -ENOMEM;
	}

	print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
			     1);

	return ahash_enqueue_req(jrdev, ahash_done, req, digestsize,
				 DMA_FROM_DEVICE);
}

/* submit ahash final if it is the first job descriptor */
static int ahash_final_no_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
	struct caam_hash_state *state = ahash_request_ctx_dma(req);
	struct device *jrdev = ctx->jrdev;
	u8 *buf = state->buf;
	int buflen = state->buflen;
	u32 *desc;
	int digestsize = crypto_ahash_digestsize(ahash);
	struct ahash_edesc *edesc;
	int ret;

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = ahash_edesc_alloc(req, 0, ctx->sh_desc_digest,
				  ctx->sh_desc_digest_dma);
	if (!edesc)
		return -ENOMEM;

	desc = edesc->hw_desc;

	if (buflen) {
		state->buf_dma = dma_map_single(jrdev, buf, buflen,
						DMA_TO_DEVICE);
		if (dma_mapping_error(jrdev, state->buf_dma)) {
			dev_err(jrdev, "unable to map src\n");
			goto unmap;
		}

		append_seq_in_ptr(desc, state->buf_dma, buflen, 0);
	}

	ret = map_seq_out_ptr_ctx(desc, jrdev, state, digestsize);
	if (ret)
		goto unmap;

	print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
			     1);

	return ahash_enqueue_req(jrdev, ahash_done, req,
				 digestsize, DMA_FROM_DEVICE);
 unmap:
	ahash_unmap(jrdev, edesc, req, digestsize);
	kfree(edesc);
	return -ENOMEM;
}

/* submit ahash update if it is the first job descriptor after update */
static int ahash_update_no_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
	struct caam_hash_state *state = ahash_request_ctx_dma(req);
	struct device *jrdev = ctx->jrdev;
	u8 *buf = state->buf;
	int *buflen = &state->buflen;
	int *next_buflen = &state->next_buflen;
	int blocksize = crypto_ahash_blocksize(ahash);
	int in_len = *buflen + req->nbytes, to_hash;
	int sec4_sg_bytes, src_nents, mapped_nents;
	struct ahash_edesc *edesc;
	u32 *desc;
	int ret = 0;

	*next_buflen = in_len & (blocksize - 1);
	to_hash = in_len - *next_buflen;

	/*
	 * For XCBC and CMAC, if to_hash is multiple of block size,
	 * keep last block in internal buffer
	 */
	if ((is_xcbc_aes(ctx->adata.algtype) ||
	     is_cmac_aes(ctx->adata.algtype)) && to_hash >= blocksize &&
	    (*next_buflen == 0)) {
		*next_buflen = blocksize;
		to_hash -= blocksize;
	}

	if (to_hash) {
		int pad_nents;
		int src_len = req->nbytes - *next_buflen;

		src_nents = sg_nents_for_len(req->src, src_len);
		if (src_nents < 0) {
			dev_err(jrdev, "Invalid number of src SG.\n");
			return src_nents;
		}

		if (src_nents) {
			mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
						  DMA_TO_DEVICE);
			if (!mapped_nents) {
				dev_err(jrdev, "unable to DMA map source\n");
				return -ENOMEM;
			}
		} else {
			mapped_nents = 0;
		}

		pad_nents = pad_sg_nents(1 + mapped_nents);
		sec4_sg_bytes = pad_nents * sizeof(struct sec4_sg_entry);

		/*
		 * allocate space for base edesc and hw desc commands,
		 * link tables
		 */
		edesc = ahash_edesc_alloc(req, pad_nents,
					  ctx->sh_desc_update_first,
					  ctx->sh_desc_update_first_dma);
		if (!edesc) {
			dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
			return -ENOMEM;
		}

		edesc->src_nents = src_nents;
		edesc->sec4_sg_bytes = sec4_sg_bytes;

		ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg, state);
		if (ret)
			goto unmap_ctx;

		sg_to_sec4_sg_last(req->src, src_len, edesc->sec4_sg + 1, 0);

		desc = edesc->hw_desc;

		edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
						    sec4_sg_bytes,
						    DMA_TO_DEVICE);
		if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
			dev_err(jrdev, "unable to map S/G table\n");
			ret = -ENOMEM;
			goto unmap_ctx;
		}

		append_seq_in_ptr(desc, edesc->sec4_sg_dma, to_hash, LDST_SGF);

		ret = map_seq_out_ptr_ctx(desc, jrdev, state, ctx->ctx_len);
		if (ret)
			goto unmap_ctx;

		print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ",
				     DUMP_PREFIX_ADDRESS, 16, 4, desc,
				     desc_bytes(desc), 1);

		ret = ahash_enqueue_req(jrdev, ahash_done_ctx_dst, req,
					ctx->ctx_len, DMA_TO_DEVICE);
		if ((ret != -EINPROGRESS) && (ret != -EBUSY))
			return ret;
		state->update = ahash_update_ctx;
		state->finup = ahash_finup_ctx;
		state->final = ahash_final_ctx;
	} else if (*next_buflen) {
		scatterwalk_map_and_copy(buf + *buflen, req->src, 0,
					 req->nbytes, 0);
		*buflen = *next_buflen;

		print_hex_dump_debug("buf@" __stringify(__LINE__)": ",
				     DUMP_PREFIX_ADDRESS, 16, 4, buf,
				     *buflen, 1);
	}

	return ret;
 unmap_ctx:
	ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_TO_DEVICE);
	kfree(edesc);
	return ret;
}

/* submit ahash finup if it is the first job descriptor after update */
static int ahash_finup_no_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
	struct caam_hash_state *state = ahash_request_ctx_dma(req);
	struct device *jrdev = ctx->jrdev;
	int buflen = state->buflen;
	u32 *desc;
	int sec4_sg_bytes, sec4_sg_src_index, src_nents, mapped_nents;
	int digestsize = crypto_ahash_digestsize(ahash);
	struct ahash_edesc *edesc;
	int ret;

	src_nents = sg_nents_for_len(req->src, req->nbytes);
	if (src_nents < 0) {
		dev_err(jrdev, "Invalid number of src SG.\n");
		return src_nents;
	}

	if (src_nents) {
		mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
					  DMA_TO_DEVICE);
		if (!mapped_nents) {
			dev_err(jrdev, "unable to DMA map source\n");
			return -ENOMEM;
		}
	} else {
		mapped_nents = 0;
	}

	sec4_sg_src_index = 2;
	sec4_sg_bytes = (sec4_sg_src_index + mapped_nents) *
			sizeof(struct sec4_sg_entry);

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = ahash_edesc_alloc(req, sec4_sg_src_index + mapped_nents,
				  ctx->sh_desc_digest, ctx->sh_desc_digest_dma);
	if (!edesc) {
		dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
		return -ENOMEM;
	}

	desc = edesc->hw_desc;

	edesc->src_nents = src_nents;
	edesc->sec4_sg_bytes = sec4_sg_bytes;

	ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg, state);
	if (ret)
		goto unmap;

	ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents, 1, buflen,
				  req->nbytes);
	if (ret) {
		dev_err(jrdev, "unable to map S/G table\n");
		goto unmap;
	}

	ret = map_seq_out_ptr_ctx(desc, jrdev, state, digestsize);
	if (ret)
		goto unmap;

	print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
			     1);

	return ahash_enqueue_req(jrdev, ahash_done, req,
				 digestsize, DMA_FROM_DEVICE);
 unmap:
	ahash_unmap(jrdev, edesc, req, digestsize);
	kfree(edesc);
	return -ENOMEM;
}

/* submit first update job descriptor after init */
static int ahash_update_first(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
	struct caam_hash_state *state = ahash_request_ctx_dma(req);
	struct device *jrdev = ctx->jrdev;
	u8 *buf = state->buf;
	int *buflen = &state->buflen;
	int *next_buflen = &state->next_buflen;
	int to_hash;
	int blocksize = crypto_ahash_blocksize(ahash);
	u32 *desc;
	int src_nents, mapped_nents;
	struct ahash_edesc *edesc;
	int ret = 0;

	*next_buflen = req->nbytes & (blocksize - 1);
	to_hash = req->nbytes - *next_buflen;

	/*
	 * For XCBC and CMAC, if to_hash is multiple of block size,
	 * keep last block in internal buffer
	 */
	if ((is_xcbc_aes(ctx->adata.algtype) ||
	     is_cmac_aes(ctx->adata.algtype)) && to_hash >= blocksize &&
	    (*next_buflen == 0)) {
		*next_buflen = blocksize;
		to_hash -= blocksize;
	}

	if (to_hash) {
		src_nents = sg_nents_for_len(req->src,
					     req->nbytes - *next_buflen);
		if (src_nents < 0) {
			dev_err(jrdev, "Invalid number of src SG.\n");
			return src_nents;
		}

		if (src_nents) {
			mapped_nents = dma_map_sg(jrdev, req->src, src_nents,
						  DMA_TO_DEVICE);
			if (!mapped_nents) {
				dev_err(jrdev, "unable to map source for DMA\n");
				return -ENOMEM;
			}
		} else {
			mapped_nents = 0;
		}

		/*
		 * allocate space for base edesc and hw desc commands,
		 * link tables
		 */
		edesc = ahash_edesc_alloc(req, mapped_nents > 1 ?
					  mapped_nents : 0,
					  ctx->sh_desc_update_first,
					  ctx->sh_desc_update_first_dma);
		if (!edesc) {
			dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
			return -ENOMEM;
		}

		edesc->src_nents = src_nents;

		ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents, 0, 0,
					  to_hash);
		if (ret)
			goto unmap_ctx;

		desc = edesc->hw_desc;

		ret = map_seq_out_ptr_ctx(desc, jrdev, state, ctx->ctx_len);
		if (ret)
			goto unmap_ctx;

		print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ",
				     DUMP_PREFIX_ADDRESS, 16, 4, desc,
				     desc_bytes(desc), 1);

		ret = ahash_enqueue_req(jrdev, ahash_done_ctx_dst, req,
					ctx->ctx_len, DMA_TO_DEVICE);
		if ((ret != -EINPROGRESS) && (ret != -EBUSY))
			return ret;
		state->update = ahash_update_ctx;
		state->finup = ahash_finup_ctx;
		state->final = ahash_final_ctx;
	} else if (*next_buflen) {
		state->update = ahash_update_no_ctx;
		state->finup = ahash_finup_no_ctx;
		state->final = ahash_final_no_ctx;
		scatterwalk_map_and_copy(buf, req->src, 0,
					 req->nbytes, 0);
		*buflen = *next_buflen;

		print_hex_dump_debug("buf@" __stringify(__LINE__)": ",
				     DUMP_PREFIX_ADDRESS, 16, 4, buf,
				     *buflen, 1);
	}

	return ret;
 unmap_ctx:
	ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_TO_DEVICE);
	kfree(edesc);
	return ret;
}

static int ahash_finup_first(struct ahash_request *req)
{
	return ahash_digest(req);
}

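/*
 * ahash_init() only resets the software state; no job is submitted. The
 * update/finup/final hooks start out pointing at the *_first/_no_ctx
 * variants and are switched to the *_ctx variants once a running context
 * exists in hardware.
 */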
static int ahash_init(struct ahash_request *req)
{
	struct caam_hash_state *state = ahash_request_ctx_dma(req);

	state->update = ahash_update_first;
	state->finup = ahash_finup_first;
	state->final = ahash_final_no_ctx;

	state->ctx_dma = 0;
	state->ctx_dma_len = 0;
	state->buf_dma = 0;
	state->buflen = 0;
	state->next_buflen = 0;

	return 0;
}

static int ahash_update(struct ahash_request *req)
{
	struct caam_hash_state *state = ahash_request_ctx_dma(req);

	return state->update(req);
}

static int ahash_finup(struct ahash_request *req)
{
	struct caam_hash_state *state = ahash_request_ctx_dma(req);

	return state->finup(req);
}

static int ahash_final(struct ahash_request *req)
{
	struct caam_hash_state *state = ahash_request_ctx_dma(req);

	return state->final(req);
}

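/*
 * export/import serialize only the software-visible state (pending buffer,
 * running-context snapshot and the current update/final/finup hooks); DMA
 * addresses are deliberately dropped and re-created on the next operation.
 */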
static int ahash_export(struct ahash_request *req, void *out)
{
	struct caam_hash_state *state = ahash_request_ctx_dma(req);
	struct caam_export_state *export = out;
	u8 *buf = state->buf;
	int len = state->buflen;

	memcpy(export->buf, buf, len);
	memcpy(export->caam_ctx, state->caam_ctx, sizeof(export->caam_ctx));
	export->buflen = len;
	export->update = state->update;
	export->final = state->final;
	export->finup = state->finup;

	return 0;
}

static int ahash_import(struct ahash_request *req, const void *in)
{
	struct caam_hash_state *state = ahash_request_ctx_dma(req);
	const struct caam_export_state *export = in;

	memset(state, 0, sizeof(*state));
	memcpy(state->buf, export->buf, export->buflen);
	memcpy(state->caam_ctx, export->caam_ctx, sizeof(state->caam_ctx));
	state->buflen = export->buflen;
	state->update = export->update;
	state->final = export->final;
	state->finup = export->finup;

	return 0;
}

struct caam_hash_template {
	char name[CRYPTO_MAX_ALG_NAME];
	char driver_name[CRYPTO_MAX_ALG_NAME];
	char hmac_name[CRYPTO_MAX_ALG_NAME];
	char hmac_driver_name[CRYPTO_MAX_ALG_NAME];
	unsigned int blocksize;
	struct ahash_alg template_ahash;
	u32 alg_type;
};

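/*
 * The templates below are exposed through the standard crypto API. A
 * minimal sketch of how a kernel user would drive one of them (error
 * handling omitted; assumes the generic crypto_wait_req() helper):
 *
 *	struct crypto_ahash *tfm = crypto_alloc_ahash("hmac(sha256)", 0, 0);
 *	struct ahash_request *req = ahash_request_alloc(tfm, GFP_KERNEL);
 *	DECLARE_CRYPTO_WAIT(wait);
 *
 *	crypto_ahash_setkey(tfm, key, keylen);
 *	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
 *				   crypto_req_done, &wait);
 *	ahash_request_set_crypt(req, sgl, digest, nbytes);
 *	crypto_wait_req(crypto_ahash_digest(req), &wait);
 *	ahash_request_free(req);
 *	crypto_free_ahash(tfm);
 */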
/* ahash descriptors */
static struct caam_hash_template driver_hash[] = {
	{
		.name = "sha1",
		.driver_name = "sha1-caam",
		.hmac_name = "hmac(sha1)",
		.hmac_driver_name = "hmac-sha1-caam",
		.blocksize = SHA1_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = SHA1_DIGEST_SIZE,
				.statesize = sizeof(struct caam_export_state),
			},
		},
		.alg_type = OP_ALG_ALGSEL_SHA1,
	}, {
		.name = "sha224",
		.driver_name = "sha224-caam",
		.hmac_name = "hmac(sha224)",
		.hmac_driver_name = "hmac-sha224-caam",
		.blocksize = SHA224_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = SHA224_DIGEST_SIZE,
				.statesize = sizeof(struct caam_export_state),
			},
		},
		.alg_type = OP_ALG_ALGSEL_SHA224,
	}, {
		.name = "sha256",
		.driver_name = "sha256-caam",
		.hmac_name = "hmac(sha256)",
		.hmac_driver_name = "hmac-sha256-caam",
		.blocksize = SHA256_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = SHA256_DIGEST_SIZE,
				.statesize = sizeof(struct caam_export_state),
			},
		},
		.alg_type = OP_ALG_ALGSEL_SHA256,
	}, {
		.name = "sha384",
		.driver_name = "sha384-caam",
		.hmac_name = "hmac(sha384)",
		.hmac_driver_name = "hmac-sha384-caam",
		.blocksize = SHA384_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = SHA384_DIGEST_SIZE,
				.statesize = sizeof(struct caam_export_state),
			},
		},
		.alg_type = OP_ALG_ALGSEL_SHA384,
	}, {
		.name = "sha512",
		.driver_name = "sha512-caam",
		.hmac_name = "hmac(sha512)",
		.hmac_driver_name = "hmac-sha512-caam",
		.blocksize = SHA512_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = SHA512_DIGEST_SIZE,
				.statesize = sizeof(struct caam_export_state),
			},
		},
		.alg_type = OP_ALG_ALGSEL_SHA512,
	}, {
		.name = "md5",
		.driver_name = "md5-caam",
		.hmac_name = "hmac(md5)",
		.hmac_driver_name = "hmac-md5-caam",
		.blocksize = MD5_BLOCK_WORDS * 4,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = MD5_DIGEST_SIZE,
				.statesize = sizeof(struct caam_export_state),
			},
		},
		.alg_type = OP_ALG_ALGSEL_MD5,
	}, {
		.hmac_name = "xcbc(aes)",
		.hmac_driver_name = "xcbc-aes-caam",
		.blocksize = AES_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = axcbc_setkey,
			.halg = {
				.digestsize = AES_BLOCK_SIZE,
				.statesize = sizeof(struct caam_export_state),
			},
		},
		.alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_XCBC_MAC,
	}, {
		.hmac_name = "cmac(aes)",
		.hmac_driver_name = "cmac-aes-caam",
		.blocksize = AES_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = acmac_setkey,
			.halg = {
				.digestsize = AES_BLOCK_SIZE,
				.statesize = sizeof(struct caam_export_state),
			},
		},
		.alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CMAC,
	},
};

struct caam_hash_alg {
	struct list_head entry;
	int alg_type;
	struct ahash_engine_alg ahash_alg;
};

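/*
 * Per-tfm init: grab a job ring (for in-order request processing), pick
 * the class 1 (AES XCBC/CMAC) or class 2 (MDHA) algorithm type and the
 * running-context length, and DMA-map the key and shared-descriptor
 * regions of the ctx.
 */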
caam_hash_cra_init(struct crypto_tfm * tfm)1760 static int caam_hash_cra_init(struct crypto_tfm *tfm)
1761 {
1762 struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
1763 struct crypto_alg *base = tfm->__crt_alg;
1764 struct hash_alg_common *halg =
1765 container_of(base, struct hash_alg_common, base);
1766 struct ahash_alg *alg =
1767 container_of(halg, struct ahash_alg, halg);
1768 struct caam_hash_alg *caam_hash =
1769 container_of(alg, struct caam_hash_alg, ahash_alg.base);
1770 struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
1771 /* Sizes for MDHA running digests: MD5, SHA1, 224, 256, 384, 512 */
1772 static const u8 runninglen[] = { HASH_MSG_LEN + MD5_DIGEST_SIZE,
1773 HASH_MSG_LEN + SHA1_DIGEST_SIZE,
1774 HASH_MSG_LEN + 32,
1775 HASH_MSG_LEN + SHA256_DIGEST_SIZE,
1776 HASH_MSG_LEN + 64,
1777 HASH_MSG_LEN + SHA512_DIGEST_SIZE };
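	/*
	 * Note: the SHA-224 and SHA-384 entries above are 32 and 64 bytes
	 * rather than the truncated digest sizes, since the running digest
	 * is the full internal state of the parent algorithm (SHA-256 and
	 * SHA-512 respectively); truncation happens only at finalization.
	 */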
	const size_t sh_desc_update_offset = offsetof(struct caam_hash_ctx,
						      sh_desc_update);
	dma_addr_t dma_addr;
	struct caam_drv_private *priv;

	/*
	 * Get a Job ring from Job Ring driver to ensure in-order
	 * crypto request processing per tfm
	 */
	ctx->jrdev = caam_jr_alloc();
	if (IS_ERR(ctx->jrdev)) {
		pr_err("Job Ring Device allocation for transform failed\n");
		return PTR_ERR(ctx->jrdev);
	}

	priv = dev_get_drvdata(ctx->jrdev->parent);

	if (is_xcbc_aes(caam_hash->alg_type)) {
		ctx->dir = DMA_TO_DEVICE;
		ctx->key_dir = DMA_BIDIRECTIONAL;
		ctx->adata.algtype = OP_TYPE_CLASS1_ALG | caam_hash->alg_type;
		ctx->ctx_len = 48;
	} else if (is_cmac_aes(caam_hash->alg_type)) {
		ctx->dir = DMA_TO_DEVICE;
		ctx->key_dir = DMA_NONE;
		ctx->adata.algtype = OP_TYPE_CLASS1_ALG | caam_hash->alg_type;
		ctx->ctx_len = 32;
	} else {
		if (priv->era >= 6) {
			ctx->dir = DMA_BIDIRECTIONAL;
			ctx->key_dir = alg->setkey ? DMA_TO_DEVICE : DMA_NONE;
		} else {
			ctx->dir = DMA_TO_DEVICE;
			ctx->key_dir = DMA_NONE;
		}
		ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam_hash->alg_type;
		ctx->ctx_len = runninglen[(ctx->adata.algtype &
					   OP_ALG_ALGSEL_SUBMASK) >>
					  OP_ALG_ALGSEL_SHIFT];
	}
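	/*
	 * ctx_len is the amount of context the descriptors save/restore:
	 * 48 bytes for xcbc(aes) (the 16-byte running MAC plus derived key
	 * material), 32 bytes for cmac(aes), and running digest plus the
	 * 8-byte message length (HASH_MSG_LEN) for the MDHA hashes. The
	 * key buffer is mapped bidirectionally for xcbc(aes), presumably
	 * because the CAAM writes derived key material back into it; on
	 * era >= 6 parts the shared descriptors are bidirectional as well,
	 * since DKP writes the derived split key back over the key
	 * embedded in the descriptor.
	 */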

	if (ctx->key_dir != DMA_NONE) {
		ctx->adata.key_dma = dma_map_single_attrs(ctx->jrdev, ctx->key,
							  ARRAY_SIZE(ctx->key),
							  ctx->key_dir,
							  DMA_ATTR_SKIP_CPU_SYNC);
		if (dma_mapping_error(ctx->jrdev, ctx->adata.key_dma)) {
			dev_err(ctx->jrdev, "unable to map key\n");
			caam_jr_free(ctx->jrdev);
			return -ENOMEM;
		}
	}

	dma_addr = dma_map_single_attrs(ctx->jrdev, ctx->sh_desc_update,
					offsetof(struct caam_hash_ctx, key) -
					sh_desc_update_offset,
					ctx->dir, DMA_ATTR_SKIP_CPU_SYNC);
	if (dma_mapping_error(ctx->jrdev, dma_addr)) {
		dev_err(ctx->jrdev, "unable to map shared descriptors\n");

		if (ctx->key_dir != DMA_NONE)
			dma_unmap_single_attrs(ctx->jrdev, ctx->adata.key_dma,
					       ARRAY_SIZE(ctx->key),
					       ctx->key_dir,
					       DMA_ATTR_SKIP_CPU_SYNC);

		caam_jr_free(ctx->jrdev);
		return -ENOMEM;
	}

	ctx->sh_desc_update_dma = dma_addr;
	ctx->sh_desc_update_first_dma = dma_addr +
					offsetof(struct caam_hash_ctx,
						 sh_desc_update_first) -
					sh_desc_update_offset;
	ctx->sh_desc_fin_dma = dma_addr + offsetof(struct caam_hash_ctx,
						   sh_desc_fin) -
					  sh_desc_update_offset;
	ctx->sh_desc_digest_dma = dma_addr + offsetof(struct caam_hash_ctx,
						      sh_desc_digest) -
					     sh_desc_update_offset;
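	/*
	 * The four shared descriptors sit back to back at the start of
	 * caam_hash_ctx, so the single mapping above covers all of them;
	 * the individual DMA handles are just offsets into that mapping.
	 */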

	crypto_ahash_set_reqsize_dma(ahash, sizeof(struct caam_hash_state));

	/*
	 * For keyed hash algorithms shared descriptors
	 * will be created later in setkey() callback
	 */
	return alg->setkey ? 0 : ahash_set_sh_desc(ahash);
}

static void caam_hash_cra_exit(struct crypto_tfm *tfm)
{
	struct caam_hash_ctx *ctx = crypto_tfm_ctx_dma(tfm);

	dma_unmap_single_attrs(ctx->jrdev, ctx->sh_desc_update_dma,
			       offsetof(struct caam_hash_ctx, key) -
			       offsetof(struct caam_hash_ctx, sh_desc_update),
			       ctx->dir, DMA_ATTR_SKIP_CPU_SYNC);
	if (ctx->key_dir != DMA_NONE)
		dma_unmap_single_attrs(ctx->jrdev, ctx->adata.key_dma,
				       ARRAY_SIZE(ctx->key), ctx->key_dir,
				       DMA_ATTR_SKIP_CPU_SYNC);
	caam_jr_free(ctx->jrdev);
}

void caam_algapi_hash_exit(void)
{
	struct caam_hash_alg *t_alg, *n;

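	/*
	 * hash_list is static, so ->next stays NULL unless
	 * caam_algapi_hash_init() ran and initialized it; if it never did,
	 * there is nothing to unregister.
	 */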
	if (!hash_list.next)
		return;

	list_for_each_entry_safe(t_alg, n, &hash_list, entry) {
		crypto_engine_unregister_ahash(&t_alg->ahash_alg);
		list_del(&t_alg->entry);
		kfree(t_alg);
	}
}

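/*
 * Instantiate one algorithm from a driver_hash template, either as the
 * keyed variant (the hmac/xcbc/cmac names) or as the unkeyed one.
 */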
static struct caam_hash_alg *
caam_hash_alloc(struct caam_hash_template *template,
		bool keyed)
{
	struct caam_hash_alg *t_alg;
	struct ahash_alg *halg;
	struct crypto_alg *alg;

	t_alg = kzalloc(sizeof(*t_alg), GFP_KERNEL);
	if (!t_alg)
		return ERR_PTR(-ENOMEM);

	t_alg->ahash_alg.base = template->template_ahash;
	halg = &t_alg->ahash_alg.base;
	alg = &halg->halg.base;

	if (keyed) {
		snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
			 template->hmac_name);
		snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
			 template->hmac_driver_name);
	} else {
		snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
			 template->name);
		snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
			 template->driver_name);
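		/* clear the template's setkey so this registers as unkeyed */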
		halg->setkey = NULL;
	}
	alg->cra_module = THIS_MODULE;
	alg->cra_init = caam_hash_cra_init;
	alg->cra_exit = caam_hash_cra_exit;
	alg->cra_ctxsize = sizeof(struct caam_hash_ctx) + crypto_dma_padding();
	alg->cra_priority = CAAM_CRA_PRIORITY;
	alg->cra_blocksize = template->blocksize;
	alg->cra_alignmask = 0;
	alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY;

	t_alg->alg_type = template->alg_type;
	t_alg->ahash_alg.op.do_one_request = ahash_do_one_req;

	return t_alg;
}

int caam_algapi_hash_init(struct device *ctrldev)
{
	int i = 0, err = 0;
	struct caam_drv_private *priv = dev_get_drvdata(ctrldev);
	unsigned int md_limit = SHA512_DIGEST_SIZE;
	u32 md_inst, md_vid;

	/*
	 * Register crypto algorithms the device supports. First, identify
	 * presence and attributes of MD block.
	 */
	if (priv->era < 10) {
		struct caam_perfmon __iomem *perfmon = &priv->jr[0]->perfmon;

		md_vid = (rd_reg32(&perfmon->cha_id_ls) &
			  CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT;
		md_inst = (rd_reg32(&perfmon->cha_num_ls) &
			   CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT;
	} else {
		u32 mdha = rd_reg32(&priv->jr[0]->vreg.mdha);

		md_vid = (mdha & CHA_VER_VID_MASK) >> CHA_VER_VID_SHIFT;
		md_inst = mdha & CHA_VER_NUM_MASK;
	}

	/*
	 * Skip registration of any hashing algorithms if MD block
	 * is not present.
	 */
	if (!md_inst)
		return 0;

	/* A low-power MDHA (LP256) only supports digests up to SHA-256 */
	if (md_vid == CHA_VER_VID_MD_LP256)
		md_limit = SHA256_DIGEST_SIZE;

	INIT_LIST_HEAD(&hash_list);

	/* register crypto algorithms the device supports */
	for (i = 0; i < ARRAY_SIZE(driver_hash); i++) {
		struct caam_hash_alg *t_alg;
		struct caam_hash_template *alg = driver_hash + i;

		/* If MD size is not supported by device, skip registration */
		if (is_mdha(alg->alg_type) &&
		    alg->template_ahash.halg.digestsize > md_limit)
			continue;

		/* register hmac version */
		t_alg = caam_hash_alloc(alg, true);
		if (IS_ERR(t_alg)) {
			err = PTR_ERR(t_alg);
			pr_warn("%s alg allocation failed\n",
				alg->hmac_driver_name);
			continue;
		}

		err = crypto_engine_register_ahash(&t_alg->ahash_alg);
		if (err) {
			pr_warn("%s alg registration failed: %d\n",
				t_alg->ahash_alg.base.halg.base.cra_driver_name,
				err);
			kfree(t_alg);
		} else {
			list_add_tail(&t_alg->entry, &hash_list);
		}

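		/* xcbc(aes) and cmac(aes) have no unkeyed variant */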
		if ((alg->alg_type & OP_ALG_ALGSEL_MASK) == OP_ALG_ALGSEL_AES)
			continue;

		/* register unkeyed version */
		t_alg = caam_hash_alloc(alg, false);
		if (IS_ERR(t_alg)) {
			err = PTR_ERR(t_alg);
			pr_warn("%s alg allocation failed\n", alg->driver_name);
			continue;
		}

		err = crypto_engine_register_ahash(&t_alg->ahash_alg);
		if (err) {
			pr_warn("%s alg registration failed: %d\n",
				t_alg->ahash_alg.base.halg.base.cra_driver_name,
				err);
			kfree(t_alg);
		} else {
			list_add_tail(&t_alg->entry, &hash_list);
		}
	}

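	/*
	 * Note: err only reflects the outcome of the last registration
	 * attempt; earlier failures are logged but not propagated.
	 */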
	return err;
}