// SPDX-License-Identifier: GPL-2.0
/*
 * sun8i-ss-hash.c - hardware cryptographic offloader for
 * Allwinner A80/A83T SoC
 *
 * Copyright (C) 2015-2020 Corentin Labbe <clabbe@baylibre.com>
 *
 * This file adds support for MD5 and SHA1/SHA224/SHA256.
 *
 * You can find the datasheet in Documentation/arch/arm/sunxi.rst
 */

#include <crypto/hmac.h>
#include <crypto/internal/hash.h>
#include <crypto/md5.h>
#include <crypto/scatterwalk.h>
#include <crypto/sha1.h>
#include <crypto/sha2.h>
#include <linux/bottom_half.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/pm_runtime.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/string.h>
#include "sun8i-ss.h"

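/*
 * sun8i_ss_hashkey() - pre-hash an over-long HMAC key.
 * Per RFC 2104, a key longer than the block size is first hashed and the
 * digest is used as the effective key; this helper does that with a
 * software SHA1 shash.
 */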
static int sun8i_ss_hashkey(struct sun8i_ss_hash_tfm_ctx *tfmctx, const u8 *key,
			    unsigned int keylen)
{
	struct crypto_shash *xtfm;
	struct shash_desc *sdesc;
	size_t len;
	int ret = 0;

	xtfm = crypto_alloc_shash("sha1", 0, CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(xtfm))
		return PTR_ERR(xtfm);

	len = sizeof(*sdesc) + crypto_shash_descsize(xtfm);
	sdesc = kmalloc(len, GFP_KERNEL);
	if (!sdesc) {
		ret = -ENOMEM;
		goto err_hashkey_sdesc;
	}
	sdesc->tfm = xtfm;

	ret = crypto_shash_init(sdesc);
	if (ret) {
		dev_err(tfmctx->ss->dev, "shash init error ret=%d\n", ret);
		goto err_hashkey;
	}
	ret = crypto_shash_finup(sdesc, key, keylen, tfmctx->key);
	if (ret)
		dev_err(tfmctx->ss->dev, "shash finup error\n");
err_hashkey:
	kfree(sdesc);
err_hashkey_sdesc:
	crypto_free_shash(xtfm);
	return ret;
}

int sun8i_ss_hmac_setkey(struct crypto_ahash *ahash, const u8 *key,
			 unsigned int keylen)
{
	struct sun8i_ss_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(ahash);
	int digestsize, i;
	int bs = crypto_ahash_blocksize(ahash);
	int ret;

	digestsize = crypto_ahash_digestsize(ahash);

	if (keylen > bs) {
		ret = sun8i_ss_hashkey(tfmctx, key, keylen);
		if (ret)
			return ret;
		tfmctx->keylen = digestsize;
	} else {
		tfmctx->keylen = keylen;
		memcpy(tfmctx->key, key, keylen);
	}

	tfmctx->ipad = kzalloc(bs, GFP_KERNEL);
	if (!tfmctx->ipad)
		return -ENOMEM;
	tfmctx->opad = kzalloc(bs, GFP_KERNEL);
	if (!tfmctx->opad) {
		ret = -ENOMEM;
		goto err_opad;
	}

	memset(tfmctx->key + tfmctx->keylen, 0, bs - tfmctx->keylen);
	memcpy(tfmctx->ipad, tfmctx->key, tfmctx->keylen);
	memcpy(tfmctx->opad, tfmctx->key, tfmctx->keylen);
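	/*
	 * Derive the inner and outer pads per RFC 2104: XOR the zero-padded
	 * key with the 0x36 (ipad) and 0x5c (opad) constants.
	 */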
	for (i = 0; i < bs; i++) {
		tfmctx->ipad[i] ^= HMAC_IPAD_VALUE;
		tfmctx->opad[i] ^= HMAC_OPAD_VALUE;
	}

	ret = crypto_ahash_setkey(tfmctx->fallback_tfm, key, keylen);
	if (!ret)
		return 0;

	memzero_explicit(tfmctx->key, keylen);
	kfree_sensitive(tfmctx->opad);
err_opad:
	kfree_sensitive(tfmctx->ipad);
	return ret;
}

int sun8i_ss_hash_init_tfm(struct crypto_ahash *tfm)
{
	struct sun8i_ss_hash_tfm_ctx *op = crypto_ahash_ctx(tfm);
	struct ahash_alg *alg = crypto_ahash_alg(tfm);
	struct sun8i_ss_alg_template *algt;
	int err;

	algt = container_of(alg, struct sun8i_ss_alg_template, alg.hash.base);
	op->ss = algt->ss;

	/* FALLBACK */
	op->fallback_tfm = crypto_alloc_ahash(crypto_ahash_alg_name(tfm), 0,
					      CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(op->fallback_tfm)) {
		dev_err(algt->ss->dev, "Fallback driver could not be loaded\n");
		return PTR_ERR(op->fallback_tfm);
	}

	crypto_ahash_set_statesize(tfm,
				   crypto_ahash_statesize(op->fallback_tfm));

	crypto_ahash_set_reqsize(tfm,
				 sizeof(struct sun8i_ss_hash_reqctx) +
				 crypto_ahash_reqsize(op->fallback_tfm));

	memcpy(algt->fbname, crypto_ahash_driver_name(op->fallback_tfm),
	       CRYPTO_MAX_ALG_NAME);

	err = pm_runtime_get_sync(op->ss->dev);
	if (err < 0)
		goto error_pm;
	return 0;
error_pm:
	pm_runtime_put_noidle(op->ss->dev);
	crypto_free_ahash(op->fallback_tfm);
	return err;
}

void sun8i_ss_hash_exit_tfm(struct crypto_ahash *tfm)
{
	struct sun8i_ss_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm);

	kfree_sensitive(tfmctx->ipad);
	kfree_sensitive(tfmctx->opad);

	crypto_free_ahash(tfmctx->fallback_tfm);
	pm_runtime_put_sync_suspend(tfmctx->ss->dev);
}

int sun8i_ss_hash_init(struct ahash_request *areq)
{
	struct sun8i_ss_hash_reqctx *rctx = ahash_request_ctx(areq);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
	struct sun8i_ss_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm);

	memset(rctx, 0, sizeof(struct sun8i_ss_hash_reqctx));

	ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm);
	rctx->fallback_req.base.flags = areq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;

	return crypto_ahash_init(&rctx->fallback_req);
}

int sun8i_ss_hash_export(struct ahash_request *areq, void *out)
{
	struct sun8i_ss_hash_reqctx *rctx = ahash_request_ctx(areq);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
	struct sun8i_ss_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm);

	ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm);
	rctx->fallback_req.base.flags = areq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;

	return crypto_ahash_export(&rctx->fallback_req, out);
}

int sun8i_ss_hash_import(struct ahash_request *areq, const void *in)
{
	struct sun8i_ss_hash_reqctx *rctx = ahash_request_ctx(areq);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
	struct sun8i_ss_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm);

	ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm);
	rctx->fallback_req.base.flags = areq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;

	return crypto_ahash_import(&rctx->fallback_req, in);
}

int sun8i_ss_hash_final(struct ahash_request *areq)
{
	struct sun8i_ss_hash_reqctx *rctx = ahash_request_ctx(areq);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
	struct sun8i_ss_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm);

	ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm);
	rctx->fallback_req.base.flags = areq->base.flags &
					CRYPTO_TFM_REQ_MAY_SLEEP;
	rctx->fallback_req.result = areq->result;

	if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN8I_SS_DEBUG)) {
		struct ahash_alg *alg = crypto_ahash_alg(tfm);
		struct sun8i_ss_alg_template *algt __maybe_unused;

		algt = container_of(alg, struct sun8i_ss_alg_template,
				    alg.hash.base);

#ifdef CONFIG_CRYPTO_DEV_SUN8I_SS_DEBUG
		algt->stat_fb++;
#endif
	}

	return crypto_ahash_final(&rctx->fallback_req);
}

int sun8i_ss_hash_update(struct ahash_request *areq)
{
	struct sun8i_ss_hash_reqctx *rctx = ahash_request_ctx(areq);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
	struct sun8i_ss_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm);

	ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm);
	rctx->fallback_req.base.flags = areq->base.flags &
					CRYPTO_TFM_REQ_MAY_SLEEP;
	rctx->fallback_req.nbytes = areq->nbytes;
	rctx->fallback_req.src = areq->src;

	return crypto_ahash_update(&rctx->fallback_req);
}

int sun8i_ss_hash_finup(struct ahash_request *areq)
{
	struct sun8i_ss_hash_reqctx *rctx = ahash_request_ctx(areq);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
	struct sun8i_ss_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm);

	ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm);
	rctx->fallback_req.base.flags = areq->base.flags &
					CRYPTO_TFM_REQ_MAY_SLEEP;

	rctx->fallback_req.nbytes = areq->nbytes;
	rctx->fallback_req.src = areq->src;
	rctx->fallback_req.result = areq->result;

	if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN8I_SS_DEBUG)) {
		struct ahash_alg *alg = crypto_ahash_alg(tfm);
		struct sun8i_ss_alg_template *algt __maybe_unused;

		algt = container_of(alg, struct sun8i_ss_alg_template,
				    alg.hash.base);

#ifdef CONFIG_CRYPTO_DEV_SUN8I_SS_DEBUG
		algt->stat_fb++;
#endif
	}

	return crypto_ahash_finup(&rctx->fallback_req);
}

static int sun8i_ss_hash_digest_fb(struct ahash_request *areq)
{
	struct sun8i_ss_hash_reqctx *rctx = ahash_request_ctx(areq);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
	struct sun8i_ss_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm);

	ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm);
	rctx->fallback_req.base.flags = areq->base.flags &
					CRYPTO_TFM_REQ_MAY_SLEEP;

	rctx->fallback_req.nbytes = areq->nbytes;
	rctx->fallback_req.src = areq->src;
	rctx->fallback_req.result = areq->result;

	if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN8I_SS_DEBUG)) {
		struct ahash_alg *alg = crypto_ahash_alg(tfm);
		struct sun8i_ss_alg_template *algt __maybe_unused;

		algt = container_of(alg, struct sun8i_ss_alg_template,
				    alg.hash.base);

#ifdef CONFIG_CRYPTO_DEV_SUN8I_SS_DEBUG
		algt->stat_fb++;
#endif
	}

	return crypto_ahash_digest(&rctx->fallback_req);
}

static int sun8i_ss_run_hash_task(struct sun8i_ss_dev *ss,
				  struct sun8i_ss_hash_reqctx *rctx,
				  const char *name)
{
	int flow = rctx->flow;
	u32 v = SS_START;
	int i;

#ifdef CONFIG_CRYPTO_DEV_SUN8I_SS_DEBUG
	ss->flows[flow].stat_req++;
#endif

	/* choose between stream0/stream1 */
	if (flow)
		v |= SS_FLOW1;
	else
		v |= SS_FLOW0;

	v |= rctx->method;

	for (i = 0; i < MAX_SG; i++) {
		if (!rctx->t_dst[i].addr)
			break;

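		/*
		 * For the second and later chunks, chain from the previous
		 * partial digest: BIT(17) appears to make the engine load
		 * its initial state from memory, so point the key/IV address
		 * registers at the last destination buffer.
		 */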
		mutex_lock(&ss->mlock);
		if (i > 0) {
			v |= BIT(17);
			writel(rctx->t_dst[i - 1].addr, ss->base + SS_KEY_ADR_REG);
			writel(rctx->t_dst[i - 1].addr, ss->base + SS_IV_ADR_REG);
		}

		dev_dbg(ss->dev,
			"Processing SG %d on flow %d %s ctl=%x %d to %d method=%x src=%x dst=%x\n",
			i, flow, name, v,
			rctx->t_src[i].len, rctx->t_dst[i].len,
			rctx->method, rctx->t_src[i].addr, rctx->t_dst[i].addr);

		writel(rctx->t_src[i].addr, ss->base + SS_SRC_ADR_REG);
		writel(rctx->t_dst[i].addr, ss->base + SS_DST_ADR_REG);
		writel(rctx->t_src[i].len, ss->base + SS_LEN_ADR_REG);
		writel(BIT(0) | BIT(1), ss->base + SS_INT_CTL_REG);

		reinit_completion(&ss->flows[flow].complete);
		ss->flows[flow].status = 0;
		wmb();

		writel(v, ss->base + SS_CTL_REG);
		mutex_unlock(&ss->mlock);
		wait_for_completion_interruptible_timeout(&ss->flows[flow].complete,
							  msecs_to_jiffies(2000));
		if (ss->flows[flow].status == 0) {
			dev_err(ss->dev, "DMA timeout for %s\n", name);
			return -EFAULT;
		}
	}

	return 0;
}

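/*
 * Decide whether a request fits the SS constraints: a non-zero length below
 * the padding limit, at most MAX_SG - 1 source SGs (one slot is reserved for
 * the padding buffer), word-aligned lengths and offsets, and only the final
 * SG may be a partial block. Anything else must use the software fallback.
 */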
static bool sun8i_ss_hash_need_fallback(struct ahash_request *areq)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
	struct ahash_alg *alg = crypto_ahash_alg(tfm);
	struct sun8i_ss_alg_template *algt;
	struct scatterlist *sg;

	algt = container_of(alg, struct sun8i_ss_alg_template, alg.hash.base);

	if (areq->nbytes == 0) {
		algt->stat_fb_len++;
		return true;
	}

	if (areq->nbytes >= MAX_PAD_SIZE - 64) {
		algt->stat_fb_len++;
		return true;
	}

	/* we need to reserve one SG for the padding one */
	if (sg_nents(areq->src) > MAX_SG - 1) {
		algt->stat_fb_sgnum++;
		return true;
	}

	sg = areq->src;
	while (sg) {
		/* The SS can only hash full blocks; since it supports only
		 * MD5, SHA1, SHA224 and SHA256, the block size is always 64.
		 */
		/* Only the last block could be bounced to the pad buffer */
		if (sg->length % 64 && sg_next(sg)) {
			algt->stat_fb_sglen++;
			return true;
		}
		if (!IS_ALIGNED(sg->offset, sizeof(u32))) {
			algt->stat_fb_align++;
			return true;
		}
		if (sg->length % 4) {
			algt->stat_fb_sglen++;
			return true;
		}
		sg = sg_next(sg);
	}
	return false;
}

int sun8i_ss_hash_digest(struct ahash_request *areq)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
	struct sun8i_ss_hash_reqctx *rctx = ahash_request_ctx(areq);
	struct ahash_alg *alg = crypto_ahash_alg(tfm);
	struct sun8i_ss_alg_template *algt;
	struct sun8i_ss_dev *ss;
	struct crypto_engine *engine;
	int e;

	if (sun8i_ss_hash_need_fallback(areq))
		return sun8i_ss_hash_digest_fb(areq);

	algt = container_of(alg, struct sun8i_ss_alg_template, alg.hash.base);
	ss = algt->ss;

	e = sun8i_ss_get_engine_number(ss);
	rctx->flow = e;
	engine = ss->flows[e].engine;

	return crypto_transfer_hash_request_to_engine(engine, areq);
}

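/*
 * hash_pad() - append Merkle-Damgård padding to the pad buffer.
 * Writes the 0x80 marker at word index padi, zero-fills up to the final
 * block boundary, then stores the message length in bits (little-endian
 * for MD5, big-endian otherwise). Returns the resulting buffer length in
 * 32-bit words, or 0 if the padding would overflow bufsize.
 */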
static u64 hash_pad(__le32 *buf, unsigned int bufsize, u64 padi, u64 byte_count, bool le, int bs)
{
	u64 fill, min_fill, j, k;
	__be64 *bebits;
	__le64 *lebits;

	j = padi;
	buf[j++] = cpu_to_le32(0x80);

	if (bs == 64) {
		fill = 64 - (byte_count % 64);
		min_fill = 2 * sizeof(u32) + sizeof(u32);
	} else {
		fill = 128 - (byte_count % 128);
		min_fill = 4 * sizeof(u32) + sizeof(u32);
	}

	if (fill < min_fill)
		fill += bs;

	k = j;
	j += (fill - min_fill) / sizeof(u32);
	if (j * 4 > bufsize) {
		pr_err("%s OVERFLOW %llu\n", __func__, j);
		return 0;
	}
	for (; k < j; k++)
		buf[k] = 0;

	if (le) {
		/* MD5 */
		lebits = (__le64 *)&buf[j];
		*lebits = cpu_to_le64(byte_count << 3);
		j += 2;
	} else {
		if (bs == 64) {
			/* sha1 sha224 sha256 */
			bebits = (__be64 *)&buf[j];
			*bebits = cpu_to_be64(byte_count << 3);
			j += 2;
		} else {
			/* sha384 sha512 */
			bebits = (__be64 *)&buf[j];
			*bebits = cpu_to_be64(byte_count >> 61);
			j += 2;
			bebits = (__be64 *)&buf[j];
			*bebits = cpu_to_be64(byte_count << 3);
			j += 2;
		}
	}
	if (j * 4 > bufsize) {
		pr_err("%s OVERFLOW %llu\n", __func__, j);
		return 0;
	}

	return j;
}

/* sun8i_ss_hash_run - run an ahash request
 * Send the data of the request to the SS along with an extra SG with padding
 */
int sun8i_ss_hash_run(struct crypto_engine *engine, void *breq)
{
	struct ahash_request *areq = container_of(breq, struct ahash_request, base);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
	struct sun8i_ss_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm);
	struct sun8i_ss_hash_reqctx *rctx = ahash_request_ctx(areq);
	struct ahash_alg *alg = crypto_ahash_alg(tfm);
	struct sun8i_ss_alg_template *algt;
	struct sun8i_ss_dev *ss;
	struct scatterlist *sg;
	int bs = crypto_ahash_blocksize(tfm);
	int nr_sgs, err, digestsize;
	unsigned int len;
	u64 byte_count;
	void *pad, *result;
	int j, i, k, todo;
	dma_addr_t addr_res, addr_pad, addr_xpad;
	__le32 *bf;
	/* HMAC step:
	 * 0: normal hashing
	 * 1: IPAD
	 * 2: OPAD
	 */
	int hmac = 0;

	algt = container_of(alg, struct sun8i_ss_alg_template, alg.hash.base);
	ss = algt->ss;

	digestsize = crypto_ahash_digestsize(tfm);
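	/*
	 * SHA224 shares the SHA256 internal state, so the engine writes a
	 * full 32-byte digest; size the DMA result buffer for SHA256 and
	 * truncate only when copying back to the request.
	 */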
	if (digestsize == SHA224_DIGEST_SIZE)
		digestsize = SHA256_DIGEST_SIZE;

	result = ss->flows[rctx->flow].result;
	pad = ss->flows[rctx->flow].pad;
	bf = (__le32 *)pad;

	for (i = 0; i < MAX_SG; i++) {
		rctx->t_dst[i].addr = 0;
		rctx->t_dst[i].len = 0;
	}

#ifdef CONFIG_CRYPTO_DEV_SUN8I_SS_DEBUG
	algt->stat_req++;
#endif

	rctx->method = ss->variant->alg_hash[algt->ss_algo_id];

	nr_sgs = dma_map_sg(ss->dev, areq->src, sg_nents(areq->src), DMA_TO_DEVICE);
	if (nr_sgs <= 0 || nr_sgs > MAX_SG) {
		dev_err(ss->dev, "Invalid sg number %d\n", nr_sgs);
		err = -EINVAL;
		goto theend;
	}

	addr_res = dma_map_single(ss->dev, result, digestsize, DMA_FROM_DEVICE);
	if (dma_mapping_error(ss->dev, addr_res)) {
		dev_err(ss->dev, "DMA map dest\n");
		err = -EINVAL;
		goto err_dma_result;
	}

	j = 0;
	len = areq->nbytes;
	sg = areq->src;
	i = 0;
	while (len > 0 && sg) {
		if (sg_dma_len(sg) == 0) {
			sg = sg_next(sg);
			continue;
		}
		todo = min(len, sg_dma_len(sg));
		/* only the last SG may have a length that is not a multiple of 64 */
		if (todo % 64 == 0) {
			rctx->t_src[i].addr = sg_dma_address(sg);
			rctx->t_src[i].len = todo / 4;
			rctx->t_dst[i].addr = addr_res;
			rctx->t_dst[i].len = digestsize / 4;
			len -= todo;
		} else {
			scatterwalk_map_and_copy(bf, sg, 0, todo, 0);
			j += todo / 4;
			len -= todo;
		}
		sg = sg_next(sg);
		i++;
	}
	if (len > 0) {
		dev_err(ss->dev, "remaining len %d\n", len);
		err = -EINVAL;
		goto theend;
	}

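	/*
	 * If a partial tail was bounced into the pad buffer above, the loop
	 * left i pointing one past an unfilled slot; step back so the
	 * padding SG set up below reuses it.
	 */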
	if (j > 0)
		i--;

retry:
	byte_count = areq->nbytes;
	if (tfmctx->keylen && hmac == 0) {
		hmac = 1;
		/* shift all SG one slot up, to free slot 0 for IPAD */
		for (k = 6; k >= 0; k--) {
			rctx->t_src[k + 1].addr = rctx->t_src[k].addr;
			rctx->t_src[k + 1].len = rctx->t_src[k].len;
			rctx->t_dst[k + 1].addr = rctx->t_dst[k].addr;
			rctx->t_dst[k + 1].len = rctx->t_dst[k].len;
		}
		addr_xpad = dma_map_single(ss->dev, tfmctx->ipad, bs, DMA_TO_DEVICE);
		err = dma_mapping_error(ss->dev, addr_xpad);
		if (err) {
			dev_err(ss->dev, "Fail to create DMA mapping of ipad\n");
			goto err_dma_xpad;
		}
		rctx->t_src[0].addr = addr_xpad;
		rctx->t_src[0].len = bs / 4;
		rctx->t_dst[0].addr = addr_res;
		rctx->t_dst[0].len = digestsize / 4;
		i++;
		byte_count = areq->nbytes + bs;
	}
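	/*
	 * Second HMAC pass (OPAD): reset the task descriptors and hash
	 * opad || first-pass digest, with the digest copied into the pad
	 * buffer ahead of its padding.
	 */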
	if (tfmctx->keylen && hmac == 2) {
		for (i = 0; i < MAX_SG; i++) {
			rctx->t_src[i].addr = 0;
			rctx->t_src[i].len = 0;
			rctx->t_dst[i].addr = 0;
			rctx->t_dst[i].len = 0;
		}

		addr_res = dma_map_single(ss->dev, result, digestsize, DMA_FROM_DEVICE);
		if (dma_mapping_error(ss->dev, addr_res)) {
			dev_err(ss->dev, "Fail to create DMA mapping of result\n");
			err = -EINVAL;
			goto err_dma_result;
		}
		addr_xpad = dma_map_single(ss->dev, tfmctx->opad, bs, DMA_TO_DEVICE);
		err = dma_mapping_error(ss->dev, addr_xpad);
		if (err) {
			dev_err(ss->dev, "Fail to create DMA mapping of opad\n");
			goto err_dma_xpad;
		}
		rctx->t_src[0].addr = addr_xpad;
		rctx->t_src[0].len = bs / 4;

		memcpy(bf, result, digestsize);
		j = digestsize / 4;
		i = 1;
		byte_count = digestsize + bs;

		rctx->t_dst[0].addr = addr_res;
		rctx->t_dst[0].len = digestsize / 4;
	}

	switch (algt->ss_algo_id) {
	case SS_ID_HASH_MD5:
		j = hash_pad(bf, 4096, j, byte_count, true, bs);
		break;
	case SS_ID_HASH_SHA1:
	case SS_ID_HASH_SHA224:
	case SS_ID_HASH_SHA256:
		j = hash_pad(bf, 4096, j, byte_count, false, bs);
		break;
	}
	if (!j) {
		err = -EINVAL;
		goto theend;
	}

	addr_pad = dma_map_single(ss->dev, pad, j * 4, DMA_TO_DEVICE);
	if (dma_mapping_error(ss->dev, addr_pad)) {
		dev_err(ss->dev, "DMA error on padding SG\n");
		err = -EINVAL;
		goto err_dma_pad;
	}
	rctx->t_src[i].addr = addr_pad;
	rctx->t_src[i].len = j;
	rctx->t_dst[i].addr = addr_res;
	rctx->t_dst[i].len = digestsize / 4;

	err = sun8i_ss_run_hash_task(ss, rctx, crypto_tfm_alg_name(areq->base.tfm));

	/*
	 * mini helper for checking dma map/unmap
	 * flow start for hmac = 0 (and HMAC = 1)
	 * HMAC = 0
	 *  MAP src
	 *  MAP res
	 *
	 * retry:
	 * if hmac then hmac = 1
	 *  MAP xpad (ipad)
	 * if hmac == 2
	 *  MAP res
	 *  MAP xpad (opad)
	 * MAP pad
	 * ACTION!
	 * UNMAP pad
	 * if hmac
	 *  UNMAP xpad
	 * UNMAP res
	 * if hmac < 2
	 *  UNMAP SRC
	 *
	 * if hmac = 1 then hmac = 2 goto retry
	 */

	dma_unmap_single(ss->dev, addr_pad, j * 4, DMA_TO_DEVICE);

err_dma_pad:
	if (hmac > 0)
		dma_unmap_single(ss->dev, addr_xpad, bs, DMA_TO_DEVICE);
err_dma_xpad:
	dma_unmap_single(ss->dev, addr_res, digestsize, DMA_FROM_DEVICE);
err_dma_result:
	if (hmac < 2)
		dma_unmap_sg(ss->dev, areq->src, sg_nents(areq->src),
			     DMA_TO_DEVICE);
	if (hmac == 1 && !err) {
		hmac = 2;
		goto retry;
	}

	if (!err)
		memcpy(areq->result, result, crypto_ahash_digestsize(tfm));
theend:
	local_bh_disable();
	crypto_finalize_hash_request(engine, breq, err);
	local_bh_enable();
	return 0;
}