// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * sun4i-ss-cipher.c - hardware cryptographic accelerator for Allwinner A20 SoC
 *
 * Copyright (C) 2013-2015 Corentin LABBE <clabbe.montjoie@gmail.com>
 *
 * This file adds support for the AES cipher with 128, 192 and 256 bit
 * keysizes in CBC and ECB mode, as well as for DES and 3DES in CBC and
 * ECB mode.
 *
 * You can find the datasheet in Documentation/arm/sunxi.rst
 */
#include "sun4i-ss.h"

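/*
 * Optimized path: usable only when every source and destination SG entry
 * has a length that is a multiple of 4, so data can be moved to and from
 * the FIFOs word by word without any bounce buffer.
 */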
static int noinline_for_stack sun4i_ss_opti_poll(struct skcipher_request *areq)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sun4i_ss_ctx *ss = op->ss;
	unsigned int ivsize = crypto_skcipher_ivsize(tfm);
	struct sun4i_cipher_req_ctx *ctx = skcipher_request_ctx(areq);
	u32 mode = ctx->mode;
	/* when activating SS, the default FIFO space is SS_RX_DEFAULT(32) */
	u32 rx_cnt = SS_RX_DEFAULT;
	u32 tx_cnt = 0;
	u32 spaces;
	u32 v;
	int err = 0;
	unsigned int i;
	unsigned int ileft = areq->cryptlen;
	unsigned int oleft = areq->cryptlen;
	unsigned int todo;
	struct sg_mapping_iter mi, mo;
	unsigned int oi, oo; /* offset for in and out */
	unsigned long flags;

	if (!areq->cryptlen)
		return 0;

	if (!areq->src || !areq->dst) {
		dev_err_ratelimited(ss->dev, "ERROR: Some SGs are NULL\n");
		return -EINVAL;
	}

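	/* The SS is a shared resource; serialize all register access. */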
	spin_lock_irqsave(&ss->slock, flags);

	for (i = 0; i < op->keylen; i += 4)
		writel(*(op->key + i / 4), ss->base + SS_KEY0 + i);

	if (areq->iv) {
		for (i = 0; i < 4 && i < ivsize / 4; i++) {
			v = *(u32 *)(areq->iv + i * 4);
			writel(v, ss->base + SS_IV0 + i * 4);
		}
	}
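	/* The mode word written to SS_CTL includes SS_ENABLED, enabling the engine. */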
	writel(mode, ss->base + SS_CTL);

	sg_miter_start(&mi, areq->src, sg_nents(areq->src),
		       SG_MITER_FROM_SG | SG_MITER_ATOMIC);
	sg_miter_start(&mo, areq->dst, sg_nents(areq->dst),
		       SG_MITER_TO_SG | SG_MITER_ATOMIC);
	sg_miter_next(&mi);
	sg_miter_next(&mo);
	if (!mi.addr || !mo.addr) {
		dev_err_ratelimited(ss->dev, "ERROR: sg_miter returned NULL\n");
		err = -EINVAL;
		goto release_ss;
	}

	ileft = areq->cryptlen / 4;
	oleft = areq->cryptlen / 4;
	oi = 0;
	oo = 0;
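	/*
	 * Pump data through the engine: feed the RX FIFO from the source SG
	 * and drain the TX FIFO into the destination SG until all output has
	 * been retrieved.
	 */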
	do {
		todo = min(rx_cnt, ileft);
		todo = min_t(size_t, todo, (mi.length - oi) / 4);
		if (todo) {
			ileft -= todo;
			writesl(ss->base + SS_RXFIFO, mi.addr + oi, todo);
			oi += todo * 4;
		}
		if (oi == mi.length) {
			sg_miter_next(&mi);
			oi = 0;
		}

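		/* Refresh the free/filled FIFO word counts from SS_FCSR. */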
		spaces = readl(ss->base + SS_FCSR);
		rx_cnt = SS_RXFIFO_SPACES(spaces);
		tx_cnt = SS_TXFIFO_SPACES(spaces);

		todo = min(tx_cnt, oleft);
		todo = min_t(size_t, todo, (mo.length - oo) / 4);
		if (todo) {
			oleft -= todo;
			readsl(ss->base + SS_TXFIFO, mo.addr + oo, todo);
			oo += todo * 4;
		}
		if (oo == mo.length) {
			sg_miter_next(&mo);
			oo = 0;
		}
	} while (oleft);

	if (areq->iv) {
		for (i = 0; i < 4 && i < ivsize / 4; i++) {
			v = readl(ss->base + SS_IV0 + i * 4);
			*(u32 *)(areq->iv + i * 4) = v;
		}
	}

release_ss:
	sg_miter_stop(&mi);
	sg_miter_stop(&mo);
	writel(0, ss->base + SS_CTL);
	spin_unlock_irqrestore(&ss->slock, flags);
	return err;
}
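/*
 * Fall back to the software implementation for requests the hardware
 * cannot handle, e.g. a cryptlen that is not a multiple of the block size.
 */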
static int noinline_for_stack sun4i_ss_cipher_poll_fallback(struct skcipher_request *areq)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sun4i_cipher_req_ctx *ctx = skcipher_request_ctx(areq);
	int err;

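	/*
	 * The fallback request lives inside our request context (sized in
	 * sun4i_ss_cipher_init()), so it only has to be re-targeted at the
	 * software tfm.
	 */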
	skcipher_request_set_tfm(&ctx->fallback_req, op->fallback_tfm);
	skcipher_request_set_callback(&ctx->fallback_req, areq->base.flags,
				      areq->base.complete, areq->base.data);
	skcipher_request_set_crypt(&ctx->fallback_req, areq->src, areq->dst,
				   areq->cryptlen, areq->iv);
	if (ctx->mode & SS_DECRYPTION)
		err = crypto_skcipher_decrypt(&ctx->fallback_req);
	else
		err = crypto_skcipher_encrypt(&ctx->fallback_req);

	return err;
}

/* Generic function that supports SGs with sizes that are not a multiple of 4 */
static int sun4i_ss_cipher_poll(struct skcipher_request *areq)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sun4i_ss_ctx *ss = op->ss;
	int no_chunk = 1;
	struct scatterlist *in_sg = areq->src;
	struct scatterlist *out_sg = areq->dst;
	unsigned int ivsize = crypto_skcipher_ivsize(tfm);
	struct sun4i_cipher_req_ctx *ctx = skcipher_request_ctx(areq);
	struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
	struct sun4i_ss_alg_template *algt;
	u32 mode = ctx->mode;
	/* when activating SS, the default FIFO space is SS_RX_DEFAULT(32) */
	u32 rx_cnt = SS_RX_DEFAULT;
	u32 tx_cnt = 0;
	u32 v;
	u32 spaces;
	int err = 0;
	unsigned int i;
	unsigned int ileft = areq->cryptlen;
	unsigned int oleft = areq->cryptlen;
	unsigned int todo;
	struct sg_mapping_iter mi, mo;
	unsigned int oi, oo;	/* offset for in and out */
	unsigned int ob = 0;	/* offset in buf */
	unsigned int obo = 0;	/* offset in bufo */
	unsigned int obl = 0;	/* length of data in bufo */
	unsigned long flags;
	bool need_fallback = false;

	if (!areq->cryptlen)
		return 0;

	if (!areq->src || !areq->dst) {
		dev_err_ratelimited(ss->dev, "ERROR: Some SGs are NULL\n");
		return -EINVAL;
	}

	algt = container_of(alg, struct sun4i_ss_alg_template, alg.crypto);
	if (areq->cryptlen % algt->alg.crypto.base.cra_blocksize)
		need_fallback = true;

	/*
	 * if we have only SGs with size multiple of 4,
	 * we can use the SS optimized function
	 */
	while (in_sg && no_chunk == 1) {
		if (in_sg->length % 4)
			no_chunk = 0;
		in_sg = sg_next(in_sg);
	}
	while (out_sg && no_chunk == 1) {
		if (out_sg->length % 4)
			no_chunk = 0;
		out_sg = sg_next(out_sg);
	}

	if (no_chunk == 1 && !need_fallback)
		return sun4i_ss_opti_poll(areq);

	if (need_fallback)
		return sun4i_ss_cipher_poll_fallback(areq);

	spin_lock_irqsave(&ss->slock, flags);

	for (i = 0; i < op->keylen; i += 4)
		writel(*(op->key + i / 4), ss->base + SS_KEY0 + i);

	if (areq->iv) {
		for (i = 0; i < 4 && i < ivsize / 4; i++) {
			v = *(u32 *)(areq->iv + i * 4);
			writel(v, ss->base + SS_IV0 + i * 4);
		}
	}
	writel(mode, ss->base + SS_CTL);

	sg_miter_start(&mi, areq->src, sg_nents(areq->src),
		       SG_MITER_FROM_SG | SG_MITER_ATOMIC);
	sg_miter_start(&mo, areq->dst, sg_nents(areq->dst),
		       SG_MITER_TO_SG | SG_MITER_ATOMIC);
	sg_miter_next(&mi);
	sg_miter_next(&mo);
	if (!mi.addr || !mo.addr) {
		dev_err_ratelimited(ss->dev, "ERROR: sg_miter returned NULL\n");
		err = -EINVAL;
		goto release_ss;
	}
	ileft = areq->cryptlen;
	oleft = areq->cryptlen;
	oi = 0;
	oo = 0;

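	/*
	 * Same pump loop as the optimized path, but with stack bounce
	 * buffers so that SG entries of any length can be packed into
	 * whole 4-byte FIFO words.
	 */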
	while (oleft) {
		if (ileft) {
			char buf[4 * SS_RX_MAX]; /* buffer for linearizing SG src */

			/*
			 * todo is the number of consecutive 4-byte words that
			 * we can read from the current SG
			 */
			todo = min(rx_cnt, ileft / 4);
			todo = min_t(size_t, todo, (mi.length - oi) / 4);
			if (todo && !ob) {
				writesl(ss->base + SS_RXFIFO, mi.addr + oi,
					todo);
				ileft -= todo * 4;
				oi += todo * 4;
			} else {
				/*
				 * Not enough consecutive bytes, so linearize
				 * them in buf; here todo is in bytes. Once
				 * the buffer holds a multiple of 4 bytes we
				 * must be able to write it out in one pass,
				 * which is why we min() with rx_cnt.
				 */
				todo = min(rx_cnt * 4 - ob, ileft);
				todo = min_t(size_t, todo, mi.length - oi);
				memcpy(buf + ob, mi.addr + oi, todo);
				ileft -= todo;
				oi += todo;
				ob += todo;
				if (!(ob % 4)) {
					writesl(ss->base + SS_RXFIFO, buf,
						ob / 4);
					ob = 0;
				}
			}
			if (oi == mi.length) {
				sg_miter_next(&mi);
				oi = 0;
			}
		}

		spaces = readl(ss->base + SS_FCSR);
		rx_cnt = SS_RXFIFO_SPACES(spaces);
		tx_cnt = SS_TXFIFO_SPACES(spaces);
		dev_dbg(ss->dev,
			"%x %u/%zu %u/%u cnt=%u %u/%zu %u/%u cnt=%u %u\n",
			mode,
			oi, mi.length, ileft, areq->cryptlen, rx_cnt,
			oo, mo.length, oleft, areq->cryptlen, tx_cnt, ob);

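		/* No output word available yet: keep feeding the RX FIFO. */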
		if (!tx_cnt)
			continue;
		/* todo is in 4-byte words */
		todo = min(tx_cnt, oleft / 4);
		todo = min_t(size_t, todo, (mo.length - oo) / 4);
		if (todo) {
			readsl(ss->base + SS_TXFIFO, mo.addr + oo, todo);
			oleft -= todo * 4;
			oo += todo * 4;
			if (oo == mo.length) {
				sg_miter_next(&mo);
				oo = 0;
			}
		} else {
			char bufo[4 * SS_TX_MAX]; /* buffer for linearizing SG dst */

			/*
			 * Read obl bytes into bufo; read as much as possible
			 * to empty the device FIFO.
			 */
			readsl(ss->base + SS_TXFIFO, bufo, tx_cnt);
			obl = tx_cnt * 4;
			obo = 0;
			do {
				/*
				 * How many bytes can we copy? No more than
				 * the remaining SG size and no more than the
				 * remaining buffer; no need to test against
				 * oleft.
				 */
				todo = min_t(size_t,
					     mo.length - oo, obl - obo);
				memcpy(mo.addr + oo, bufo + obo, todo);
				oleft -= todo;
				obo += todo;
				oo += todo;
				if (oo == mo.length) {
					sg_miter_next(&mo);
					oo = 0;
				}
			} while (obo < obl);
			/* bufo must be fully used here */
		}
	}
	if (areq->iv) {
		for (i = 0; i < 4 && i < ivsize / 4; i++) {
			v = readl(ss->base + SS_IV0 + i * 4);
			*(u32 *)(areq->iv + i * 4) = v;
		}
	}

release_ss:
	sg_miter_stop(&mi);
	sg_miter_stop(&mo);
	writel(0, ss->base + SS_CTL);
	spin_unlock_irqrestore(&ss->slock, flags);

	return err;
}

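/*
 * The entry points below only assemble the SS_CTL control word
 * (algorithm, chaining mode, direction, keysize) for the request and
 * delegate the actual work to sun4i_ss_cipher_poll().
 */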
/* CBC AES */
int sun4i_ss_cbc_aes_encrypt(struct skcipher_request *areq)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sun4i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);

	rctx->mode = SS_OP_AES | SS_CBC | SS_ENABLED | SS_ENCRYPTION |
		op->keymode;
	return sun4i_ss_cipher_poll(areq);
}

int sun4i_ss_cbc_aes_decrypt(struct skcipher_request *areq)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sun4i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);

	rctx->mode = SS_OP_AES | SS_CBC | SS_ENABLED | SS_DECRYPTION |
		op->keymode;
	return sun4i_ss_cipher_poll(areq);
}

/* ECB AES */
int sun4i_ss_ecb_aes_encrypt(struct skcipher_request *areq)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sun4i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);

	rctx->mode = SS_OP_AES | SS_ECB | SS_ENABLED | SS_ENCRYPTION |
		op->keymode;
	return sun4i_ss_cipher_poll(areq);
}

int sun4i_ss_ecb_aes_decrypt(struct skcipher_request *areq)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sun4i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);

	rctx->mode = SS_OP_AES | SS_ECB | SS_ENABLED | SS_DECRYPTION |
		op->keymode;
	return sun4i_ss_cipher_poll(areq);
}

/* CBC DES */
int sun4i_ss_cbc_des_encrypt(struct skcipher_request *areq)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sun4i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);

	rctx->mode = SS_OP_DES | SS_CBC | SS_ENABLED | SS_ENCRYPTION |
		op->keymode;
	return sun4i_ss_cipher_poll(areq);
}

int sun4i_ss_cbc_des_decrypt(struct skcipher_request *areq)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sun4i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);

	rctx->mode = SS_OP_DES | SS_CBC | SS_ENABLED | SS_DECRYPTION |
		op->keymode;
	return sun4i_ss_cipher_poll(areq);
}

/* ECB DES */
int sun4i_ss_ecb_des_encrypt(struct skcipher_request *areq)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sun4i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);

	rctx->mode = SS_OP_DES | SS_ECB | SS_ENABLED | SS_ENCRYPTION |
		op->keymode;
	return sun4i_ss_cipher_poll(areq);
}

int sun4i_ss_ecb_des_decrypt(struct skcipher_request *areq)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sun4i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);

	rctx->mode = SS_OP_DES | SS_ECB | SS_ENABLED | SS_DECRYPTION |
		op->keymode;
	return sun4i_ss_cipher_poll(areq);
}

/* CBC 3DES */
int sun4i_ss_cbc_des3_encrypt(struct skcipher_request *areq)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sun4i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);

	rctx->mode = SS_OP_3DES | SS_CBC | SS_ENABLED | SS_ENCRYPTION |
		op->keymode;
	return sun4i_ss_cipher_poll(areq);
}

int sun4i_ss_cbc_des3_decrypt(struct skcipher_request *areq)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sun4i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);

	rctx->mode = SS_OP_3DES | SS_CBC | SS_ENABLED | SS_DECRYPTION |
		op->keymode;
	return sun4i_ss_cipher_poll(areq);
}

/* ECB 3DES */
int sun4i_ss_ecb_des3_encrypt(struct skcipher_request *areq)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sun4i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);

	rctx->mode = SS_OP_3DES | SS_ECB | SS_ENABLED | SS_ENCRYPTION |
		op->keymode;
	return sun4i_ss_cipher_poll(areq);
}

int sun4i_ss_ecb_des3_decrypt(struct skcipher_request *areq)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sun4i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);

	rctx->mode = SS_OP_3DES | SS_ECB | SS_ENABLED | SS_DECRYPTION |
		op->keymode;
	return sun4i_ss_cipher_poll(areq);
}

int sun4i_ss_cipher_init(struct crypto_tfm *tfm)
{
	struct sun4i_tfm_ctx *op = crypto_tfm_ctx(tfm);
	struct sun4i_ss_alg_template *algt;
	const char *name = crypto_tfm_alg_name(tfm);
	int err;

	memset(op, 0, sizeof(struct sun4i_tfm_ctx));

	algt = container_of(tfm->__crt_alg, struct sun4i_ss_alg_template,
			    alg.crypto.base);
	op->ss = algt->ss;

	op->fallback_tfm = crypto_alloc_skcipher(name, 0, CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(op->fallback_tfm)) {
		dev_err(op->ss->dev, "ERROR: Cannot allocate fallback for %s %ld\n",
			name, PTR_ERR(op->fallback_tfm));
		return PTR_ERR(op->fallback_tfm);
	}

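	/*
	 * Reserve room for the fallback request behind our own request
	 * context, so the fallback path needs no per-request allocation.
	 */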
	crypto_skcipher_set_reqsize(__crypto_skcipher_cast(tfm),
				    sizeof(struct sun4i_cipher_req_ctx) +
				    crypto_skcipher_reqsize(op->fallback_tfm));
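	/*
	 * Keep the device resumed for the lifetime of the tfm.
	 * pm_runtime_resume_and_get() drops its PM reference on failure,
	 * so the error path only needs to free the fallback.
	 */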
	err = pm_runtime_resume_and_get(op->ss->dev);
	if (err < 0)
		goto error_pm;

	return 0;
error_pm:
	crypto_free_skcipher(op->fallback_tfm);
	return err;
}

void sun4i_ss_cipher_exit(struct crypto_tfm *tfm)
{
	struct sun4i_tfm_ctx *op = crypto_tfm_ctx(tfm);

	crypto_free_skcipher(op->fallback_tfm);
	pm_runtime_put(op->ss->dev);
}

/* check and set the AES key, prepare the mode to be used */
int sun4i_ss_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
			unsigned int keylen)
{
	struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sun4i_ss_ctx *ss = op->ss;

	switch (keylen) {
	case 128 / 8:
		op->keymode = SS_AES_128BITS;
		break;
	case 192 / 8:
		op->keymode = SS_AES_192BITS;
		break;
	case 256 / 8:
		op->keymode = SS_AES_256BITS;
		break;
	default:
		dev_dbg(ss->dev, "ERROR: Invalid keylen %u\n", keylen);
		return -EINVAL;
	}
	op->keylen = keylen;
	memcpy(op->key, key, keylen);

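	/*
	 * Mirror the request flags and the key onto the fallback tfm so
	 * the software path behaves identically to the hardware one.
	 */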
	crypto_skcipher_clear_flags(op->fallback_tfm, CRYPTO_TFM_REQ_MASK);
	crypto_skcipher_set_flags(op->fallback_tfm, tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK);

	return crypto_skcipher_setkey(op->fallback_tfm, key, keylen);
}

/* check and set the DES key, prepare the mode to be used */
int sun4i_ss_des_setkey(struct crypto_skcipher *tfm, const u8 *key,
			unsigned int keylen)
{
	struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	int err;

	err = verify_skcipher_des_key(tfm, key);
	if (err)
		return err;

	op->keylen = keylen;
	memcpy(op->key, key, keylen);

	crypto_skcipher_clear_flags(op->fallback_tfm, CRYPTO_TFM_REQ_MASK);
	crypto_skcipher_set_flags(op->fallback_tfm, tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK);

	return crypto_skcipher_setkey(op->fallback_tfm, key, keylen);
}

/* check and set the 3DES key, prepare the mode to be used */
int sun4i_ss_des3_setkey(struct crypto_skcipher *tfm, const u8 *key,
			 unsigned int keylen)
{
	struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	int err;

	err = verify_skcipher_des3_key(tfm, key);
	if (err)
		return err;

	op->keylen = keylen;
	memcpy(op->key, key, keylen);

	crypto_skcipher_clear_flags(op->fallback_tfm, CRYPTO_TFM_REQ_MASK);
	crypto_skcipher_set_flags(op->fallback_tfm, tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK);

	return crypto_skcipher_setkey(op->fallback_tfm, key, keylen);
}