// SPDX-License-Identifier: GPL-2.0-only
/*
 * Crypto acceleration support for Rockchip RK3288
 *
 * Copyright (c) 2015, Fuzhou Rockchip Electronics Co., Ltd
 *
 * Author: Zain Wang <zain.wang@rock-chips.com>
 *
 * Some ideas are from the marvell-cesa.c and s5p-sss.c drivers.
 */
#include "rk3288_crypto.h"

#define RK_CRYPTO_DEC			BIT(0)

static void rk_crypto_complete(struct crypto_async_request *base, int err)
{
	if (base->complete)
		base->complete(base, err);
}

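/*
 * Reject requests the engine cannot handle: the hardware processes whole
 * blocks only, so cryptlen must be a multiple of the transform's alignment
 * size (equal to the block size for the ciphers below). Valid requests are
 * put on the device queue.
 */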
static int rk_handle_req(struct rk_crypto_info *dev,
			 struct skcipher_request *req)
{
	if (!IS_ALIGNED(req->cryptlen, dev->align_size))
		return -EINVAL;
	else
		return dev->enqueue(dev, &req->base);
}

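/*
 * AES setkey: validate the key length (128/192/256 bits) and program the
 * key straight into the engine's AES key registers. Only the key length
 * is kept in the context, for the mode setup done later in
 * rk_ablk_hw_init().
 */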
static int rk_aes_setkey(struct crypto_skcipher *cipher,
			 const u8 *key, unsigned int keylen)
{
	struct crypto_tfm *tfm = crypto_skcipher_tfm(cipher);
	struct rk_cipher_ctx *ctx = crypto_tfm_ctx(tfm);

	if (keylen != AES_KEYSIZE_128 && keylen != AES_KEYSIZE_192 &&
	    keylen != AES_KEYSIZE_256) {
		crypto_skcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}
	ctx->keylen = keylen;
	memcpy_toio(ctx->dev->reg + RK_CRYPTO_AES_KEY_0, key, keylen);
	return 0;
}

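/*
 * DES setkey: let the core helper reject weak keys (subject to the tfm's
 * request flags), then write the key into the first TDES key register
 * bank.
 */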
static int rk_des_setkey(struct crypto_skcipher *cipher,
			 const u8 *key, unsigned int keylen)
{
	struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(cipher);
	int err;

	err = verify_skcipher_des_key(cipher, key);
	if (err)
		return err;

	ctx->keylen = keylen;
	memcpy_toio(ctx->dev->reg + RK_CRYPTO_TDES_KEY1_0, key, keylen);
	return 0;
}

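/*
 * 3DES-EDE setkey: the core helper rejects degenerate key schedules, then
 * all three subkeys are written in one go; the TDES key register banks are
 * laid out contiguously starting at RK_CRYPTO_TDES_KEY1_0.
 */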
static int rk_tdes_setkey(struct crypto_skcipher *cipher,
			  const u8 *key, unsigned int keylen)
{
	struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(cipher);
	int err;

	err = verify_skcipher_des3_key(cipher, key);
	if (err)
		return err;

	ctx->keylen = keylen;
	memcpy_toio(ctx->dev->reg + RK_CRYPTO_TDES_KEY1_0, key, keylen);
	return 0;
}

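/*
 * The encrypt/decrypt entry points below differ only in the mode word they
 * latch into the context: a cipher/chain-mode selector, optionally OR'ed
 * with RK_CRYPTO_DEC for decryption. For plain DES, ECB encryption is the
 * hardware default, hence mode 0. The transfer itself is set up later, in
 * the common rk_handle_req()/rk_ablk_start() path.
 */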
static int rk_aes_ecb_encrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct rk_crypto_info *dev = ctx->dev;

	ctx->mode = RK_CRYPTO_AES_ECB_MODE;
	return rk_handle_req(dev, req);
}

static int rk_aes_ecb_decrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct rk_crypto_info *dev = ctx->dev;

	ctx->mode = RK_CRYPTO_AES_ECB_MODE | RK_CRYPTO_DEC;
	return rk_handle_req(dev, req);
}

static int rk_aes_cbc_encrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct rk_crypto_info *dev = ctx->dev;

	ctx->mode = RK_CRYPTO_AES_CBC_MODE;
	return rk_handle_req(dev, req);
}

static int rk_aes_cbc_decrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct rk_crypto_info *dev = ctx->dev;

	ctx->mode = RK_CRYPTO_AES_CBC_MODE | RK_CRYPTO_DEC;
	return rk_handle_req(dev, req);
}

static int rk_des_ecb_encrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct rk_crypto_info *dev = ctx->dev;

	ctx->mode = 0;
	return rk_handle_req(dev, req);
}

static int rk_des_ecb_decrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct rk_crypto_info *dev = ctx->dev;

	ctx->mode = RK_CRYPTO_DEC;
	return rk_handle_req(dev, req);
}

static int rk_des_cbc_encrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct rk_crypto_info *dev = ctx->dev;

	ctx->mode = RK_CRYPTO_TDES_CHAINMODE_CBC;
	return rk_handle_req(dev, req);
}

static int rk_des_cbc_decrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct rk_crypto_info *dev = ctx->dev;

	ctx->mode = RK_CRYPTO_TDES_CHAINMODE_CBC | RK_CRYPTO_DEC;
	return rk_handle_req(dev, req);
}

static int rk_des3_ede_ecb_encrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct rk_crypto_info *dev = ctx->dev;

	ctx->mode = RK_CRYPTO_TDES_SELECT;
	return rk_handle_req(dev, req);
}

static int rk_des3_ede_ecb_decrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct rk_crypto_info *dev = ctx->dev;

	ctx->mode = RK_CRYPTO_TDES_SELECT | RK_CRYPTO_DEC;
	return rk_handle_req(dev, req);
}

static int rk_des3_ede_cbc_encrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct rk_crypto_info *dev = ctx->dev;

	ctx->mode = RK_CRYPTO_TDES_SELECT | RK_CRYPTO_TDES_CHAINMODE_CBC;
	return rk_handle_req(dev, req);
}

static int rk_des3_ede_cbc_decrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct rk_crypto_info *dev = ctx->dev;

	ctx->mode = RK_CRYPTO_TDES_SELECT | RK_CRYPTO_TDES_CHAINMODE_CBC |
		    RK_CRYPTO_DEC;
	return rk_handle_req(dev, req);
}

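/*
 * Program the engine for the current request: select the TDES or AES unit
 * from the transform's block size, enable FIFO mode and the key/IV byte
 * swapping the hardware expects, load the IV, and unmask the block-cipher
 * DMA done/error interrupts.
 */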
static void rk_ablk_hw_init(struct rk_crypto_info *dev)
{
	struct skcipher_request *req =
		skcipher_request_cast(dev->async_req);
	struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(req);
	struct crypto_tfm *tfm = crypto_skcipher_tfm(cipher);
	struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(cipher);
	u32 ivsize, block, conf_reg = 0;

	block = crypto_tfm_alg_blocksize(tfm);
	ivsize = crypto_skcipher_ivsize(cipher);

	if (block == DES_BLOCK_SIZE) {
		ctx->mode |= RK_CRYPTO_TDES_FIFO_MODE |
			     RK_CRYPTO_TDES_BYTESWAP_KEY |
			     RK_CRYPTO_TDES_BYTESWAP_IV;
		CRYPTO_WRITE(dev, RK_CRYPTO_TDES_CTRL, ctx->mode);
		memcpy_toio(dev->reg + RK_CRYPTO_TDES_IV_0, req->iv, ivsize);
		conf_reg = RK_CRYPTO_DESSEL;
	} else {
		ctx->mode |= RK_CRYPTO_AES_FIFO_MODE |
			     RK_CRYPTO_AES_KEY_CHANGE |
			     RK_CRYPTO_AES_BYTESWAP_KEY |
			     RK_CRYPTO_AES_BYTESWAP_IV;
		if (ctx->keylen == AES_KEYSIZE_192)
			ctx->mode |= RK_CRYPTO_AES_192BIT_key;
		else if (ctx->keylen == AES_KEYSIZE_256)
			ctx->mode |= RK_CRYPTO_AES_256BIT_key;
		CRYPTO_WRITE(dev, RK_CRYPTO_AES_CTRL, ctx->mode);
		memcpy_toio(dev->reg + RK_CRYPTO_AES_IV_0, req->iv, ivsize);
	}
	conf_reg |= RK_CRYPTO_BYTESWAP_BTFIFO |
		    RK_CRYPTO_BYTESWAP_BRFIFO;
	CRYPTO_WRITE(dev, RK_CRYPTO_CONF, conf_reg);
	CRYPTO_WRITE(dev, RK_CRYPTO_INTENA,
		     RK_CRYPTO_BCDMA_ERR_ENA | RK_CRYPTO_BCDMA_DONE_ENA);
}

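/*
 * Kick off one DMA transfer. The source/destination bus addresses were set
 * up by load_data(); the length register takes the count in 32-bit words.
 * Writing BLOCK_START, together with the same bit shifted into the upper
 * half-word (which appears to act as a write-enable mask, per the usual
 * Rockchip register convention), starts the engine.
 */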
static void crypto_dma_start(struct rk_crypto_info *dev)
{
	CRYPTO_WRITE(dev, RK_CRYPTO_BRDMAS, dev->addr_in);
	CRYPTO_WRITE(dev, RK_CRYPTO_BRDMAL, dev->count / 4);
	CRYPTO_WRITE(dev, RK_CRYPTO_BTDMAS, dev->addr_out);
	CRYPTO_WRITE(dev, RK_CRYPTO_CTRL, RK_CRYPTO_BLOCK_START |
		     _SBF(RK_CRYPTO_BLOCK_START, 16));
}

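/*
 * Prepare the next scatterlist chunk. For CBC decryption, the ciphertext
 * that will become the next IV is saved before an in-place operation can
 * overwrite it: the last block of the current chunk goes to ctx->iv, and
 * req->iv is pre-loaded with the last block of the whole source so that it
 * already holds the next IV when the request completes.
 */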
static int rk_set_data_start(struct rk_crypto_info *dev)
{
	int err;
	struct skcipher_request *req =
		skcipher_request_cast(dev->async_req);
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
	u32 ivsize = crypto_skcipher_ivsize(tfm);
	u8 *src_last_blk = page_address(sg_page(dev->sg_src)) +
		dev->sg_src->offset + dev->sg_src->length - ivsize;

	/*
	 * Store the IV that needs to be updated in chain mode, and update
	 * the IV buffer to contain the next IV for decryption mode.
	 */
	if (ctx->mode & RK_CRYPTO_DEC) {
		memcpy(ctx->iv, src_last_blk, ivsize);
		sg_pcopy_to_buffer(dev->first, dev->src_nents, req->iv,
				   ivsize, dev->total - ivsize);
	}

	err = dev->load_data(dev, dev->sg_src, dev->sg_dst);
	if (!err)
		crypto_dma_start(dev);
	return err;
}

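/*
 * Begin processing a queued skcipher request: capture the request geometry
 * in the device state, then initialize the hardware and start the first
 * DMA chunk under the device lock.
 */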
static int rk_ablk_start(struct rk_crypto_info *dev)
{
	struct skcipher_request *req =
		skcipher_request_cast(dev->async_req);
	unsigned long flags;
	int err = 0;

	dev->left_bytes = req->cryptlen;
	dev->total = req->cryptlen;
	dev->sg_src = req->src;
	dev->first = req->src;
	dev->src_nents = sg_nents(req->src);
	dev->sg_dst = req->dst;
	dev->dst_nents = sg_nents(req->dst);
	dev->aligned = 1;

	spin_lock_irqsave(&dev->lock, flags);
	rk_ablk_hw_init(dev);
	err = rk_set_data_start(dev);
	spin_unlock_irqrestore(&dev->lock, flags);
	return err;
}

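/*
 * On completion of an encryption, copy the last ciphertext block back into
 * req->iv so that chained CBC requests see the correct next IV. The source
 * is either the destination scatterlist or the bounce buffer, depending on
 * whether the request was aligned. (Decryption updates req->iv up front,
 * in rk_set_data_start().)
 */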
static void rk_iv_copyback(struct rk_crypto_info *dev)
{
	struct skcipher_request *req =
		skcipher_request_cast(dev->async_req);
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
	u32 ivsize = crypto_skcipher_ivsize(tfm);

	/* Update the IV buffer to contain the next IV for encryption mode. */
	if (!(ctx->mode & RK_CRYPTO_DEC)) {
		if (dev->aligned) {
			memcpy(req->iv, sg_virt(dev->sg_dst) +
				dev->sg_dst->length - ivsize, ivsize);
		} else {
			memcpy(req->iv, dev->addr_vir +
				dev->count - ivsize, ivsize);
		}
	}
}

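/*
 * Between chunks of one request, reload the hardware IV register with the
 * last block just processed: the ciphertext saved in ctx->iv for
 * decryption, or the freshly produced ciphertext in the destination for
 * encryption.
 */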
static void rk_update_iv(struct rk_crypto_info *dev)
{
	struct skcipher_request *req =
		skcipher_request_cast(dev->async_req);
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
	u32 ivsize = crypto_skcipher_ivsize(tfm);
	u8 *new_iv = NULL;

	if (ctx->mode & RK_CRYPTO_DEC) {
		new_iv = ctx->iv;
	} else {
		new_iv = page_address(sg_page(dev->sg_dst)) +
			 dev->sg_dst->offset + dev->sg_dst->length - ivsize;
	}

	if (ivsize == DES_BLOCK_SIZE)
		memcpy_toio(dev->reg + RK_CRYPTO_TDES_IV_0, new_iv, ivsize);
	else if (ivsize == AES_BLOCK_SIZE)
		memcpy_toio(dev->reg + RK_CRYPTO_AES_IV_0, new_iv, ivsize);
}

/*
 * Completion handler for one DMA chunk: unload the data, copy the bounce
 * buffer back to the destination when the request was misaligned, and
 * either start the next chunk or finish the request.
 *
 * Return: 0 when no error occurred and processing may continue,
 * a negative error code otherwise.
 */
static int rk_ablk_rx(struct rk_crypto_info *dev)
{
	int err = 0;
	struct skcipher_request *req =
		skcipher_request_cast(dev->async_req);

	dev->unload_data(dev);
	if (!dev->aligned) {
		if (!sg_pcopy_from_buffer(req->dst, dev->dst_nents,
					  dev->addr_vir, dev->count,
					  dev->total - dev->left_bytes -
					  dev->count)) {
			err = -EINVAL;
			goto out_rx;
		}
	}
	if (dev->left_bytes) {
		rk_update_iv(dev);
		if (dev->aligned) {
			if (sg_is_last(dev->sg_src)) {
				dev_err(dev->dev, "[%s:%d] Lack of data\n",
					__func__, __LINE__);
				err = -ENOMEM;
				goto out_rx;
			}
			dev->sg_src = sg_next(dev->sg_src);
			dev->sg_dst = sg_next(dev->sg_dst);
		}
		err = rk_set_data_start(dev);
	} else {
		rk_iv_copyback(dev);
		/* The whole request finished without any error. */
		dev->complete(dev->async_req, 0);
		tasklet_schedule(&dev->queue_task);
	}
out_rx:
	return err;
}

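/*
 * Per-transform init: look up the owning device from the algorithm
 * template, wire up the driver callbacks, allocate one page as a bounce
 * buffer for misaligned data, and enable the clocks.
 */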
static int rk_ablk_init_tfm(struct crypto_skcipher *tfm)
{
	struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
	struct rk_crypto_tmp *algt;

	algt = container_of(alg, struct rk_crypto_tmp, alg.skcipher);

	ctx->dev = algt->dev;
	ctx->dev->align_size = crypto_tfm_alg_alignmask(crypto_skcipher_tfm(tfm)) + 1;
	ctx->dev->start = rk_ablk_start;
	ctx->dev->update = rk_ablk_rx;
	ctx->dev->complete = rk_crypto_complete;
	ctx->dev->addr_vir = (char *)__get_free_page(GFP_KERNEL);

	return ctx->dev->addr_vir ? ctx->dev->enable_clk(ctx->dev) : -ENOMEM;
}

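/* Per-transform teardown: release the bounce page and the clocks. */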
static void rk_ablk_exit_tfm(struct crypto_skcipher *tfm)
{
	struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);

	free_page((unsigned long)ctx->dev->addr_vir);
	ctx->dev->disable_clk(ctx->dev);
}

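/*
 * Algorithm templates registered with the crypto API. All of them are
 * marked ASYNC and share the init/exit helpers above; the alignmask
 * mirrors the block size, so rk_handle_req() only ever sees whole blocks.
 */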
struct rk_crypto_tmp rk_ecb_aes_alg = {
	.type = ALG_TYPE_CIPHER,
	.alg.skcipher = {
		.base.cra_name		= "ecb(aes)",
		.base.cra_driver_name	= "ecb-aes-rk",
		.base.cra_priority	= 300,
		.base.cra_flags		= CRYPTO_ALG_ASYNC,
		.base.cra_blocksize	= AES_BLOCK_SIZE,
		.base.cra_ctxsize	= sizeof(struct rk_cipher_ctx),
		.base.cra_alignmask	= 0x0f,
		.base.cra_module	= THIS_MODULE,

		.init			= rk_ablk_init_tfm,
		.exit			= rk_ablk_exit_tfm,
		.min_keysize		= AES_MIN_KEY_SIZE,
		.max_keysize		= AES_MAX_KEY_SIZE,
		.setkey			= rk_aes_setkey,
		.encrypt		= rk_aes_ecb_encrypt,
		.decrypt		= rk_aes_ecb_decrypt,
	}
};

struct rk_crypto_tmp rk_cbc_aes_alg = {
	.type = ALG_TYPE_CIPHER,
	.alg.skcipher = {
		.base.cra_name		= "cbc(aes)",
		.base.cra_driver_name	= "cbc-aes-rk",
		.base.cra_priority	= 300,
		.base.cra_flags		= CRYPTO_ALG_ASYNC,
		.base.cra_blocksize	= AES_BLOCK_SIZE,
		.base.cra_ctxsize	= sizeof(struct rk_cipher_ctx),
		.base.cra_alignmask	= 0x0f,
		.base.cra_module	= THIS_MODULE,

		.init			= rk_ablk_init_tfm,
		.exit			= rk_ablk_exit_tfm,
		.min_keysize		= AES_MIN_KEY_SIZE,
		.max_keysize		= AES_MAX_KEY_SIZE,
		.ivsize			= AES_BLOCK_SIZE,
		.setkey			= rk_aes_setkey,
		.encrypt		= rk_aes_cbc_encrypt,
		.decrypt		= rk_aes_cbc_decrypt,
	}
};

struct rk_crypto_tmp rk_ecb_des_alg = {
	.type = ALG_TYPE_CIPHER,
	.alg.skcipher = {
		.base.cra_name		= "ecb(des)",
		.base.cra_driver_name	= "ecb-des-rk",
		.base.cra_priority	= 300,
		.base.cra_flags		= CRYPTO_ALG_ASYNC,
		.base.cra_blocksize	= DES_BLOCK_SIZE,
		.base.cra_ctxsize	= sizeof(struct rk_cipher_ctx),
		.base.cra_alignmask	= 0x07,
		.base.cra_module	= THIS_MODULE,

		.init			= rk_ablk_init_tfm,
		.exit			= rk_ablk_exit_tfm,
		.min_keysize		= DES_KEY_SIZE,
		.max_keysize		= DES_KEY_SIZE,
		.setkey			= rk_des_setkey,
		.encrypt		= rk_des_ecb_encrypt,
		.decrypt		= rk_des_ecb_decrypt,
	}
};

struct rk_crypto_tmp rk_cbc_des_alg = {
	.type = ALG_TYPE_CIPHER,
	.alg.skcipher = {
		.base.cra_name		= "cbc(des)",
		.base.cra_driver_name	= "cbc-des-rk",
		.base.cra_priority	= 300,
		.base.cra_flags		= CRYPTO_ALG_ASYNC,
		.base.cra_blocksize	= DES_BLOCK_SIZE,
		.base.cra_ctxsize	= sizeof(struct rk_cipher_ctx),
		.base.cra_alignmask	= 0x07,
		.base.cra_module	= THIS_MODULE,

		.init			= rk_ablk_init_tfm,
		.exit			= rk_ablk_exit_tfm,
		.min_keysize		= DES_KEY_SIZE,
		.max_keysize		= DES_KEY_SIZE,
		.ivsize			= DES_BLOCK_SIZE,
		.setkey			= rk_des_setkey,
		.encrypt		= rk_des_cbc_encrypt,
		.decrypt		= rk_des_cbc_decrypt,
	}
};

struct rk_crypto_tmp rk_ecb_des3_ede_alg = {
	.type = ALG_TYPE_CIPHER,
	.alg.skcipher = {
		.base.cra_name		= "ecb(des3_ede)",
		.base.cra_driver_name	= "ecb-des3-ede-rk",
		.base.cra_priority	= 300,
		.base.cra_flags		= CRYPTO_ALG_ASYNC,
		.base.cra_blocksize	= DES_BLOCK_SIZE,
		.base.cra_ctxsize	= sizeof(struct rk_cipher_ctx),
		.base.cra_alignmask	= 0x07,
		.base.cra_module	= THIS_MODULE,

		.init			= rk_ablk_init_tfm,
		.exit			= rk_ablk_exit_tfm,
		.min_keysize		= DES3_EDE_KEY_SIZE,
		.max_keysize		= DES3_EDE_KEY_SIZE,
		.ivsize			= DES_BLOCK_SIZE,
		.setkey			= rk_tdes_setkey,
		.encrypt		= rk_des3_ede_ecb_encrypt,
		.decrypt		= rk_des3_ede_ecb_decrypt,
	}
};

struct rk_crypto_tmp rk_cbc_des3_ede_alg = {
	.type = ALG_TYPE_CIPHER,
	.alg.skcipher = {
		.base.cra_name		= "cbc(des3_ede)",
		.base.cra_driver_name	= "cbc-des3-ede-rk",
		.base.cra_priority	= 300,
		.base.cra_flags		= CRYPTO_ALG_ASYNC,
		.base.cra_blocksize	= DES_BLOCK_SIZE,
		.base.cra_ctxsize	= sizeof(struct rk_cipher_ctx),
		.base.cra_alignmask	= 0x07,
		.base.cra_module	= THIS_MODULE,

		.init			= rk_ablk_init_tfm,
		.exit			= rk_ablk_exit_tfm,
		.min_keysize		= DES3_EDE_KEY_SIZE,
		.max_keysize		= DES3_EDE_KEY_SIZE,
		.ivsize			= DES_BLOCK_SIZE,
		.setkey			= rk_tdes_setkey,
		.encrypt		= rk_des3_ede_cbc_encrypt,
		.decrypt		= rk_des3_ede_cbc_decrypt,
	}
};