// SPDX-License-Identifier: GPL-2.0-only
/*
 * Crypto acceleration support for Rockchip RK3288
 *
 * Copyright (c) 2015, Fuzhou Rockchip Electronics Co., Ltd
 *
 * Author: Zain Wang <zain.wang@rock-chips.com>
 *
 * Some ideas are from the marvell-cesa.c and s5p-sss.c drivers.
 */
#include "rk3288_crypto.h"

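/* Driver-internal flag ORed into ctx->mode to select the decrypt direction. */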
#define RK_CRYPTO_DEC			BIT(0)

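/* Propagate completion of an asynchronous request back to its submitter. */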
static void rk_crypto_complete(struct crypto_async_request *base, int err)
{
	if (base->complete)
		base->complete(base, err);
}

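/*
 * Reject requests whose length is not a multiple of the device's alignment
 * size (set from the algorithm's alignmask in rk_ablk_init_tfm()); otherwise
 * queue the request for the hardware.
 */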
static int rk_handle_req(struct rk_crypto_info *dev,
			 struct skcipher_request *req)
{
	if (!IS_ALIGNED(req->cryptlen, dev->align_size))
		return -EINVAL;
	else
		return dev->enqueue(dev, &req->base);
}

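/*
 * Only the three standard AES key sizes are accepted; the key is written
 * straight into the hardware key registers.
 */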
static int rk_aes_setkey(struct crypto_skcipher *cipher,
			 const u8 *key, unsigned int keylen)
{
	struct crypto_tfm *tfm = crypto_skcipher_tfm(cipher);
	struct rk_cipher_ctx *ctx = crypto_tfm_ctx(tfm);

	if (keylen != AES_KEYSIZE_128 && keylen != AES_KEYSIZE_192 &&
	    keylen != AES_KEYSIZE_256)
		return -EINVAL;
	ctx->keylen = keylen;
	memcpy_toio(ctx->dev->reg + RK_CRYPTO_AES_KEY_0, key, keylen);
	return 0;
}

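/*
 * DES and 3DES keys are validated by the generic verify_skcipher_des_key()
 * and verify_skcipher_des3_key() helpers before being written to the TDES
 * key registers.
 */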
static int rk_des_setkey(struct crypto_skcipher *cipher,
			 const u8 *key, unsigned int keylen)
{
	struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(cipher);
	int err;

	err = verify_skcipher_des_key(cipher, key);
	if (err)
		return err;

	ctx->keylen = keylen;
	memcpy_toio(ctx->dev->reg + RK_CRYPTO_TDES_KEY1_0, key, keylen);
	return 0;
}

static int rk_tdes_setkey(struct crypto_skcipher *cipher,
			  const u8 *key, unsigned int keylen)
{
	struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(cipher);
	int err;

	err = verify_skcipher_des3_key(cipher, key);
	if (err)
		return err;

	ctx->keylen = keylen;
	memcpy_toio(ctx->dev->reg + RK_CRYPTO_TDES_KEY1_0, key, keylen);
	return 0;
}

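/*
 * Each skcipher entry point below only records the control-register mode
 * bits for its algorithm and direction in the context, then defers the
 * actual work to rk_handle_req().
 */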
static int rk_aes_ecb_encrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct rk_crypto_info *dev = ctx->dev;

	ctx->mode = RK_CRYPTO_AES_ECB_MODE;
	return rk_handle_req(dev, req);
}

static int rk_aes_ecb_decrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct rk_crypto_info *dev = ctx->dev;

	ctx->mode = RK_CRYPTO_AES_ECB_MODE | RK_CRYPTO_DEC;
	return rk_handle_req(dev, req);
}

static int rk_aes_cbc_encrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct rk_crypto_info *dev = ctx->dev;

	ctx->mode = RK_CRYPTO_AES_CBC_MODE;
	return rk_handle_req(dev, req);
}

static int rk_aes_cbc_decrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct rk_crypto_info *dev = ctx->dev;

	ctx->mode = RK_CRYPTO_AES_CBC_MODE | RK_CRYPTO_DEC;
	return rk_handle_req(dev, req);
}

static int rk_des_ecb_encrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct rk_crypto_info *dev = ctx->dev;

	ctx->mode = 0;
	return rk_handle_req(dev, req);
}

static int rk_des_ecb_decrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct rk_crypto_info *dev = ctx->dev;

	ctx->mode = RK_CRYPTO_DEC;
	return rk_handle_req(dev, req);
}

static int rk_des_cbc_encrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct rk_crypto_info *dev = ctx->dev;

	ctx->mode = RK_CRYPTO_TDES_CHAINMODE_CBC;
	return rk_handle_req(dev, req);
}

static int rk_des_cbc_decrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct rk_crypto_info *dev = ctx->dev;

	ctx->mode = RK_CRYPTO_TDES_CHAINMODE_CBC | RK_CRYPTO_DEC;
	return rk_handle_req(dev, req);
}

static int rk_des3_ede_ecb_encrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct rk_crypto_info *dev = ctx->dev;

	ctx->mode = RK_CRYPTO_TDES_SELECT;
	return rk_handle_req(dev, req);
}

static int rk_des3_ede_ecb_decrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct rk_crypto_info *dev = ctx->dev;

	ctx->mode = RK_CRYPTO_TDES_SELECT | RK_CRYPTO_DEC;
	return rk_handle_req(dev, req);
}

static int rk_des3_ede_cbc_encrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct rk_crypto_info *dev = ctx->dev;

	ctx->mode = RK_CRYPTO_TDES_SELECT | RK_CRYPTO_TDES_CHAINMODE_CBC;
	return rk_handle_req(dev, req);
}

static int rk_des3_ede_cbc_decrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct rk_crypto_info *dev = ctx->dev;

	ctx->mode = RK_CRYPTO_TDES_SELECT | RK_CRYPTO_TDES_CHAINMODE_CBC |
		    RK_CRYPTO_DEC;
	return rk_handle_req(dev, req);
}

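/*
 * Program the cipher block for the current request: mode bits, key/IV/FIFO
 * byte swapping, the IV itself, and the DMA done/error interrupt enables.
 */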
static void rk_ablk_hw_init(struct rk_crypto_info *dev)
{
	struct skcipher_request *req =
		skcipher_request_cast(dev->async_req);
	struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(req);
	struct crypto_tfm *tfm = crypto_skcipher_tfm(cipher);
	struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(cipher);
	u32 ivsize, block, conf_reg = 0;

	block = crypto_tfm_alg_blocksize(tfm);
	ivsize = crypto_skcipher_ivsize(cipher);

	if (block == DES_BLOCK_SIZE) {
		ctx->mode |= RK_CRYPTO_TDES_FIFO_MODE |
			     RK_CRYPTO_TDES_BYTESWAP_KEY |
			     RK_CRYPTO_TDES_BYTESWAP_IV;
		CRYPTO_WRITE(dev, RK_CRYPTO_TDES_CTRL, ctx->mode);
		memcpy_toio(dev->reg + RK_CRYPTO_TDES_IV_0, req->iv, ivsize);
		conf_reg = RK_CRYPTO_DESSEL;
	} else {
		ctx->mode |= RK_CRYPTO_AES_FIFO_MODE |
			     RK_CRYPTO_AES_KEY_CHANGE |
			     RK_CRYPTO_AES_BYTESWAP_KEY |
			     RK_CRYPTO_AES_BYTESWAP_IV;
		if (ctx->keylen == AES_KEYSIZE_192)
			ctx->mode |= RK_CRYPTO_AES_192BIT_key;
		else if (ctx->keylen == AES_KEYSIZE_256)
			ctx->mode |= RK_CRYPTO_AES_256BIT_key;
		CRYPTO_WRITE(dev, RK_CRYPTO_AES_CTRL, ctx->mode);
		memcpy_toio(dev->reg + RK_CRYPTO_AES_IV_0, req->iv, ivsize);
	}
	conf_reg |= RK_CRYPTO_BYTESWAP_BTFIFO |
		    RK_CRYPTO_BYTESWAP_BRFIFO;
	CRYPTO_WRITE(dev, RK_CRYPTO_CONF, conf_reg);
	CRYPTO_WRITE(dev, RK_CRYPTO_INTENA,
		     RK_CRYPTO_BCDMA_ERR_ENA | RK_CRYPTO_BCDMA_DONE_ENA);
}

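/*
 * Load the source and destination DMA addresses and start one block-cipher
 * transfer. BRDMAL appears to count 32-bit words, hence dev->count / 4.
 */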
static void crypto_dma_start(struct rk_crypto_info *dev)
{
	CRYPTO_WRITE(dev, RK_CRYPTO_BRDMAS, dev->addr_in);
	CRYPTO_WRITE(dev, RK_CRYPTO_BRDMAL, dev->count / 4);
	CRYPTO_WRITE(dev, RK_CRYPTO_BTDMAS, dev->addr_out);
	CRYPTO_WRITE(dev, RK_CRYPTO_CTRL, RK_CRYPTO_BLOCK_START |
		     _SBF(RK_CRYPTO_BLOCK_START, 16));
}

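/*
 * Map the next chunk of scatterlist data and start the DMA engine. For
 * decryption the ciphertext block that will become the next IV is saved
 * first, before an in-place operation can overwrite it.
 */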
static int rk_set_data_start(struct rk_crypto_info *dev)
{
	int err;
	struct skcipher_request *req =
		skcipher_request_cast(dev->async_req);
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
	u32 ivsize = crypto_skcipher_ivsize(tfm);
	u8 *src_last_blk = page_address(sg_page(dev->sg_src)) +
		dev->sg_src->offset + dev->sg_src->length - ivsize;

	/* Store the IV that needs to be updated in chain mode,
	 * and update the IV buffer to contain the next IV for decryption mode.
	 */
	if (ctx->mode & RK_CRYPTO_DEC) {
		memcpy(ctx->iv, src_last_blk, ivsize);
		sg_pcopy_to_buffer(dev->first, dev->src_nents, req->iv,
				   ivsize, dev->total - ivsize);
	}

	err = dev->load_data(dev, dev->sg_src, dev->sg_dst);
	if (!err)
		crypto_dma_start(dev);
	return err;
}

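/* Set up the per-request bookkeeping and kick off the first transfer. */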
static int rk_ablk_start(struct rk_crypto_info *dev)
{
	struct skcipher_request *req =
		skcipher_request_cast(dev->async_req);
	unsigned long flags;
	int err = 0;

	dev->left_bytes = req->cryptlen;
	dev->total = req->cryptlen;
	dev->sg_src = req->src;
	dev->first = req->src;
	dev->src_nents = sg_nents(req->src);
	dev->sg_dst = req->dst;
	dev->dst_nents = sg_nents(req->dst);
	dev->aligned = 1;

	spin_lock_irqsave(&dev->lock, flags);
	rk_ablk_hw_init(dev);
	err = rk_set_data_start(dev);
	spin_unlock_irqrestore(&dev->lock, flags);
	return err;
}

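/*
 * After the final transfer of an encryption, copy the last ciphertext block
 * back into req->iv so that chained requests see the next IV. (For
 * decryption this was already done from the source in rk_set_data_start().)
 */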
static void rk_iv_copyback(struct rk_crypto_info *dev)
{
	struct skcipher_request *req =
		skcipher_request_cast(dev->async_req);
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
	u32 ivsize = crypto_skcipher_ivsize(tfm);

	/* Update the IV buffer to contain the next IV for encryption mode. */
	if (!(ctx->mode & RK_CRYPTO_DEC)) {
		if (dev->aligned) {
			memcpy(req->iv, sg_virt(dev->sg_dst) +
				dev->sg_dst->length - ivsize, ivsize);
		} else {
			memcpy(req->iv, dev->addr_vir +
				dev->count - ivsize, ivsize);
		}
	}
}

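/*
 * Reload the hardware IV register between scatterlist chunks: the saved
 * ciphertext block when decrypting, or the last output block when
 * encrypting.
 */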
static void rk_update_iv(struct rk_crypto_info *dev)
{
	struct skcipher_request *req =
		skcipher_request_cast(dev->async_req);
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
	u32 ivsize = crypto_skcipher_ivsize(tfm);
	u8 *new_iv = NULL;

	if (ctx->mode & RK_CRYPTO_DEC) {
		new_iv = ctx->iv;
	} else {
		new_iv = page_address(sg_page(dev->sg_dst)) +
			 dev->sg_dst->offset + dev->sg_dst->length - ivsize;
	}

	if (ivsize == DES_BLOCK_SIZE)
		memcpy_toio(dev->reg + RK_CRYPTO_TDES_IV_0, new_iv, ivsize);
	else if (ivsize == AES_BLOCK_SIZE)
		memcpy_toio(dev->reg + RK_CRYPTO_AES_IV_0, new_iv, ivsize);
}

/* Return:
 *	true	an error occurred
 *	false	no error, continue
 */
static int rk_ablk_rx(struct rk_crypto_info *dev)
{
	int err = 0;
	struct skcipher_request *req =
		skcipher_request_cast(dev->async_req);

	dev->unload_data(dev);
	if (!dev->aligned) {
		if (!sg_pcopy_from_buffer(req->dst, dev->dst_nents,
					  dev->addr_vir, dev->count,
					  dev->total - dev->left_bytes -
					  dev->count)) {
			err = -EINVAL;
			goto out_rx;
		}
	}
	if (dev->left_bytes) {
		rk_update_iv(dev);
		if (dev->aligned) {
			if (sg_is_last(dev->sg_src)) {
				dev_err(dev->dev, "[%s:%d] Lack of data\n",
					__func__, __LINE__);
				err = -ENOMEM;
				goto out_rx;
			}
			dev->sg_src = sg_next(dev->sg_src);
			dev->sg_dst = sg_next(dev->sg_dst);
		}
		err = rk_set_data_start(dev);
	} else {
		rk_iv_copyback(dev);
		/* the whole request finished without any error */
		dev->complete(dev->async_req, 0);
		tasklet_schedule(&dev->queue_task);
	}
out_rx:
	return err;
}

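/*
 * Allocate a bounce page used when the request data is not suitably
 * aligned, wire up the device callbacks for this tfm, and enable the
 * crypto clocks.
 */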
static int rk_ablk_init_tfm(struct crypto_skcipher *tfm)
{
	struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
	struct rk_crypto_tmp *algt;

	algt = container_of(alg, struct rk_crypto_tmp, alg.skcipher);

	ctx->dev = algt->dev;
	ctx->dev->align_size = crypto_tfm_alg_alignmask(crypto_skcipher_tfm(tfm)) + 1;
	ctx->dev->start = rk_ablk_start;
	ctx->dev->update = rk_ablk_rx;
	ctx->dev->complete = rk_crypto_complete;
	ctx->dev->addr_vir = (char *)__get_free_page(GFP_KERNEL);

	return ctx->dev->addr_vir ? ctx->dev->enable_clk(ctx->dev) : -ENOMEM;
}

static void rk_ablk_exit_tfm(struct crypto_skcipher *tfm)
{
	struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);

	free_page((unsigned long)ctx->dev->addr_vir);
	ctx->dev->disable_clk(ctx->dev);
}

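/*
 * Algorithm templates registered with the kernel crypto API. All of them
 * are marked CRYPTO_ALG_ASYNC and share the init/exit hooks above.
 */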
struct rk_crypto_tmp rk_ecb_aes_alg = {
	.type = ALG_TYPE_CIPHER,
	.alg.skcipher = {
		.base.cra_name		= "ecb(aes)",
		.base.cra_driver_name	= "ecb-aes-rk",
		.base.cra_priority	= 300,
		.base.cra_flags		= CRYPTO_ALG_ASYNC,
		.base.cra_blocksize	= AES_BLOCK_SIZE,
		.base.cra_ctxsize	= sizeof(struct rk_cipher_ctx),
		.base.cra_alignmask	= 0x0f,
		.base.cra_module	= THIS_MODULE,

		.init			= rk_ablk_init_tfm,
		.exit			= rk_ablk_exit_tfm,
		.min_keysize		= AES_MIN_KEY_SIZE,
		.max_keysize		= AES_MAX_KEY_SIZE,
		.setkey			= rk_aes_setkey,
		.encrypt		= rk_aes_ecb_encrypt,
		.decrypt		= rk_aes_ecb_decrypt,
	}
};

struct rk_crypto_tmp rk_cbc_aes_alg = {
	.type = ALG_TYPE_CIPHER,
	.alg.skcipher = {
		.base.cra_name		= "cbc(aes)",
		.base.cra_driver_name	= "cbc-aes-rk",
		.base.cra_priority	= 300,
		.base.cra_flags		= CRYPTO_ALG_ASYNC,
		.base.cra_blocksize	= AES_BLOCK_SIZE,
		.base.cra_ctxsize	= sizeof(struct rk_cipher_ctx),
		.base.cra_alignmask	= 0x0f,
		.base.cra_module	= THIS_MODULE,

		.init			= rk_ablk_init_tfm,
		.exit			= rk_ablk_exit_tfm,
		.min_keysize		= AES_MIN_KEY_SIZE,
		.max_keysize		= AES_MAX_KEY_SIZE,
		.ivsize			= AES_BLOCK_SIZE,
		.setkey			= rk_aes_setkey,
		.encrypt		= rk_aes_cbc_encrypt,
		.decrypt		= rk_aes_cbc_decrypt,
	}
};

struct rk_crypto_tmp rk_ecb_des_alg = {
	.type = ALG_TYPE_CIPHER,
	.alg.skcipher = {
		.base.cra_name		= "ecb(des)",
		.base.cra_driver_name	= "ecb-des-rk",
		.base.cra_priority	= 300,
		.base.cra_flags		= CRYPTO_ALG_ASYNC,
		.base.cra_blocksize	= DES_BLOCK_SIZE,
		.base.cra_ctxsize	= sizeof(struct rk_cipher_ctx),
		.base.cra_alignmask	= 0x07,
		.base.cra_module	= THIS_MODULE,

		.init			= rk_ablk_init_tfm,
		.exit			= rk_ablk_exit_tfm,
		.min_keysize		= DES_KEY_SIZE,
		.max_keysize		= DES_KEY_SIZE,
		.setkey			= rk_des_setkey,
		.encrypt		= rk_des_ecb_encrypt,
		.decrypt		= rk_des_ecb_decrypt,
	}
};

struct rk_crypto_tmp rk_cbc_des_alg = {
	.type = ALG_TYPE_CIPHER,
	.alg.skcipher = {
		.base.cra_name		= "cbc(des)",
		.base.cra_driver_name	= "cbc-des-rk",
		.base.cra_priority	= 300,
		.base.cra_flags		= CRYPTO_ALG_ASYNC,
		.base.cra_blocksize	= DES_BLOCK_SIZE,
		.base.cra_ctxsize	= sizeof(struct rk_cipher_ctx),
		.base.cra_alignmask	= 0x07,
		.base.cra_module	= THIS_MODULE,

		.init			= rk_ablk_init_tfm,
		.exit			= rk_ablk_exit_tfm,
		.min_keysize		= DES_KEY_SIZE,
		.max_keysize		= DES_KEY_SIZE,
		.ivsize			= DES_BLOCK_SIZE,
		.setkey			= rk_des_setkey,
		.encrypt		= rk_des_cbc_encrypt,
		.decrypt		= rk_des_cbc_decrypt,
	}
};

struct rk_crypto_tmp rk_ecb_des3_ede_alg = {
	.type = ALG_TYPE_CIPHER,
	.alg.skcipher = {
		.base.cra_name		= "ecb(des3_ede)",
		.base.cra_driver_name	= "ecb-des3-ede-rk",
		.base.cra_priority	= 300,
		.base.cra_flags		= CRYPTO_ALG_ASYNC,
		.base.cra_blocksize	= DES_BLOCK_SIZE,
		.base.cra_ctxsize	= sizeof(struct rk_cipher_ctx),
		.base.cra_alignmask	= 0x07,
		.base.cra_module	= THIS_MODULE,

		.init			= rk_ablk_init_tfm,
		.exit			= rk_ablk_exit_tfm,
		.min_keysize		= DES3_EDE_KEY_SIZE,
		.max_keysize		= DES3_EDE_KEY_SIZE,
		.ivsize			= DES_BLOCK_SIZE,
		.setkey			= rk_tdes_setkey,
		.encrypt		= rk_des3_ede_ecb_encrypt,
		.decrypt		= rk_des3_ede_ecb_decrypt,
	}
};

struct rk_crypto_tmp rk_cbc_des3_ede_alg = {
	.type = ALG_TYPE_CIPHER,
	.alg.skcipher = {
		.base.cra_name		= "cbc(des3_ede)",
		.base.cra_driver_name	= "cbc-des3-ede-rk",
		.base.cra_priority	= 300,
		.base.cra_flags		= CRYPTO_ALG_ASYNC,
		.base.cra_blocksize	= DES_BLOCK_SIZE,
		.base.cra_ctxsize	= sizeof(struct rk_cipher_ctx),
		.base.cra_alignmask	= 0x07,
		.base.cra_module	= THIS_MODULE,

		.init			= rk_ablk_init_tfm,
		.exit			= rk_ablk_exit_tfm,
		.min_keysize		= DES3_EDE_KEY_SIZE,
		.max_keysize		= DES3_EDE_KEY_SIZE,
		.ivsize			= DES_BLOCK_SIZE,
		.setkey			= rk_tdes_setkey,
		.encrypt		= rk_des3_ede_cbc_encrypt,
		.decrypt		= rk_des3_ede_cbc_decrypt,
	}
};