Lines matching refs: SM4_BLOCK_SIZE

73 	u8 digest[SM4_BLOCK_SIZE];
132 nbytes -= nblks * SM4_BLOCK_SIZE; in sm4_ecb_do_crypt()
175 nblocks = nbytes / SM4_BLOCK_SIZE; in sm4_cbc_crypt()
189 err = skcipher_walk_done(&walk, nbytes % SM4_BLOCK_SIZE); in sm4_cbc_crypt()
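
The ECB and CBC hits above (132, 175-189) are instances of the kernel's standard skcipher walk pattern: consume as many whole blocks as the current walk chunk holds, then hand the sub-block remainder back through skcipher_walk_done(). A hedged sketch of that loop; skcipher_walk_virt() and skcipher_walk_done() are the real API, while sm4_cbc_enc_blocks() is a hypothetical stand-in for the driver's bulk primitive:

struct skcipher_walk walk;
unsigned int nbytes;
int err;

err = skcipher_walk_virt(&walk, req, false);

while ((nbytes = walk.nbytes) > 0) {
    unsigned int nblocks = nbytes / SM4_BLOCK_SIZE;

    if (nblocks)
        /* hypothetical bulk helper, not a real symbol */
        sm4_cbc_enc_blocks(ctx, walk.dst.virt.addr,
                           walk.src.virt.addr, walk.iv, nblocks);

    /* Return the sub-block remainder; the walk carries it into
     * the next chunk or finishes the request. */
    err = skcipher_walk_done(&walk, nbytes % SM4_BLOCK_SIZE);
}
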
223 if (req->cryptlen < SM4_BLOCK_SIZE) in sm4_cbc_cts_crypt()
226 if (req->cryptlen == SM4_BLOCK_SIZE) in sm4_cbc_cts_crypt()
234 cbc_blocks = DIV_ROUND_UP(req->cryptlen, SM4_BLOCK_SIZE) - 2; in sm4_cbc_cts_crypt()
237 cbc_blocks * SM4_BLOCK_SIZE, in sm4_cbc_cts_crypt()
252 req->cryptlen - cbc_blocks * SM4_BLOCK_SIZE, in sm4_cbc_cts_crypt()
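
Lines 223-252 implement CBC with ciphertext stealing: sub-block requests are rejected (223), a single-block request degenerates to plain CBC (226), and anything longer splits into bulk CBC blocks plus a final stolen unit of one to two blocks (234-252). A minimal standalone model of that arithmetic under the same DIV_ROUND_UP rule; cts_split() is an illustrative helper, not a kernel function:

#include <stddef.h>
#include <stdio.h>

#define SM4_BLOCK_SIZE 16
/* Same rounding helper the kernel uses. */
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

/*
 * Illustrative re-statement of the split in sm4_cbc_cts_crypt():
 * everything except the last two (possibly partial) blocks is bulk
 * CBC; the remainder goes through the ciphertext-stealing path.
 */
static void cts_split(size_t cryptlen, size_t *cbc_bytes, size_t *cts_bytes)
{
    int cbc_blocks = (int)DIV_ROUND_UP(cryptlen, SM4_BLOCK_SIZE) - 2;

    if (cbc_blocks < 0)
        cbc_blocks = 0;
    *cbc_bytes = (size_t)cbc_blocks * SM4_BLOCK_SIZE;
    *cts_bytes = cryptlen - *cbc_bytes;
}

int main(void)
{
    size_t cbc, cts;

    cts_split(16, &cbc, &cts);  /* one block: real code short-circuits to CBC */
    printf("16 -> cbc=%zu cts=%zu\n", cbc, cts);
    cts_split(47, &cbc, &cts);  /* 1 bulk block + 31-byte stolen tail */
    printf("47 -> cbc=%zu cts=%zu\n", cbc, cts);
    cts_split(64, &cbc, &cts);  /* aligned: last two blocks still stolen */
    printf("64 -> cbc=%zu cts=%zu\n", cbc, cts);
    return 0;
}
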
303 dst += nblks * SM4_BLOCK_SIZE; in sm4_cfb_encrypt()
304 src += nblks * SM4_BLOCK_SIZE; in sm4_cfb_encrypt()
305 nbytes -= nblks * SM4_BLOCK_SIZE; in sm4_cfb_encrypt()
310 u8 keystream[SM4_BLOCK_SIZE]; in sm4_cfb_encrypt()
345 dst += nblks * SM4_BLOCK_SIZE; in sm4_cfb_decrypt()
346 src += nblks * SM4_BLOCK_SIZE; in sm4_cfb_decrypt()
347 nbytes -= nblks * SM4_BLOCK_SIZE; in sm4_cfb_decrypt()
352 u8 keystream[SM4_BLOCK_SIZE]; in sm4_cfb_decrypt()
387 dst += nblks * SM4_BLOCK_SIZE; in sm4_ctr_crypt()
388 src += nblks * SM4_BLOCK_SIZE; in sm4_ctr_crypt()
389 nbytes -= nblks * SM4_BLOCK_SIZE; in sm4_ctr_crypt()
394 u8 keystream[SM4_BLOCK_SIZE]; in sm4_ctr_crypt()
397 crypto_inc(walk.iv, SM4_BLOCK_SIZE); in sm4_ctr_crypt()
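
The CFB (303-352) and CTR (387-397) groups share one shape: a bulk pass over whole blocks, then a single keystream block generated for the ragged tail; CTR additionally bumps its big-endian counter with crypto_inc() (397). A small standalone model; ctr_inc() and xor_tail() are illustrative stand-ins for crypto_inc() and the tail XOR:

#include <stdint.h>
#include <stdio.h>

#define SM4_BLOCK_SIZE 16

/*
 * Illustrative stand-in for the kernel's crypto_inc(): increment a
 * big-endian counter block in place, rippling the carry from the
 * last byte toward the first.
 */
static void ctr_inc(uint8_t ctr[SM4_BLOCK_SIZE])
{
    for (int i = SM4_BLOCK_SIZE - 1; i >= 0; i--)
        if (++ctr[i] != 0)
            break;
}

/* XOR a partial final block against one keystream block, as the
 * tail path after the bulk loop does. */
static void xor_tail(uint8_t *dst, const uint8_t *src,
                     const uint8_t keystream[SM4_BLOCK_SIZE], size_t nbytes)
{
    for (size_t i = 0; i < nbytes; i++)
        dst[i] = src[i] ^ keystream[i];
}

int main(void)
{
    uint8_t ctr[SM4_BLOCK_SIZE] = { [15] = 0xff };

    ctr_inc(ctr);               /* 0x...00ff -> 0x...0100 */
    printf("%02x %02x\n", ctr[14], ctr[15]);

    uint8_t ks[SM4_BLOCK_SIZE] = { 0xaa };
    uint8_t out[4], in[4] = { 1, 2, 3, 4 };

    xor_tail(out, in, ks, sizeof(in));
    printf("%02x\n", out[0]);   /* 0xab */
    return 0;
}
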
414 int tail = req->cryptlen % SM4_BLOCK_SIZE; in sm4_xts_crypt()
423 if (req->cryptlen < SM4_BLOCK_SIZE) in sm4_xts_crypt()
431 int nblocks = DIV_ROUND_UP(req->cryptlen, SM4_BLOCK_SIZE) - 2; in sm4_xts_crypt()
440 nblocks * SM4_BLOCK_SIZE, req->iv); in sm4_xts_crypt()
449 while ((nbytes = walk.nbytes) >= SM4_BLOCK_SIZE) { in sm4_xts_crypt()
451 nbytes &= ~(SM4_BLOCK_SIZE - 1); in sm4_xts_crypt()
482 skcipher_request_set_crypt(&subreq, src, dst, SM4_BLOCK_SIZE + tail, in sm4_xts_crypt()
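
The XTS path (414-482) follows the same idea as the CBC-CTS split: when cryptlen is not block-aligned (414), the last two blocks are reserved for ciphertext stealing, the walker only sees whole blocks (449-451), and a final SM4_BLOCK_SIZE + tail chunk is handled separately (482); requests under one block are rejected (423). A standalone check of those sizes, with illustrative variable names:

#include <stddef.h>
#include <stdio.h>

#define SM4_BLOCK_SIZE 16
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

/*
 * Illustrative model of the sm4_xts_crypt() split: with a ragged
 * tail, the last two blocks are kept for ciphertext stealing and
 * the walk rounds each step down to whole blocks.
 */
int main(void)
{
    size_t cryptlen = 53;                    /* example length */
    size_t tail = cryptlen % SM4_BLOCK_SIZE; /* 5 */
    size_t bulk = cryptlen;

    if (tail) {
        int nblocks = (int)DIV_ROUND_UP(cryptlen, SM4_BLOCK_SIZE) - 2;

        bulk = (size_t)nblocks * SM4_BLOCK_SIZE;  /* 32 */
    }

    /* The walk loop masks each step down to full blocks, as in
     * "nbytes &= ~(SM4_BLOCK_SIZE - 1)". */
    size_t step = 40 & ~(size_t)(SM4_BLOCK_SIZE - 1);  /* 32 */

    printf("bulk=%zu final=%zu step=%zu\n",
           bulk, tail ? SM4_BLOCK_SIZE + tail : 0, step);
    return 0;
}
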
521 .cra_blocksize = SM4_BLOCK_SIZE,
535 .cra_blocksize = SM4_BLOCK_SIZE,
541 .ivsize = SM4_BLOCK_SIZE,
556 .ivsize = SM4_BLOCK_SIZE,
557 .chunksize = SM4_BLOCK_SIZE,
572 .ivsize = SM4_BLOCK_SIZE,
573 .chunksize = SM4_BLOCK_SIZE,
582 .cra_blocksize = SM4_BLOCK_SIZE,
588 .ivsize = SM4_BLOCK_SIZE,
589 .walksize = SM4_BLOCK_SIZE * 2,
598 .cra_blocksize = SM4_BLOCK_SIZE,
604 .ivsize = SM4_BLOCK_SIZE,
605 .walksize = SM4_BLOCK_SIZE * 2,
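
Notably, the CFB and CTR templates (ivsize/chunksize hits at 556-557 and 572-573) contribute no cra_blocksize match: stream-like modes presumably advertise cra_blocksize = 1 and express block granularity through chunksize instead, while the two templates declaring walksize = SM4_BLOCK_SIZE * 2 (by their hit pattern, the ciphertext-stealing and XTS ones) ask the walker for two blocks at a time so the stolen tail is always in hand. A hedged fragment of that shape; the field names are the real struct skcipher_alg members, the values and template names are illustrative:

/* Fragment, not a complete driver. */
static struct skcipher_alg ctr_like_tmpl = {
    .base.cra_blocksize = 1,               /* stream mode */
    .ivsize             = SM4_BLOCK_SIZE,  /* counter/IV block */
    .chunksize          = SM4_BLOCK_SIZE,  /* keystream unit */
};

static struct skcipher_alg cts_like_tmpl = {
    .base.cra_blocksize = SM4_BLOCK_SIZE,
    .ivsize             = SM4_BLOCK_SIZE,
    .walksize           = SM4_BLOCK_SIZE * 2, /* tail needs 2 blocks */
};
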
638 memset(consts, 0, SM4_BLOCK_SIZE); in sm4_cmac_setkey()
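
sm4_cmac_setkey() zeroes the consts area (638) before deriving the two CMAC subkeys; per NIST SP 800-38B that derivation encrypts a zero block to get L and doubles it twice in GF(2^128). A standalone sketch of the doubling step; gf128_dbl() is illustrative, and the "L = E_K(0)" value is faked since no cipher is wired in:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define SM4_BLOCK_SIZE 16

/*
 * Standard CMAC subkey doubling over GF(2^128): shift the block
 * left by one bit and, if the top bit fell off, XOR in the field
 * constant 0x87.
 */
static void gf128_dbl(uint8_t b[SM4_BLOCK_SIZE])
{
    uint8_t carry = b[0] >> 7;

    for (int i = 0; i < SM4_BLOCK_SIZE - 1; i++)
        b[i] = (uint8_t)((b[i] << 1) | (b[i + 1] >> 7));
    b[SM4_BLOCK_SIZE - 1] = (uint8_t)(b[SM4_BLOCK_SIZE - 1] << 1);
    if (carry)
        b[SM4_BLOCK_SIZE - 1] ^= 0x87;
}

int main(void)
{
    /* Pretend L = E_K(0^16) came back from the cipher. */
    uint8_t k1[SM4_BLOCK_SIZE] = { 0x80 };  /* top bit set */
    uint8_t k2[SM4_BLOCK_SIZE];

    gf128_dbl(k1);               /* K1 = dbl(L) */
    memcpy(k2, k1, sizeof(k2));
    gf128_dbl(k2);               /* K2 = dbl(K1) */
    printf("k1[15]=%02x k2[15]=%02x\n", k1[15], k2[15]);
    return 0;
}
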
668 u8 __aligned(8) key2[SM4_BLOCK_SIZE]; in sm4_xcbc_setkey()
669 static u8 const ks[3][SM4_BLOCK_SIZE] = { in sm4_xcbc_setkey()
670 { [0 ... SM4_BLOCK_SIZE - 1] = 0x1}, in sm4_xcbc_setkey()
671 { [0 ... SM4_BLOCK_SIZE - 1] = 0x2}, in sm4_xcbc_setkey()
672 { [0 ... SM4_BLOCK_SIZE - 1] = 0x3}, in sm4_xcbc_setkey()
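
The xcbc_setkey constants (669-672) are the RFC 3566 derivation inputs: K1, K2, and K3 come from encrypting blocks of repeated 0x01, 0x02, and 0x03 under the user key. The [0 ... SM4_BLOCK_SIZE - 1] range initializer is a GCC extension; the portable sketch below uses memset, with a toy cipher standing in for SM4:

#include <stdint.h>
#include <string.h>

#define SM4_BLOCK_SIZE 16

/* Hypothetical single-block encrypt hook; any 128-bit block
 * cipher fits the shape. */
typedef void (*blk_enc_fn)(const void *key,
                           uint8_t out[SM4_BLOCK_SIZE],
                           const uint8_t in[SM4_BLOCK_SIZE]);

/*
 * RFC 3566 XCBC key derivation, matching the ks[][] table in
 * sm4_xcbc_setkey(): K1 = E_K(0x01^16), K2 = E_K(0x02^16),
 * K3 = E_K(0x03^16).
 */
static void xcbc_derive(const void *key, blk_enc_fn enc,
                        uint8_t k1[SM4_BLOCK_SIZE],
                        uint8_t k2[SM4_BLOCK_SIZE],
                        uint8_t k3[SM4_BLOCK_SIZE])
{
    uint8_t c[SM4_BLOCK_SIZE];

    memset(c, 0x01, sizeof(c));
    enc(key, k1, c);  /* K1 keys the CBC-MAC chain */
    memset(c, 0x02, sizeof(c));
    enc(key, k2, c);  /* K2 masks a complete final block */
    memset(c, 0x03, sizeof(c));
    enc(key, k3, c);  /* K3 masks a padded final block */
}

/* Toy stand-in cipher so the sketch links; NOT SM4. */
static void toy_enc(const void *key, uint8_t out[SM4_BLOCK_SIZE],
                    const uint8_t in[SM4_BLOCK_SIZE])
{
    const uint8_t *k = key;

    for (int i = 0; i < SM4_BLOCK_SIZE; i++)
        out[i] = in[i] ^ k[i % SM4_BLOCK_SIZE];
}

int main(void)
{
    uint8_t key[SM4_BLOCK_SIZE] = { 0 };
    uint8_t k1[SM4_BLOCK_SIZE], k2[SM4_BLOCK_SIZE], k3[SM4_BLOCK_SIZE];

    xcbc_derive(key, toy_enc, k1, k2, k3);
    return 0;
}
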
698 memset(ctx->digest, 0, SM4_BLOCK_SIZE); in sm4_mac_init()
714 if (ctx->len || ctx->len + len < SM4_BLOCK_SIZE) { in sm4_mac_update()
715 l = min(len, SM4_BLOCK_SIZE - ctx->len); in sm4_mac_update()
723 if (len && (ctx->len % SM4_BLOCK_SIZE) == 0) { in sm4_mac_update()
726 if (len < SM4_BLOCK_SIZE && ctx->len == SM4_BLOCK_SIZE) { in sm4_mac_update()
731 nblocks = len / SM4_BLOCK_SIZE; in sm4_mac_update()
732 len %= SM4_BLOCK_SIZE; in sm4_mac_update()
735 nblocks, (ctx->len == SM4_BLOCK_SIZE), in sm4_mac_update()
738 p += nblocks * SM4_BLOCK_SIZE; in sm4_mac_update()
741 ctx->len = SM4_BLOCK_SIZE; in sm4_mac_update()
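
The update logic (714-741) buffers a partial block, drains it only when more input follows, and deliberately parks ctx->len at SM4_BLOCK_SIZE (741) instead of flushing, so the final block is still in hand when CMAC/XCBC choose their finalization subkey. A simplified standalone model of that bookkeeping; the real code folds data into ctx->digest as it goes, while this sketch buffers raw bytes to stay self-contained:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define SM4_BLOCK_SIZE 16

struct mac_ctx {
    uint8_t block[SM4_BLOCK_SIZE];
    unsigned int len;  /* bytes buffered; may rest at BLOCK */
};

/*
 * Illustrative rule: fill the partial block first, and never absorb
 * the last block of the stream, so finalization can still pad and
 * mask it.
 */
static void mac_update(struct mac_ctx *ctx, const uint8_t *p, size_t len,
                       void (*process)(struct mac_ctx *,
                                       const uint8_t *, size_t nblocks))
{
    if (ctx->len || ctx->len + len < SM4_BLOCK_SIZE) {
        size_t l = len < SM4_BLOCK_SIZE - ctx->len ?
                   len : SM4_BLOCK_SIZE - ctx->len;

        memcpy(ctx->block + ctx->len, p, l);
        ctx->len += l;
        p += l;
        len -= l;
    }
    if (len && ctx->len == SM4_BLOCK_SIZE) {
        /* Buffered block is complete AND more data follows:
         * safe to absorb it now. */
        process(ctx, ctx->block, 1);
        ctx->len = 0;
    }
    if (len > SM4_BLOCK_SIZE) {
        size_t nblocks = (len - 1) / SM4_BLOCK_SIZE;

        process(ctx, p, nblocks);
        p += nblocks * SM4_BLOCK_SIZE;
        len -= nblocks * SM4_BLOCK_SIZE;
    }
    if (len) {
        memcpy(ctx->block, p, len);  /* hold the tail back */
        ctx->len = len;
    }
}

static void count_blocks(struct mac_ctx *ctx, const uint8_t *p, size_t n)
{
    (void)ctx;
    (void)p;
    printf("absorbed %zu block(s)\n", n);
}

int main(void)
{
    struct mac_ctx ctx = { .len = 0 };
    uint8_t buf[40] = { 0 };

    mac_update(&ctx, buf, sizeof(buf), count_blocks); /* absorbs 2 */
    printf("held back %u bytes\n", ctx.len);          /* 8 */
    return 0;
}
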
761 if (ctx->len != SM4_BLOCK_SIZE) { in sm4_cmac_final()
763 consts += SM4_BLOCK_SIZE; in sm4_cmac_final()
771 memcpy(out, ctx->digest, SM4_BLOCK_SIZE); in sm4_cmac_final()
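
Finalization (761-763) then inspects the held-back block: a complete block is masked with the first derived constant, while a short one gets 10* padding and the second constant at consts + SM4_BLOCK_SIZE. A standalone sketch; cmac_last_block() is illustrative and stops just before the final block encryption:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define SM4_BLOCK_SIZE 16

/*
 * Illustrative model of the last-block handling in sm4_cmac_final():
 * pad-and-pick-K2 for a partial block, plain K1 otherwise.
 */
static void cmac_last_block(uint8_t block[SM4_BLOCK_SIZE], unsigned int len,
                            const uint8_t consts[2 * SM4_BLOCK_SIZE])
{
    if (len != SM4_BLOCK_SIZE) {
        block[len] = 0x80;  /* 10* padding */
        memset(block + len + 1, 0, SM4_BLOCK_SIZE - len - 1);
        consts += SM4_BLOCK_SIZE;  /* select the second constant */
    }
    for (int i = 0; i < SM4_BLOCK_SIZE; i++)
        block[i] ^= consts[i];
    /* The real code then encrypts this block into ctx->digest. */
}

int main(void)
{
    uint8_t consts[2 * SM4_BLOCK_SIZE] = { 0 };
    uint8_t blk[SM4_BLOCK_SIZE] = { 'a', 'b', 'c' };

    cmac_last_block(blk, 3, consts);
    printf("%02x %02x\n", blk[2], blk[3]);  /* 63 80 */
    return 0;
}
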
788 memcpy(out, ctx->digest, SM4_BLOCK_SIZE); in sm4_cbcmac_final()
799 .cra_blocksize = SM4_BLOCK_SIZE,
801 + SM4_BLOCK_SIZE * 2,
804 .digestsize = SM4_BLOCK_SIZE,
815 .cra_blocksize = SM4_BLOCK_SIZE,
817 + SM4_BLOCK_SIZE * 2,
820 .digestsize = SM4_BLOCK_SIZE,
835 .digestsize = SM4_BLOCK_SIZE,
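
The MAC templates (799-835) size their per-key context with an extra "+ SM4_BLOCK_SIZE * 2" (801, 817), presumably room for the two derived constant blocks that sm4_cmac_final() addresses as consts and consts + SM4_BLOCK_SIZE; the plain CBC-MAC template (835) needs no such constants. An illustrative layout sketch; the struct and field names are hypothetical:

#include <stdint.h>
#include <stdio.h>

#define SM4_BLOCK_SIZE 16

/* Hypothetical layout matching "sizeof(ctx) + SM4_BLOCK_SIZE * 2":
 * the finalization constants live right after the ordinary state. */
struct mac_tfm_ctx {
    uint32_t rkey[32];  /* expanded SM4 key (stand-in) */
    uint8_t consts[];   /* 2 * SM4_BLOCK_SIZE bytes follow */
};

int main(void)
{
    printf("ctxsize = %zu\n",
           sizeof(struct mac_tfm_ctx) + SM4_BLOCK_SIZE * 2);
    return 0;
}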