// SPDX-License-Identifier: GPL-2.0-only
/*
 * AES CBC routines supporting VMX instructions on the Power 8
 *
 * Copyright (C) 2015 International Business Machines Inc.
 *
 * Author: Marcelo Henrique Cerri <mhcerri@br.ibm.com>
 */

#include <linux/types.h>
#include <linux/err.h>
#include <linux/crypto.h>
#include <linux/delay.h>
#include <asm/simd.h>
#include <asm/switch_to.h>
#include <crypto/aes.h>
#include <crypto/internal/simd.h>
#include <crypto/scatterwalk.h>
#include <crypto/skcipher.h>

#include "aesp8-ppc.h"

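/*
 * Per-transform context: the VSX-expanded key schedules for both
 * directions, plus a synchronous skcipher used as a software fallback
 * whenever the vector unit cannot be used.
 */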
struct p8_aes_cbc_ctx {
	struct crypto_sync_skcipher *fallback;
	struct aes_key enc_key;
	struct aes_key dec_key;
};

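/*
 * Allocate the fallback skcipher at init time so that a request can
 * always be serviced, even from contexts where VSX is not usable.
 */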
static int p8_aes_cbc_init(struct crypto_tfm *tfm)
{
	const char *alg = crypto_tfm_alg_name(tfm);
	struct crypto_sync_skcipher *fallback;
	struct p8_aes_cbc_ctx *ctx = crypto_tfm_ctx(tfm);

	fallback = crypto_alloc_sync_skcipher(alg, 0,
					      CRYPTO_ALG_NEED_FALLBACK);

	if (IS_ERR(fallback)) {
		printk(KERN_ERR
		       "Failed to allocate transformation for '%s': %ld\n",
		       alg, PTR_ERR(fallback));
		return PTR_ERR(fallback);
	}

	crypto_sync_skcipher_set_flags(
		fallback,
		crypto_skcipher_get_flags((struct crypto_skcipher *)tfm));
	ctx->fallback = fallback;

	return 0;
}

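/* Drop the fallback transform when the tfm goes away. */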
static void p8_aes_cbc_exit(struct crypto_tfm *tfm)
{
	struct p8_aes_cbc_ctx *ctx = crypto_tfm_ctx(tfm);

	if (ctx->fallback) {
		crypto_free_sync_skcipher(ctx->fallback);
		ctx->fallback = NULL;
	}
}

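/*
 * Expand the key for both directions with the P8 assembly helpers and
 * also key the fallback tfm.  The expansion runs with the vector unit
 * enabled, so preemption and page faults are disabled around it.
 */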
static int p8_aes_cbc_setkey(struct crypto_tfm *tfm, const u8 *key,
			     unsigned int keylen)
{
	int ret;
	struct p8_aes_cbc_ctx *ctx = crypto_tfm_ctx(tfm);

	preempt_disable();
	pagefault_disable();
	enable_kernel_vsx();
	ret = aes_p8_set_encrypt_key(key, keylen * 8, &ctx->enc_key);
	ret |= aes_p8_set_decrypt_key(key, keylen * 8, &ctx->dec_key);
	disable_kernel_vsx();
	pagefault_enable();
	preempt_enable();

	ret |= crypto_sync_skcipher_setkey(ctx->fallback, key, keylen);

	return ret ? -EINVAL : 0;
}

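/*
 * CBC encryption: fall back to the software skcipher when SIMD is not
 * usable in the current context; otherwise walk the scatterlists and
 * feed whole AES blocks to the VSX routine.
 */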
static int p8_aes_cbc_encrypt(struct blkcipher_desc *desc,
			      struct scatterlist *dst,
			      struct scatterlist *src, unsigned int nbytes)
{
	int ret;
	struct blkcipher_walk walk;
	struct p8_aes_cbc_ctx *ctx =
		crypto_tfm_ctx(crypto_blkcipher_tfm(desc->tfm));

	if (!crypto_simd_usable()) {
		SYNC_SKCIPHER_REQUEST_ON_STACK(req, ctx->fallback);
		skcipher_request_set_sync_tfm(req, ctx->fallback);
		skcipher_request_set_callback(req, desc->flags, NULL, NULL);
		skcipher_request_set_crypt(req, src, dst, nbytes, desc->info);
		ret = crypto_skcipher_encrypt(req);
		skcipher_request_zero(req);
	} else {
		blkcipher_walk_init(&walk, dst, src, nbytes);
		ret = blkcipher_walk_virt(desc, &walk);
		while ((nbytes = walk.nbytes)) {
			/*
			 * The assembly routine uses the VSX unit, which is
			 * only available to kernel code between
			 * enable_kernel_vsx() and disable_kernel_vsx();
			 * preemption and page faults are disabled so the
			 * vector state cannot be lost mid-operation.
			 */
			preempt_disable();
			pagefault_disable();
			enable_kernel_vsx();
			aes_p8_cbc_encrypt(walk.src.virt.addr,
					   walk.dst.virt.addr,
					   nbytes & AES_BLOCK_MASK,
					   &ctx->enc_key, walk.iv, 1);
			disable_kernel_vsx();
			pagefault_enable();
			preempt_enable();

			/* Report any unprocessed partial block to the walk. */
			nbytes &= AES_BLOCK_SIZE - 1;
			ret = blkcipher_walk_done(desc, &walk, nbytes);
		}
	}

	return ret;
}

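/*
 * CBC decryption: mirrors the encrypt path, but passes the decryption
 * key schedule and calls the assembly routine with enc == 0.
 */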
static int p8_aes_cbc_decrypt(struct blkcipher_desc *desc,
			      struct scatterlist *dst,
			      struct scatterlist *src, unsigned int nbytes)
{
	int ret;
	struct blkcipher_walk walk;
	struct p8_aes_cbc_ctx *ctx =
		crypto_tfm_ctx(crypto_blkcipher_tfm(desc->tfm));

	if (!crypto_simd_usable()) {
		SYNC_SKCIPHER_REQUEST_ON_STACK(req, ctx->fallback);
		skcipher_request_set_sync_tfm(req, ctx->fallback);
		skcipher_request_set_callback(req, desc->flags, NULL, NULL);
		skcipher_request_set_crypt(req, src, dst, nbytes, desc->info);
		ret = crypto_skcipher_decrypt(req);
		skcipher_request_zero(req);
	} else {
		blkcipher_walk_init(&walk, dst, src, nbytes);
		ret = blkcipher_walk_virt(desc, &walk);
		while ((nbytes = walk.nbytes)) {
			preempt_disable();
			pagefault_disable();
			enable_kernel_vsx();
			/*
			 * The same assembly routine handles both directions;
			 * the last argument selects decryption.
			 */
			aes_p8_cbc_encrypt(walk.src.virt.addr,
					   walk.dst.virt.addr,
					   nbytes & AES_BLOCK_MASK,
					   &ctx->dec_key, walk.iv, 0);
			disable_kernel_vsx();
			pagefault_enable();
			preempt_enable();

			nbytes &= AES_BLOCK_SIZE - 1;
			ret = blkcipher_walk_done(desc, &walk, nbytes);
		}
	}

	return ret;
}

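/*
 * Algorithm descriptor.  The priority of 2000 places this implementation
 * above the generic software cbc(aes); CRYPTO_ALG_NEED_FALLBACK marks it
 * as requiring a fallback, which also prevents the fallback allocation in
 * p8_aes_cbc_init() from selecting this driver itself.
 */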
struct crypto_alg p8_aes_cbc_alg = {
	.cra_name = "cbc(aes)",
	.cra_driver_name = "p8_aes_cbc",
	.cra_module = THIS_MODULE,
	.cra_priority = 2000,
	.cra_type = &crypto_blkcipher_type,
	.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER | CRYPTO_ALG_NEED_FALLBACK,
	.cra_alignmask = 0,
	.cra_blocksize = AES_BLOCK_SIZE,
	.cra_ctxsize = sizeof(struct p8_aes_cbc_ctx),
	.cra_init = p8_aes_cbc_init,
	.cra_exit = p8_aes_cbc_exit,
	.cra_blkcipher = {
			  .ivsize = AES_BLOCK_SIZE,
			  .min_keysize = AES_MIN_KEY_SIZE,
			  .max_keysize = AES_MAX_KEY_SIZE,
			  .setkey = p8_aes_cbc_setkey,
			  .encrypt = p8_aes_cbc_encrypt,
			  .decrypt = p8_aes_cbc_decrypt,
	},
};