/*
 * Cryptographic API.
 *
 * Support for VIA PadLock hardware crypto engine.
 *
 * Copyright (c) 2004  Michal Ludvig <michal@logix.cz>
 *
 */

#include <crypto/algapi.h>
#include <crypto/aes.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <asm/byteorder.h>
#include "padlock.h"

/* Control word. */
struct cword {
	unsigned int __attribute__ ((__packed__))
		rounds:4,
		algo:3,
		keygen:1,
		interm:1,
		encdec:1,
		ksize:2;
} __attribute__ ((__aligned__(PADLOCK_ALIGNMENT)));
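
/*
 * Field semantics as used below: rounds is the AES round count
 * (10/12/14), encdec selects decryption when set, keygen tells the
 * hardware that software supplies a pre-expanded key, and ksize
 * encodes the key length (0/1/2 for 128/192/256 bits).  The algo and
 * interm fields are left at zero by this driver.
 */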

/* Whenever you make changes to the following structure, make sure
 * that E, d_data and cword stay aligned on 16-byte boundaries, and
 * that the hardware can access 16 * 16 bytes of E and d_data (only
 * the first 15 * 16 bytes matter, but the hardware reads more).
 */
struct aes_ctx {
	u32 E[AES_MAX_KEYLENGTH_U32]
		__attribute__ ((__aligned__(PADLOCK_ALIGNMENT)));
	u32 d_data[AES_MAX_KEYLENGTH_U32]
		__attribute__ ((__aligned__(PADLOCK_ALIGNMENT)));
	struct {
		struct cword encrypt;
		struct cword decrypt;
	} cword;
	u32 *D;
};

/* Tells whether the ACE is capable of generating
   the extended key for a given key_len. */
static inline int
aes_hw_extkey_available(uint8_t key_len)
{
	/* TODO: We should check the actual CPU model/stepping
	         as it's possible that the capability will be
	         added in the next CPU revisions. */
	if (key_len == 16)
		return 1;
	return 0;
}

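/*
 * The crypto layer only guarantees crypto_tfm_ctx_alignment() for a
 * tfm context, which may be smaller than PADLOCK_ALIGNMENT, so the
 * context pointer is rounded up to a 16-byte boundary by hand before
 * use.
 */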
static inline struct aes_ctx *aes_ctx_common(void *ctx)
{
	unsigned long addr = (unsigned long)ctx;
	unsigned long align = PADLOCK_ALIGNMENT;

	if (align <= crypto_tfm_ctx_alignment())
		align = 1;
	return (struct aes_ctx *)ALIGN(addr, align);
}

static inline struct aes_ctx *aes_ctx(struct crypto_tfm *tfm)
{
	return aes_ctx_common(crypto_tfm_ctx(tfm));
}

static inline struct aes_ctx *blk_aes_ctx(struct crypto_blkcipher *tfm)
{
	return aes_ctx_common(crypto_blkcipher_ctx(tfm));
}

static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
		       unsigned int key_len)
{
	struct aes_ctx *ctx = aes_ctx(tfm);
	const __le32 *key = (const __le32 *)in_key;
	u32 *flags = &tfm->crt_flags;
	struct crypto_aes_ctx gen_aes;

	if (key_len % 8) {
		*flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
		return -EINVAL;
	}

	/*
	 * If the hardware is capable of generating the extended key
	 * itself, we must supply only the plain key for both encryption
	 * and decryption.
	 */
	ctx->D = ctx->E;

	ctx->E[0] = le32_to_cpu(key[0]);
	ctx->E[1] = le32_to_cpu(key[1]);
	ctx->E[2] = le32_to_cpu(key[2]);
	ctx->E[3] = le32_to_cpu(key[3]);

	/* Prepare control words. */
	memset(&ctx->cword, 0, sizeof(ctx->cword));

	ctx->cword.decrypt.encdec = 1;
	ctx->cword.encrypt.rounds = 10 + (key_len - 16) / 4;
	ctx->cword.decrypt.rounds = ctx->cword.encrypt.rounds;
	ctx->cword.encrypt.ksize = (key_len - 16) / 8;
	ctx->cword.decrypt.ksize = ctx->cword.encrypt.ksize;

	/* Don't generate extended keys if the hardware can do it. */
	if (aes_hw_extkey_available(key_len))
		return 0;

	ctx->D = ctx->d_data;
	ctx->cword.encrypt.keygen = 1;
	ctx->cword.decrypt.keygen = 1;

	if (crypto_aes_expand_key(&gen_aes, in_key, key_len)) {
		*flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
		return -EINVAL;
	}

	memcpy(ctx->E, gen_aes.key_enc, AES_MAX_KEYLENGTH);
	memcpy(ctx->D, gen_aes.key_dec, AES_MAX_KEYLENGTH);
	return 0;
}

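/*
 * Worked through for the three AES key sizes, the arithmetic above
 * gives:
 *
 *	key_len = 16: rounds = 10 + 0/4  = 10, ksize = 0
 *	key_len = 24: rounds = 10 + 8/4  = 12, ksize = 1
 *	key_len = 32: rounds = 10 + 16/4 = 14, ksize = 2
 */
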
/* ====== Encryption/decryption routines ====== */

/* These are the real calls to PadLock. */
static inline void padlock_reset_key(void)
{
	asm volatile ("pushfl; popfl");
}
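
/*
 * Note on the pushfl/popfl pair above: the xcrypt instructions cache
 * key material between invocations, and (per VIA's programming notes)
 * a reload of EFLAGS is what tells the unit to re-read the key and
 * control word from memory, so this is the cheapest way to force a
 * key reload before switching contexts.
 */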

static inline void padlock_xcrypt(const u8 *input, u8 *output, void *key,
				  void *control_word)
{
	asm volatile (".byte 0xf3,0x0f,0xa7,0xc8"	/* rep xcryptecb */
		      : "+S"(input), "+D"(output)
		      : "d"(control_word), "b"(key), "c"(1));
}
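
/*
 * The instruction is emitted as raw opcode bytes (0xf3 0x0f 0xa7 0xc8,
 * i.e. REP XCRYPTECB) because assemblers of the time did not know the
 * mnemonic.  The register contract is fixed by the hardware, as the
 * asm constraints show: ESI is the source, EDI the destination, EBX
 * the key, EDX the control word and ECX the block count.
 */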

/* Encrypt one block via a bounce buffer that is two blocks long, so
 * the hardware's read-ahead past the first block stays inside buf. */
static void aes_crypt_copy(const u8 *in, u8 *out, u32 *key, struct cword *cword)
{
	u8 buf[AES_BLOCK_SIZE * 2 + PADLOCK_ALIGNMENT - 1];
	u8 *tmp = PTR_ALIGN(&buf[0], PADLOCK_ALIGNMENT);

	memcpy(tmp, in, AES_BLOCK_SIZE);
	padlock_xcrypt(tmp, out, key, cword);
}

static inline void aes_crypt(const u8 *in, u8 *out, u32 *key,
			     struct cword *cword)
{
	/*
	 * padlock_xcrypt reads at least two blocks of input.  If the
	 * single input block is the last block of a page, that
	 * read-ahead could cross into an unmapped page, so bounce it
	 * through an aligned buffer instead.
	 */
	if (unlikely(!(((unsigned long)in ^ (PAGE_SIZE - AES_BLOCK_SIZE)) &
		       (PAGE_SIZE - 1)))) {
		aes_crypt_copy(in, out, key, cword);
		return;
	}

	padlock_xcrypt(in, out, key, cword);
}

static inline void padlock_xcrypt_ecb(const u8 *input, u8 *output, void *key,
				      void *control_word, u32 count)
{
	if (count == 1) {
		aes_crypt(input, output, key, control_word);
		return;
	}

	asm volatile ("test $1, %%cl;"
		      "je 1f;"
		      "lea -1(%%ecx), %%eax;"
		      "mov $1, %%ecx;"
		      ".byte 0xf3,0x0f,0xa7,0xc8;"	/* rep xcryptecb */
		      "mov %%eax, %%ecx;"
		      "1:"
		      ".byte 0xf3,0x0f,0xa7,0xc8"	/* rep xcryptecb */
		      : "+S"(input), "+D"(output)
		      : "d"(control_word), "b"(key), "c"(count)
		      : "ax");
}
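
/*
 * The asm above splits an odd block count into 1 + (count - 1): when
 * ECX is odd, one block is handled first, leaving an even count for
 * the bulk "rep xcryptecb".  Given the two-block read-ahead noted in
 * aes_crypt(), this plausibly keeps the final iteration from fetching
 * one block past the end of the source.
 */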

static inline u8 *padlock_xcrypt_cbc(const u8 *input, u8 *output, void *key,
				     u8 *iv, void *control_word, u32 count)
{
	/* rep xcryptcbc */
	asm volatile (".byte 0xf3,0x0f,0xa7,0xd0"
		      : "+S" (input), "+D" (output), "+a" (iv)
		      : "d" (control_word), "b" (key), "c" (count));
	return iv;
}
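
/*
 * For xcryptcbc the IV pointer is passed in EAX; the "+a"(iv)
 * constraint lets the hardware advance it, so on return iv points at
 * the updated chaining value (the last ciphertext block).  The CBC
 * encrypt path below copies it back into walk.iv for the next chunk.
 */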

static void aes_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
{
	struct aes_ctx *ctx = aes_ctx(tfm);
	padlock_reset_key();
	aes_crypt(in, out, ctx->E, &ctx->cword.encrypt);
}

static void aes_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
{
	struct aes_ctx *ctx = aes_ctx(tfm);
	padlock_reset_key();
	aes_crypt(in, out, ctx->D, &ctx->cword.decrypt);
}

static struct crypto_alg aes_alg = {
	.cra_name		=	"aes",
	.cra_driver_name	=	"aes-padlock",
	.cra_priority		=	PADLOCK_CRA_PRIORITY,
	.cra_flags		=	CRYPTO_ALG_TYPE_CIPHER,
	.cra_blocksize		=	AES_BLOCK_SIZE,
	.cra_ctxsize		=	sizeof(struct aes_ctx),
	.cra_alignmask		=	PADLOCK_ALIGNMENT - 1,
	.cra_module		=	THIS_MODULE,
	.cra_list		=	LIST_HEAD_INIT(aes_alg.cra_list),
	.cra_u			=	{
		.cipher = {
			.cia_min_keysize	=	AES_MIN_KEY_SIZE,
			.cia_max_keysize	=	AES_MAX_KEY_SIZE,
			.cia_setkey		=	aes_set_key,
			.cia_encrypt		=	aes_encrypt,
			.cia_decrypt		=	aes_decrypt,
		}
	}
};

static int ecb_aes_encrypt(struct blkcipher_desc *desc,
			   struct scatterlist *dst, struct scatterlist *src,
			   unsigned int nbytes)
{
	struct aes_ctx *ctx = blk_aes_ctx(desc->tfm);
	struct blkcipher_walk walk;
	int err;

	padlock_reset_key();

	blkcipher_walk_init(&walk, dst, src, nbytes);
	err = blkcipher_walk_virt(desc, &walk);

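	/*
	 * Each walk chunk is a virtually mapped run of whole blocks;
	 * whatever partial block is left in nbytes after the masking
	 * below is handed back to blkcipher_walk_done() as the
	 * unprocessed remainder.
	 */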
	while ((nbytes = walk.nbytes)) {
		padlock_xcrypt_ecb(walk.src.virt.addr, walk.dst.virt.addr,
				   ctx->E, &ctx->cword.encrypt,
				   nbytes / AES_BLOCK_SIZE);
		nbytes &= AES_BLOCK_SIZE - 1;
		err = blkcipher_walk_done(desc, &walk, nbytes);
	}

	return err;
}

static int ecb_aes_decrypt(struct blkcipher_desc *desc,
			   struct scatterlist *dst, struct scatterlist *src,
			   unsigned int nbytes)
{
	struct aes_ctx *ctx = blk_aes_ctx(desc->tfm);
	struct blkcipher_walk walk;
	int err;

	padlock_reset_key();

	blkcipher_walk_init(&walk, dst, src, nbytes);
	err = blkcipher_walk_virt(desc, &walk);

	while ((nbytes = walk.nbytes)) {
		padlock_xcrypt_ecb(walk.src.virt.addr, walk.dst.virt.addr,
				   ctx->D, &ctx->cword.decrypt,
				   nbytes / AES_BLOCK_SIZE);
		nbytes &= AES_BLOCK_SIZE - 1;
		err = blkcipher_walk_done(desc, &walk, nbytes);
	}

	return err;
}

static struct crypto_alg ecb_aes_alg = {
	.cra_name		=	"ecb(aes)",
	.cra_driver_name	=	"ecb-aes-padlock",
	.cra_priority		=	PADLOCK_COMPOSITE_PRIORITY,
	.cra_flags		=	CRYPTO_ALG_TYPE_BLKCIPHER,
	.cra_blocksize		=	AES_BLOCK_SIZE,
	.cra_ctxsize		=	sizeof(struct aes_ctx),
	.cra_alignmask		=	PADLOCK_ALIGNMENT - 1,
	.cra_type		=	&crypto_blkcipher_type,
	.cra_module		=	THIS_MODULE,
	.cra_list		=	LIST_HEAD_INIT(ecb_aes_alg.cra_list),
	.cra_u			=	{
		.blkcipher = {
			.min_keysize		=	AES_MIN_KEY_SIZE,
			.max_keysize		=	AES_MAX_KEY_SIZE,
			.setkey			=	aes_set_key,
			.encrypt		=	ecb_aes_encrypt,
			.decrypt		=	ecb_aes_decrypt,
		}
	}
};

static int cbc_aes_encrypt(struct blkcipher_desc *desc,
			   struct scatterlist *dst, struct scatterlist *src,
			   unsigned int nbytes)
{
	struct aes_ctx *ctx = blk_aes_ctx(desc->tfm);
	struct blkcipher_walk walk;
	int err;

	padlock_reset_key();

	blkcipher_walk_init(&walk, dst, src, nbytes);
	err = blkcipher_walk_virt(desc, &walk);

	while ((nbytes = walk.nbytes)) {
		u8 *iv = padlock_xcrypt_cbc(walk.src.virt.addr,
					    walk.dst.virt.addr, ctx->E,
					    walk.iv, &ctx->cword.encrypt,
					    nbytes / AES_BLOCK_SIZE);
		memcpy(walk.iv, iv, AES_BLOCK_SIZE);
		nbytes &= AES_BLOCK_SIZE - 1;
		err = blkcipher_walk_done(desc, &walk, nbytes);
	}

	return err;
}

static int cbc_aes_decrypt(struct blkcipher_desc *desc,
			   struct scatterlist *dst, struct scatterlist *src,
			   unsigned int nbytes)
{
	struct aes_ctx *ctx = blk_aes_ctx(desc->tfm);
	struct blkcipher_walk walk;
	int err;

	padlock_reset_key();

	blkcipher_walk_init(&walk, dst, src, nbytes);
	err = blkcipher_walk_virt(desc, &walk);

	while ((nbytes = walk.nbytes)) {
		padlock_xcrypt_cbc(walk.src.virt.addr, walk.dst.virt.addr,
				   ctx->D, walk.iv, &ctx->cword.decrypt,
				   nbytes / AES_BLOCK_SIZE);
		nbytes &= AES_BLOCK_SIZE - 1;
		err = blkcipher_walk_done(desc, &walk, nbytes);
	}

	return err;
}

static struct crypto_alg cbc_aes_alg = {
	.cra_name		=	"cbc(aes)",
	.cra_driver_name	=	"cbc-aes-padlock",
	.cra_priority		=	PADLOCK_COMPOSITE_PRIORITY,
	.cra_flags		=	CRYPTO_ALG_TYPE_BLKCIPHER,
	.cra_blocksize		=	AES_BLOCK_SIZE,
	.cra_ctxsize		=	sizeof(struct aes_ctx),
	.cra_alignmask		=	PADLOCK_ALIGNMENT - 1,
	.cra_type		=	&crypto_blkcipher_type,
	.cra_module		=	THIS_MODULE,
	.cra_list		=	LIST_HEAD_INIT(cbc_aes_alg.cra_list),
	.cra_u			=	{
		.blkcipher = {
			.min_keysize		=	AES_MIN_KEY_SIZE,
			.max_keysize		=	AES_MAX_KEY_SIZE,
			.ivsize			=	AES_BLOCK_SIZE,
			.setkey			=	aes_set_key,
			.encrypt		=	cbc_aes_encrypt,
			.decrypt		=	cbc_aes_decrypt,
		}
	}
};

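/*
 * A minimal usage sketch (not part of this driver): once these
 * algorithms are registered, a kernel user of the blkcipher API of
 * this era would reach the PadLock implementation roughly as below.
 * The key, iv and buf names and their sizes are hypothetical
 * placeholders.
 *
 *	struct crypto_blkcipher *tfm;
 *	struct blkcipher_desc desc;
 *	struct scatterlist sg;
 *
 *	tfm = crypto_alloc_blkcipher("cbc(aes)", 0, 0);
 *	if (IS_ERR(tfm))
 *		return PTR_ERR(tfm);
 *	crypto_blkcipher_setkey(tfm, key, 16);
 *	sg_init_one(&sg, buf, 16);
 *	desc.tfm = tfm;
 *	desc.flags = 0;
 *	crypto_blkcipher_set_iv(tfm, iv, AES_BLOCK_SIZE);
 *	crypto_blkcipher_encrypt(&desc, &sg, &sg, 16);
 *	crypto_free_blkcipher(tfm);
 *
 * With cra_priority set above the generic implementations, the
 * "cbc(aes)" lookup prefers cbc-aes-padlock while this module is
 * loaded.
 */
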
static int __init padlock_init(void)
{
	int ret;

388 		printk(KERN_NOTICE PFX "VIA PadLock not detected.\n");
389 		return -ENODEV;
390 	}
391 
392 	if (!cpu_has_xcrypt_enabled) {
393 		printk(KERN_NOTICE PFX "VIA PadLock detected, but not enabled. Hmm, strange...\n");
394 		return -ENODEV;
395 	}
396 
397 	if ((ret = crypto_register_alg(&aes_alg)))
398 		goto aes_err;
399 
400 	if ((ret = crypto_register_alg(&ecb_aes_alg)))
401 		goto ecb_aes_err;
402 
403 	if ((ret = crypto_register_alg(&cbc_aes_alg)))
404 		goto cbc_aes_err;
405 
406 	printk(KERN_NOTICE PFX "Using VIA PadLock ACE for AES algorithm.\n");
407 
408 out:
409 	return ret;
410 
411 cbc_aes_err:
412 	crypto_unregister_alg(&ecb_aes_alg);
413 ecb_aes_err:
414 	crypto_unregister_alg(&aes_alg);
415 aes_err:
416 	printk(KERN_ERR PFX "VIA PadLock AES initialization failed.\n");
417 	goto out;
418 }

static void __exit padlock_fini(void)
{
	crypto_unregister_alg(&cbc_aes_alg);
	crypto_unregister_alg(&ecb_aes_alg);
	crypto_unregister_alg(&aes_alg);
}

module_init(padlock_init);
module_exit(padlock_fini);

MODULE_DESCRIPTION("VIA PadLock AES algorithm support");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Michal Ludvig");

MODULE_ALIAS("aes");