xref: /openbmc/linux/drivers/crypto/padlock-aes.c (revision 4a075bd4)
/*
 * Cryptographic API.
 *
 * Support for VIA PadLock hardware crypto engine.
 *
 * Copyright (c) 2004  Michal Ludvig <michal@logix.cz>
 *
 */

#include <crypto/algapi.h>
#include <crypto/aes.h>
#include <crypto/padlock.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/percpu.h>
#include <linux/smp.h>
#include <linux/slab.h>
#include <asm/cpu_device_id.h>
#include <asm/byteorder.h>
#include <asm/processor.h>
#include <asm/fpu/api.h>

/*
 * Number of data blocks actually fetched for each xcrypt insn.
 * Processors with prefetch errata will fetch extra blocks.
 */
static unsigned int ecb_fetch_blocks = 2;
#define MAX_ECB_FETCH_BLOCKS (8)
#define ecb_fetch_bytes (ecb_fetch_blocks * AES_BLOCK_SIZE)

static unsigned int cbc_fetch_blocks = 1;
#define MAX_CBC_FETCH_BLOCKS (4)
#define cbc_fetch_bytes (cbc_fetch_blocks * AES_BLOCK_SIZE)

/* Control word. */
struct cword {
	unsigned int __attribute__ ((__packed__))
		rounds:4,
		algo:3,
		keygen:1,
		interm:1,
		encdec:1,
		ksize:2;
} __attribute__ ((__aligned__(PADLOCK_ALIGNMENT)));
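/*
 * The bitfield above mirrors the xcrypt control word as described in VIA's
 * PadLock programming guide: round count, algorithm selector, hardware key
 * generation, intermediate-result mode, encrypt/decrypt direction and key
 * size, packed into the low bits of a 16-byte-aligned word.
 */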

/* Whenever making any changes to the following
 * structure, *make sure* you keep E, d_data
 * and cword aligned on 16-byte boundaries, and
 * that the hardware can access 16 * 16 bytes of E and d_data
 * (only the first 15 * 16 bytes matter but the HW reads
 * more).
 */
struct aes_ctx {
	u32 E[AES_MAX_KEYLENGTH_U32]
		__attribute__ ((__aligned__(PADLOCK_ALIGNMENT)));
	u32 d_data[AES_MAX_KEYLENGTH_U32]
		__attribute__ ((__aligned__(PADLOCK_ALIGNMENT)));
	struct {
		struct cword encrypt;
		struct cword decrypt;
	} cword;
	u32 *D;
};

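/*
 * Per-CPU cache of the control word most recently used by the xcrypt
 * engine; padlock_reset_key() only forces a key reload when the control
 * word about to be used differs from this one.
 */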
static DEFINE_PER_CPU(struct cword *, paes_last_cword);

/* Tells whether the ACE is capable of generating
   the extended key for a given key_len. */
static inline int
aes_hw_extkey_available(uint8_t key_len)
{
	/* TODO: We should check the actual CPU model/stepping
	         as it's possible that the capability will be
	         added in the next CPU revisions. */
	if (key_len == 16)
		return 1;
	return 0;
}

static inline struct aes_ctx *aes_ctx_common(void *ctx)
{
	unsigned long addr = (unsigned long)ctx;
	unsigned long align = PADLOCK_ALIGNMENT;

	if (align <= crypto_tfm_ctx_alignment())
		align = 1;
	return (struct aes_ctx *)ALIGN(addr, align);
}

static inline struct aes_ctx *aes_ctx(struct crypto_tfm *tfm)
{
	return aes_ctx_common(crypto_tfm_ctx(tfm));
}

static inline struct aes_ctx *blk_aes_ctx(struct crypto_blkcipher *tfm)
{
	return aes_ctx_common(crypto_blkcipher_ctx(tfm));
}

static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
		       unsigned int key_len)
{
	struct aes_ctx *ctx = aes_ctx(tfm);
	const __le32 *key = (const __le32 *)in_key;
	u32 *flags = &tfm->crt_flags;
	struct crypto_aes_ctx gen_aes;
	int cpu;

	if (key_len % 8) {
		*flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
		return -EINVAL;
	}

	/*
	 * If the hardware is capable of generating the extended key
	 * itself we must supply the plain key for both encryption
	 * and decryption.
	 */
	ctx->D = ctx->E;

	ctx->E[0] = le32_to_cpu(key[0]);
	ctx->E[1] = le32_to_cpu(key[1]);
	ctx->E[2] = le32_to_cpu(key[2]);
	ctx->E[3] = le32_to_cpu(key[3]);

	/* Prepare control words. */
	memset(&ctx->cword, 0, sizeof(ctx->cword));

	ctx->cword.decrypt.encdec = 1;
	ctx->cword.encrypt.rounds = 10 + (key_len - 16) / 4;
	ctx->cword.decrypt.rounds = ctx->cword.encrypt.rounds;
	ctx->cword.encrypt.ksize = (key_len - 16) / 8;
	ctx->cword.decrypt.ksize = ctx->cword.encrypt.ksize;

	/* Don't generate extended keys if the hardware can do it. */
	if (aes_hw_extkey_available(key_len))
		goto ok;

	ctx->D = ctx->d_data;
	ctx->cword.encrypt.keygen = 1;
	ctx->cword.decrypt.keygen = 1;

	if (crypto_aes_expand_key(&gen_aes, in_key, key_len)) {
		*flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
		return -EINVAL;
	}

	memcpy(ctx->E, gen_aes.key_enc, AES_MAX_KEYLENGTH);
	memcpy(ctx->D, gen_aes.key_dec, AES_MAX_KEYLENGTH);

ok:
	for_each_online_cpu(cpu)
		if (&ctx->cword.encrypt == per_cpu(paes_last_cword, cpu) ||
		    &ctx->cword.decrypt == per_cpu(paes_last_cword, cpu))
			per_cpu(paes_last_cword, cpu) = NULL;

	return 0;
}

/* ====== Encryption/decryption routines ====== */

/* These are the real calls to PadLock. */
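/*
 * The xcrypt instructions cache the expanded key inside the engine.  Per
 * the PadLock documentation, writing EFLAGS (pushf/popf) forces the next
 * xcrypt to reload the key and control word from memory, so that cost is
 * only paid when the control word differs from the one last used on this
 * CPU.
 */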
static inline void padlock_reset_key(struct cword *cword)
{
	int cpu = raw_smp_processor_id();

	if (cword != per_cpu(paes_last_cword, cpu))
#ifndef CONFIG_X86_64
		asm volatile ("pushfl; popfl");
#else
		asm volatile ("pushfq; popfq");
#endif
}

static inline void padlock_store_cword(struct cword *cword)
{
	per_cpu(paes_last_cword, raw_smp_processor_id()) = cword;
}

/*
 * While the padlock instructions don't use FP/SSE registers, they
 * generate a spurious DNA fault when CR0.TS is '1'.  Fortunately,
 * the kernel doesn't use CR0.TS.
 */

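/*
 * The two helpers below emit the xcrypt opcodes by hand (0xF3 0x0F 0xA7
 * 0xC8 is "rep xcryptecb", 0xF3 0x0F 0xA7 0xD0 is "rep xcryptcbc"),
 * presumably because the mnemonics are not known to all assemblers.
 * The engine takes its operands in fixed registers: ESI = source,
 * EDI = destination, EDX = control word, EBX = key, ECX = block count,
 * and for CBC also EAX = IV pointer (updated by the instruction).
 */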
static inline void rep_xcrypt_ecb(const u8 *input, u8 *output, void *key,
				  struct cword *control_word, int count)
{
	asm volatile (".byte 0xf3,0x0f,0xa7,0xc8"	/* rep xcryptecb */
		      : "+S"(input), "+D"(output)
		      : "d"(control_word), "b"(key), "c"(count));
}

static inline u8 *rep_xcrypt_cbc(const u8 *input, u8 *output, void *key,
				 u8 *iv, struct cword *control_word, int count)
{
	asm volatile (".byte 0xf3,0x0f,0xa7,0xd0"	/* rep xcryptcbc */
		      : "+S" (input), "+D" (output), "+a" (iv)
		      : "d" (control_word), "b" (key), "c" (count));
	return iv;
}

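/*
 * Bounce-buffer variants: the engine may prefetch up to a full fetch
 * window beyond the blocks it was asked to process, so inputs that end
 * near a page boundary are first copied onto the stack.  The buffers are
 * sized for at most MAX_*_FETCH_BLOCKS - 1 blocks, which is the only case
 * in which these helpers are called; any prefetch beyond them then falls
 * into the caller's stack frame, which is assumed to be mapped.
 */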
static void ecb_crypt_copy(const u8 *in, u8 *out, u32 *key,
			   struct cword *cword, int count)
{
	/*
	 * Padlock prefetches extra data so we must provide mapped input buffers.
	 * Assume there are at least 16 bytes of stack already in use.
	 */
	u8 buf[AES_BLOCK_SIZE * (MAX_ECB_FETCH_BLOCKS - 1) + PADLOCK_ALIGNMENT - 1];
	u8 *tmp = PTR_ALIGN(&buf[0], PADLOCK_ALIGNMENT);

	memcpy(tmp, in, count * AES_BLOCK_SIZE);
	rep_xcrypt_ecb(tmp, out, key, cword, count);
}

static u8 *cbc_crypt_copy(const u8 *in, u8 *out, u32 *key,
			   u8 *iv, struct cword *cword, int count)
{
	/*
	 * Padlock prefetches extra data so we must provide mapped input buffers.
	 * Assume there are at least 16 bytes of stack already in use.
	 */
	u8 buf[AES_BLOCK_SIZE * (MAX_CBC_FETCH_BLOCKS - 1) + PADLOCK_ALIGNMENT - 1];
	u8 *tmp = PTR_ALIGN(&buf[0], PADLOCK_ALIGNMENT);

	memcpy(tmp, in, count * AES_BLOCK_SIZE);
	return rep_xcrypt_cbc(tmp, out, key, iv, cword, count);
}

static inline void ecb_crypt(const u8 *in, u8 *out, u32 *key,
			     struct cword *cword, int count)
{
	/* Padlock in ECB mode fetches at least ecb_fetch_bytes of data.
	 * We could avoid some copying here but it's probably not worth it.
	 */
	if (unlikely(offset_in_page(in) + ecb_fetch_bytes > PAGE_SIZE)) {
		ecb_crypt_copy(in, out, key, cword, count);
		return;
	}

	rep_xcrypt_ecb(in, out, key, cword, count);
}

static inline u8 *cbc_crypt(const u8 *in, u8 *out, u32 *key,
			    u8 *iv, struct cword *cword, int count)
{
	/* Padlock in CBC mode fetches at least cbc_fetch_bytes of data. */
	if (unlikely(offset_in_page(in) + cbc_fetch_bytes > PAGE_SIZE))
		return cbc_crypt_copy(in, out, key, iv, cword, count);

	return rep_xcrypt_cbc(in, out, key, iv, cword, count);
}

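/*
 * Bulk helpers.  On CPUs with the prefetch erratum the engine reads a full
 * fetch window (ecb_fetch_blocks/cbc_fetch_blocks) per iteration, so the
 * last iteration of a "rep xcrypt" can read past the end of the source.
 * These helpers therefore process the odd remainder (count % fetch size)
 * first, while the rest of the buffer still follows it in memory, and then
 * run the remaining exact multiple of the fetch size in one rep.  Requests
 * smaller than a fetch window fall back to ecb_crypt()/cbc_crypt(), which
 * bounce-buffer near page boundaries.
 */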
static inline void padlock_xcrypt_ecb(const u8 *input, u8 *output, void *key,
				      void *control_word, u32 count)
{
	u32 initial = count & (ecb_fetch_blocks - 1);

	if (count < ecb_fetch_blocks) {
		ecb_crypt(input, output, key, control_word, count);
		return;
	}

	count -= initial;

	if (initial)
		asm volatile (".byte 0xf3,0x0f,0xa7,0xc8"	/* rep xcryptecb */
			      : "+S"(input), "+D"(output)
			      : "d"(control_word), "b"(key), "c"(initial));

	asm volatile (".byte 0xf3,0x0f,0xa7,0xc8"	/* rep xcryptecb */
		      : "+S"(input), "+D"(output)
		      : "d"(control_word), "b"(key), "c"(count));
}

static inline u8 *padlock_xcrypt_cbc(const u8 *input, u8 *output, void *key,
				     u8 *iv, void *control_word, u32 count)
{
	u32 initial = count & (cbc_fetch_blocks - 1);

	if (count < cbc_fetch_blocks)
		return cbc_crypt(input, output, key, iv, control_word, count);

	count -= initial;

	if (initial)
		asm volatile (".byte 0xf3,0x0f,0xa7,0xd0"	/* rep xcryptcbc */
			      : "+S" (input), "+D" (output), "+a" (iv)
			      : "d" (control_word), "b" (key), "c" (initial));

	asm volatile (".byte 0xf3,0x0f,0xa7,0xd0"	/* rep xcryptcbc */
		      : "+S" (input), "+D" (output), "+a" (iv)
		      : "d" (control_word), "b" (key), "c" (count));
	return iv;
}

static void aes_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
{
	struct aes_ctx *ctx = aes_ctx(tfm);

	padlock_reset_key(&ctx->cword.encrypt);
	ecb_crypt(in, out, ctx->E, &ctx->cword.encrypt, 1);
	padlock_store_cword(&ctx->cword.encrypt);
}

static void aes_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
{
	struct aes_ctx *ctx = aes_ctx(tfm);

	padlock_reset_key(&ctx->cword.encrypt);
	ecb_crypt(in, out, ctx->D, &ctx->cword.decrypt, 1);
	padlock_store_cword(&ctx->cword.encrypt);
}

static struct crypto_alg aes_alg = {
	.cra_name		=	"aes",
	.cra_driver_name	=	"aes-padlock",
	.cra_priority		=	PADLOCK_CRA_PRIORITY,
	.cra_flags		=	CRYPTO_ALG_TYPE_CIPHER,
	.cra_blocksize		=	AES_BLOCK_SIZE,
	.cra_ctxsize		=	sizeof(struct aes_ctx),
	.cra_alignmask		=	PADLOCK_ALIGNMENT - 1,
	.cra_module		=	THIS_MODULE,
	.cra_u			=	{
		.cipher = {
			.cia_min_keysize	=	AES_MIN_KEY_SIZE,
			.cia_max_keysize	=	AES_MAX_KEY_SIZE,
			.cia_setkey		=	aes_set_key,
			.cia_encrypt		=	aes_encrypt,
			.cia_decrypt		=	aes_decrypt,
		}
	}
};

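/*
 * Scatterlist walk used by the ecb(aes) and cbc(aes) blkcipher modes
 * below: blkcipher_walk_virt() hands back virtually mapped chunks; each
 * chunk is processed in whole AES blocks and any tail bytes are returned
 * to blkcipher_walk_done() to be dealt with in the next iteration.
 */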
static int ecb_aes_encrypt(struct blkcipher_desc *desc,
			   struct scatterlist *dst, struct scatterlist *src,
			   unsigned int nbytes)
{
	struct aes_ctx *ctx = blk_aes_ctx(desc->tfm);
	struct blkcipher_walk walk;
	int err;

	padlock_reset_key(&ctx->cword.encrypt);

	blkcipher_walk_init(&walk, dst, src, nbytes);
	err = blkcipher_walk_virt(desc, &walk);

	while ((nbytes = walk.nbytes)) {
		padlock_xcrypt_ecb(walk.src.virt.addr, walk.dst.virt.addr,
				   ctx->E, &ctx->cword.encrypt,
				   nbytes / AES_BLOCK_SIZE);
		nbytes &= AES_BLOCK_SIZE - 1;
		err = blkcipher_walk_done(desc, &walk, nbytes);
	}

	padlock_store_cword(&ctx->cword.encrypt);

	return err;
}

static int ecb_aes_decrypt(struct blkcipher_desc *desc,
			   struct scatterlist *dst, struct scatterlist *src,
			   unsigned int nbytes)
{
	struct aes_ctx *ctx = blk_aes_ctx(desc->tfm);
	struct blkcipher_walk walk;
	int err;

	padlock_reset_key(&ctx->cword.decrypt);

	blkcipher_walk_init(&walk, dst, src, nbytes);
	err = blkcipher_walk_virt(desc, &walk);

	while ((nbytes = walk.nbytes)) {
		padlock_xcrypt_ecb(walk.src.virt.addr, walk.dst.virt.addr,
				   ctx->D, &ctx->cword.decrypt,
				   nbytes / AES_BLOCK_SIZE);
		nbytes &= AES_BLOCK_SIZE - 1;
		err = blkcipher_walk_done(desc, &walk, nbytes);
	}

	padlock_store_cword(&ctx->cword.encrypt);

	return err;
}

static struct crypto_alg ecb_aes_alg = {
	.cra_name		=	"ecb(aes)",
	.cra_driver_name	=	"ecb-aes-padlock",
	.cra_priority		=	PADLOCK_COMPOSITE_PRIORITY,
	.cra_flags		=	CRYPTO_ALG_TYPE_BLKCIPHER,
	.cra_blocksize		=	AES_BLOCK_SIZE,
	.cra_ctxsize		=	sizeof(struct aes_ctx),
	.cra_alignmask		=	PADLOCK_ALIGNMENT - 1,
	.cra_type		=	&crypto_blkcipher_type,
	.cra_module		=	THIS_MODULE,
	.cra_u			=	{
		.blkcipher = {
			.min_keysize		=	AES_MIN_KEY_SIZE,
			.max_keysize		=	AES_MAX_KEY_SIZE,
			.setkey			=	aes_set_key,
			.encrypt		=	ecb_aes_encrypt,
			.decrypt		=	ecb_aes_decrypt,
		}
	}
};

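/*
 * For CBC encryption the engine leaves a pointer to the next IV (the last
 * ciphertext block produced) in EAX; padlock_xcrypt_cbc() returns it and
 * the encrypt path copies it back into walk.iv so that chaining continues
 * correctly across scatterlist chunks.
 */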
static int cbc_aes_encrypt(struct blkcipher_desc *desc,
			   struct scatterlist *dst, struct scatterlist *src,
			   unsigned int nbytes)
{
	struct aes_ctx *ctx = blk_aes_ctx(desc->tfm);
	struct blkcipher_walk walk;
	int err;

	padlock_reset_key(&ctx->cword.encrypt);

	blkcipher_walk_init(&walk, dst, src, nbytes);
	err = blkcipher_walk_virt(desc, &walk);

	while ((nbytes = walk.nbytes)) {
		u8 *iv = padlock_xcrypt_cbc(walk.src.virt.addr,
					    walk.dst.virt.addr, ctx->E,
					    walk.iv, &ctx->cword.encrypt,
					    nbytes / AES_BLOCK_SIZE);
		memcpy(walk.iv, iv, AES_BLOCK_SIZE);
		nbytes &= AES_BLOCK_SIZE - 1;
		err = blkcipher_walk_done(desc, &walk, nbytes);
	}

	padlock_store_cword(&ctx->cword.decrypt);

	return err;
}

static int cbc_aes_decrypt(struct blkcipher_desc *desc,
			   struct scatterlist *dst, struct scatterlist *src,
			   unsigned int nbytes)
{
	struct aes_ctx *ctx = blk_aes_ctx(desc->tfm);
	struct blkcipher_walk walk;
	int err;

	padlock_reset_key(&ctx->cword.encrypt);

	blkcipher_walk_init(&walk, dst, src, nbytes);
	err = blkcipher_walk_virt(desc, &walk);

	while ((nbytes = walk.nbytes)) {
		padlock_xcrypt_cbc(walk.src.virt.addr, walk.dst.virt.addr,
				   ctx->D, walk.iv, &ctx->cword.decrypt,
				   nbytes / AES_BLOCK_SIZE);
		nbytes &= AES_BLOCK_SIZE - 1;
		err = blkcipher_walk_done(desc, &walk, nbytes);
	}

	padlock_store_cword(&ctx->cword.encrypt);

	return err;
}

static struct crypto_alg cbc_aes_alg = {
	.cra_name		=	"cbc(aes)",
	.cra_driver_name	=	"cbc-aes-padlock",
	.cra_priority		=	PADLOCK_COMPOSITE_PRIORITY,
	.cra_flags		=	CRYPTO_ALG_TYPE_BLKCIPHER,
	.cra_blocksize		=	AES_BLOCK_SIZE,
	.cra_ctxsize		=	sizeof(struct aes_ctx),
	.cra_alignmask		=	PADLOCK_ALIGNMENT - 1,
	.cra_type		=	&crypto_blkcipher_type,
	.cra_module		=	THIS_MODULE,
	.cra_u			=	{
		.blkcipher = {
			.min_keysize		=	AES_MIN_KEY_SIZE,
			.max_keysize		=	AES_MAX_KEY_SIZE,
			.ivsize			=	AES_BLOCK_SIZE,
			.setkey			=	aes_set_key,
			.encrypt		=	cbc_aes_encrypt,
			.decrypt		=	cbc_aes_decrypt,
		}
	}
};

static const struct x86_cpu_id padlock_cpu_id[] = {
	X86_FEATURE_MATCH(X86_FEATURE_XCRYPT),
	{}
};
MODULE_DEVICE_TABLE(x86cpu, padlock_cpu_id);

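/*
 * Module init: bail out unless the CPU advertises the xcrypt feature and
 * firmware has actually enabled it (XCRYPT_EN), register the three
 * algorithms, and widen the fetch-block counts on VIA Nano stepping 2
 * parts, which are affected by the prefetch erratum mentioned at the top
 * of this file.
 */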
static int __init padlock_init(void)
{
	int ret;
	struct cpuinfo_x86 *c = &cpu_data(0);

	if (!x86_match_cpu(padlock_cpu_id))
		return -ENODEV;

	if (!boot_cpu_has(X86_FEATURE_XCRYPT_EN)) {
		printk(KERN_NOTICE PFX "VIA PadLock detected, but not enabled. Hmm, strange...\n");
		return -ENODEV;
	}

	if ((ret = crypto_register_alg(&aes_alg)))
		goto aes_err;

	if ((ret = crypto_register_alg(&ecb_aes_alg)))
		goto ecb_aes_err;

	if ((ret = crypto_register_alg(&cbc_aes_alg)))
		goto cbc_aes_err;

	printk(KERN_NOTICE PFX "Using VIA PadLock ACE for AES algorithm.\n");

	if (c->x86 == 6 && c->x86_model == 15 && c->x86_stepping == 2) {
		ecb_fetch_blocks = MAX_ECB_FETCH_BLOCKS;
		cbc_fetch_blocks = MAX_CBC_FETCH_BLOCKS;
		printk(KERN_NOTICE PFX "VIA Nano stepping 2 detected: enabling workaround.\n");
	}

out:
	return ret;

cbc_aes_err:
	crypto_unregister_alg(&ecb_aes_alg);
ecb_aes_err:
	crypto_unregister_alg(&aes_alg);
aes_err:
	printk(KERN_ERR PFX "VIA PadLock AES initialization failed.\n");
	goto out;
}

static void __exit padlock_fini(void)
{
	crypto_unregister_alg(&cbc_aes_alg);
	crypto_unregister_alg(&ecb_aes_alg);
	crypto_unregister_alg(&aes_alg);
}

module_init(padlock_init);
module_exit(padlock_fini);

MODULE_DESCRIPTION("VIA PadLock AES algorithm support");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Michal Ludvig");

MODULE_ALIAS_CRYPTO("aes");