xref: /openbmc/linux/arch/x86/crypto/blowfish_glue.c (revision ee89bd6b)
1 /*
2  * Glue Code for assembler optimized version of Blowfish
3  *
4  * Copyright © 2011-2013 Jussi Kivilinna <jussi.kivilinna@iki.fi>
5  *
6  * CBC & ECB parts based on code (crypto/cbc.c,ecb.c) by:
7  *   Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
8  * CTR part based on code (crypto/ctr.c) by:
9  *   (C) Copyright IBM Corp. 2007 - Joy Latten <latten@us.ibm.com>
10  *
11  * This program is free software; you can redistribute it and/or modify
12  * it under the terms of the GNU General Public License as published by
13  * the Free Software Foundation; either version 2 of the License, or
14  * (at your option) any later version.
15  *
16  * This program is distributed in the hope that it will be useful,
17  * but WITHOUT ANY WARRANTY; without even the implied warranty of
18  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
19  * GNU General Public License for more details.
20  *
21  * You should have received a copy of the GNU General Public License
22  * along with this program; if not, write to the Free Software
23  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307
24  * USA
25  *
26  */
27 
28 #include <asm/processor.h>
29 #include <crypto/blowfish.h>
30 #include <linux/crypto.h>
31 #include <linux/init.h>
32 #include <linux/module.h>
33 #include <linux/types.h>
34 #include <crypto/algapi.h>
35 #include <asm/crypto/blowfish.h>
36 
/* regular block cipher functions */
/*
 * Assembler entry points (arch/x86/crypto/blowfish-x86_64-asm_64.S).
 * NOTE(review): the 'xor' flag presumably selects whether the encrypted
 * block is XORed into dst rather than stored — confirm against the asm
 * and the inline wrappers in <asm/crypto/blowfish.h>.
 */
asmlinkage void __blowfish_enc_blk(struct bf_ctx *ctx, u8 *dst, const u8 *src,
				   bool xor);
EXPORT_SYMBOL_GPL(__blowfish_enc_blk);

asmlinkage void blowfish_dec_blk(struct bf_ctx *ctx, u8 *dst, const u8 *src);
EXPORT_SYMBOL_GPL(blowfish_dec_blk);

/* 4-way parallel cipher functions */
asmlinkage void __blowfish_enc_blk_4way(struct bf_ctx *ctx, u8 *dst,
					const u8 *src, bool xor);
EXPORT_SYMBOL_GPL(__blowfish_enc_blk_4way);

asmlinkage void blowfish_dec_blk_4way(struct bf_ctx *ctx, u8 *dst,
				      const u8 *src);
EXPORT_SYMBOL_GPL(blowfish_dec_blk_4way);
53 
54 static void blowfish_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
55 {
56 	blowfish_enc_blk(crypto_tfm_ctx(tfm), dst, src);
57 }
58 
59 static void blowfish_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
60 {
61 	blowfish_dec_blk(crypto_tfm_ctx(tfm), dst, src);
62 }
63 
/*
 * Shared ECB walker for both directions.  Walks the scatterlists and
 * processes data in 4-block batches via fn_4way, then one block at a
 * time via fn.  Each ECB block is independent, so src and dst pointers
 * simply advance forward.
 *
 * fn/fn_4way are the single-block and 4-way assembler routines
 * (encrypt or decrypt, chosen by the caller).
 */
static int ecb_crypt(struct blkcipher_desc *desc, struct blkcipher_walk *walk,
		     void (*fn)(struct bf_ctx *, u8 *, const u8 *),
		     void (*fn_4way)(struct bf_ctx *, u8 *, const u8 *))
{
	struct bf_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
	unsigned int bsize = BF_BLOCK_SIZE;
	unsigned int nbytes;
	int err;

	err = blkcipher_walk_virt(desc, walk);

	while ((nbytes = walk->nbytes)) {
		u8 *wsrc = walk->src.virt.addr;
		u8 *wdst = walk->dst.virt.addr;

		/* Process four block batch */
		if (nbytes >= bsize * 4) {
			do {
				fn_4way(ctx, wdst, wsrc);

				wsrc += bsize * 4;
				wdst += bsize * 4;
				nbytes -= bsize * 4;
			} while (nbytes >= bsize * 4);

			/* Fewer than one block left: report it to walk_done. */
			if (nbytes < bsize)
				goto done;
		}

		/* Handle leftovers */
		do {
			fn(ctx, wdst, wsrc);

			wsrc += bsize;
			wdst += bsize;
			nbytes -= bsize;
		} while (nbytes >= bsize);

done:
		/* Tell the walk how many bytes remain unprocessed. */
		err = blkcipher_walk_done(desc, walk, nbytes);
	}

	return err;
}
108 
109 static int ecb_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
110 		       struct scatterlist *src, unsigned int nbytes)
111 {
112 	struct blkcipher_walk walk;
113 
114 	blkcipher_walk_init(&walk, dst, src, nbytes);
115 	return ecb_crypt(desc, &walk, blowfish_enc_blk, blowfish_enc_blk_4way);
116 }
117 
118 static int ecb_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
119 		       struct scatterlist *src, unsigned int nbytes)
120 {
121 	struct blkcipher_walk walk;
122 
123 	blkcipher_walk_init(&walk, dst, src, nbytes);
124 	return ecb_crypt(desc, &walk, blowfish_dec_blk, blowfish_dec_blk_4way);
125 }
126 
/*
 * CBC-encrypt all full blocks of the current walk segment.  CBC
 * encryption is inherently serial (each block's input depends on the
 * previous ciphertext), so no 4-way batching is possible here.
 * Returns the number of unprocessed tail bytes (< one block).
 */
static unsigned int __cbc_encrypt(struct blkcipher_desc *desc,
				  struct blkcipher_walk *walk)
{
	struct bf_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
	unsigned int bsize = BF_BLOCK_SIZE;
	unsigned int nbytes = walk->nbytes;
	u64 *src = (u64 *)walk->src.virt.addr;
	u64 *dst = (u64 *)walk->dst.virt.addr;
	u64 *iv = (u64 *)walk->iv;

	do {
		/* C = E(P ^ IV); encrypt in place in the destination. */
		*dst = *src ^ *iv;
		blowfish_enc_blk(ctx, (u8 *)dst, (u8 *)dst);
		/* Fresh ciphertext block becomes the IV for the next block. */
		iv = dst;

		src += 1;
		dst += 1;
		nbytes -= bsize;
	} while (nbytes >= bsize);

	/* Save the last ciphertext block as IV for the next segment. */
	*(u64 *)walk->iv = *iv;
	return nbytes;
}
150 
151 static int cbc_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
152 		       struct scatterlist *src, unsigned int nbytes)
153 {
154 	struct blkcipher_walk walk;
155 	int err;
156 
157 	blkcipher_walk_init(&walk, dst, src, nbytes);
158 	err = blkcipher_walk_virt(desc, &walk);
159 
160 	while ((nbytes = walk.nbytes)) {
161 		nbytes = __cbc_encrypt(desc, &walk);
162 		err = blkcipher_walk_done(desc, &walk, nbytes);
163 	}
164 
165 	return err;
166 }
167 
/*
 * CBC-decrypt all full blocks of the current walk segment.  Blocks are
 * processed back-to-front so decryption works in place (dst may alias
 * src): the ciphertext blocks still needed for the XOR step are copied
 * into ivs[] before the in-place 4-way decrypt overwrites them.
 * Returns the number of unprocessed tail bytes (< one block).
 */
static unsigned int __cbc_decrypt(struct blkcipher_desc *desc,
				  struct blkcipher_walk *walk)
{
	struct bf_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
	unsigned int bsize = BF_BLOCK_SIZE;
	unsigned int nbytes = walk->nbytes;
	u64 *src = (u64 *)walk->src.virt.addr;
	u64 *dst = (u64 *)walk->dst.virt.addr;
	u64 ivs[4 - 1];		/* saved ciphertexts for the 4-way batch */
	u64 last_iv;

	/* Start of the last block. */
	src += nbytes / bsize - 1;
	dst += nbytes / bsize - 1;

	/* Last ciphertext block becomes the chaining IV for the next call. */
	last_iv = *src;

	/* Process four block batch */
	if (nbytes >= bsize * 4) {
		do {
			nbytes -= bsize * 4 - bsize;
			src -= 4 - 1;
			dst -= 4 - 1;

			/* Stash the prior-ciphertext blocks the in-place
			 * decrypt below would clobber. */
			ivs[0] = src[0];
			ivs[1] = src[1];
			ivs[2] = src[2];

			blowfish_dec_blk_4way(ctx, (u8 *)dst, (u8 *)src);

			/* P[i] = D(C[i]) ^ C[i-1] for the last three blocks;
			 * the first of the four is chained below. */
			dst[1] ^= ivs[0];
			dst[2] ^= ivs[1];
			dst[3] ^= ivs[2];

			nbytes -= bsize;
			if (nbytes < bsize)
				goto done;

			*dst ^= *(src - 1);
			src -= 1;
			dst -= 1;
		} while (nbytes >= bsize * 4);

		if (nbytes < bsize)
			goto done;
	}

	/* Handle leftovers */
	for (;;) {
		blowfish_dec_blk(ctx, (u8 *)dst, (u8 *)src);

		nbytes -= bsize;
		if (nbytes < bsize)
			break;

		*dst ^= *(src - 1);
		src -= 1;
		dst -= 1;
	}

done:
	/* First block of the segment is XORed with the incoming IV. */
	*dst ^= *(u64 *)walk->iv;
	*(u64 *)walk->iv = last_iv;

	return nbytes;
}
234 
235 static int cbc_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
236 		       struct scatterlist *src, unsigned int nbytes)
237 {
238 	struct blkcipher_walk walk;
239 	int err;
240 
241 	blkcipher_walk_init(&walk, dst, src, nbytes);
242 	err = blkcipher_walk_virt(desc, &walk);
243 
244 	while ((nbytes = walk.nbytes)) {
245 		nbytes = __cbc_decrypt(desc, &walk);
246 		err = blkcipher_walk_done(desc, &walk, nbytes);
247 	}
248 
249 	return err;
250 }
251 
252 static void ctr_crypt_final(struct bf_ctx *ctx, struct blkcipher_walk *walk)
253 {
254 	u8 *ctrblk = walk->iv;
255 	u8 keystream[BF_BLOCK_SIZE];
256 	u8 *src = walk->src.virt.addr;
257 	u8 *dst = walk->dst.virt.addr;
258 	unsigned int nbytes = walk->nbytes;
259 
260 	blowfish_enc_blk(ctx, keystream, ctrblk);
261 	crypto_xor(keystream, src, nbytes);
262 	memcpy(dst, keystream, nbytes);
263 
264 	crypto_inc(ctrblk, BF_BLOCK_SIZE);
265 }
266 
/*
 * CTR-process all full blocks of the current walk segment, four blocks
 * at a time where possible.  The 64bit counter is kept in CPU byte
 * order in 'ctrblk' and converted to big-endian for each generated
 * counter block.  Returns the number of unprocessed tail bytes
 * (< one block); the caller finishes those via ctr_crypt_final().
 */
static unsigned int __ctr_crypt(struct blkcipher_desc *desc,
				struct blkcipher_walk *walk)
{
	struct bf_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
	unsigned int bsize = BF_BLOCK_SIZE;
	unsigned int nbytes = walk->nbytes;
	u64 *src = (u64 *)walk->src.virt.addr;
	u64 *dst = (u64 *)walk->dst.virt.addr;
	u64 ctrblk = be64_to_cpu(*(__be64 *)walk->iv);
	__be64 ctrblocks[4];

	/* Process four block batch */
	if (nbytes >= bsize * 4) {
		do {
			/* The xor variants combine the keystream into dst,
			 * so dst must hold the input data first. */
			if (dst != src) {
				dst[0] = src[0];
				dst[1] = src[1];
				dst[2] = src[2];
				dst[3] = src[3];
			}

			/* create ctrblks for parallel encrypt */
			ctrblocks[0] = cpu_to_be64(ctrblk++);
			ctrblocks[1] = cpu_to_be64(ctrblk++);
			ctrblocks[2] = cpu_to_be64(ctrblk++);
			ctrblocks[3] = cpu_to_be64(ctrblk++);

			blowfish_enc_blk_xor_4way(ctx, (u8 *)dst,
						  (u8 *)ctrblocks);

			src += 4;
			dst += 4;
		} while ((nbytes -= bsize * 4) >= bsize * 4);

		if (nbytes < bsize)
			goto done;
	}

	/* Handle leftovers */
	do {
		if (dst != src)
			*dst = *src;

		ctrblocks[0] = cpu_to_be64(ctrblk++);

		blowfish_enc_blk_xor(ctx, (u8 *)dst, (u8 *)ctrblocks);

		src += 1;
		dst += 1;
	} while ((nbytes -= bsize) >= bsize);

done:
	/* Store the advanced counter back into the IV. */
	*(__be64 *)walk->iv = cpu_to_be64(ctrblk);
	return nbytes;
}
322 
323 static int ctr_crypt(struct blkcipher_desc *desc, struct scatterlist *dst,
324 		     struct scatterlist *src, unsigned int nbytes)
325 {
326 	struct blkcipher_walk walk;
327 	int err;
328 
329 	blkcipher_walk_init(&walk, dst, src, nbytes);
330 	err = blkcipher_walk_virt_block(desc, &walk, BF_BLOCK_SIZE);
331 
332 	while ((nbytes = walk.nbytes) >= BF_BLOCK_SIZE) {
333 		nbytes = __ctr_crypt(desc, &walk);
334 		err = blkcipher_walk_done(desc, &walk, nbytes);
335 	}
336 
337 	if (walk.nbytes) {
338 		ctr_crypt_final(crypto_blkcipher_ctx(desc->tfm), &walk);
339 		err = blkcipher_walk_done(desc, &walk, 0);
340 	}
341 
342 	return err;
343 }
344 
/*
 * Registered algorithms: the bare cipher plus ECB, CBC and CTR
 * blkcipher modes built on the assembler block functions.
 */
static struct crypto_alg bf_algs[4] = { {
	/* Plain single-block cipher backed by the asm implementation. */
	.cra_name		= "blowfish",
	.cra_driver_name	= "blowfish-asm",
	.cra_priority		= 200,
	.cra_flags		= CRYPTO_ALG_TYPE_CIPHER,
	.cra_blocksize		= BF_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct bf_ctx),
	.cra_alignmask		= 0,
	.cra_module		= THIS_MODULE,
	.cra_u = {
		.cipher = {
			.cia_min_keysize	= BF_MIN_KEY_SIZE,
			.cia_max_keysize	= BF_MAX_KEY_SIZE,
			.cia_setkey		= blowfish_setkey,
			.cia_encrypt		= blowfish_encrypt,
			.cia_decrypt		= blowfish_decrypt,
		}
	}
}, {
	/* ECB mode; no IV. */
	.cra_name		= "ecb(blowfish)",
	.cra_driver_name	= "ecb-blowfish-asm",
	.cra_priority		= 300,
	.cra_flags		= CRYPTO_ALG_TYPE_BLKCIPHER,
	.cra_blocksize		= BF_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct bf_ctx),
	.cra_alignmask		= 0,
	.cra_type		= &crypto_blkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_u = {
		.blkcipher = {
			.min_keysize	= BF_MIN_KEY_SIZE,
			.max_keysize	= BF_MAX_KEY_SIZE,
			.setkey		= blowfish_setkey,
			.encrypt	= ecb_encrypt,
			.decrypt	= ecb_decrypt,
		},
	},
}, {
	/* CBC mode; one-block IV. */
	.cra_name		= "cbc(blowfish)",
	.cra_driver_name	= "cbc-blowfish-asm",
	.cra_priority		= 300,
	.cra_flags		= CRYPTO_ALG_TYPE_BLKCIPHER,
	.cra_blocksize		= BF_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct bf_ctx),
	.cra_alignmask		= 0,
	.cra_type		= &crypto_blkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_u = {
		.blkcipher = {
			.min_keysize	= BF_MIN_KEY_SIZE,
			.max_keysize	= BF_MAX_KEY_SIZE,
			.ivsize		= BF_BLOCK_SIZE,
			.setkey		= blowfish_setkey,
			.encrypt	= cbc_encrypt,
			.decrypt	= cbc_decrypt,
		},
	},
}, {
	/* CTR mode: stream cipher, so cra_blocksize is 1; same handler
	 * serves encrypt and decrypt. */
	.cra_name		= "ctr(blowfish)",
	.cra_driver_name	= "ctr-blowfish-asm",
	.cra_priority		= 300,
	.cra_flags		= CRYPTO_ALG_TYPE_BLKCIPHER,
	.cra_blocksize		= 1,
	.cra_ctxsize		= sizeof(struct bf_ctx),
	.cra_alignmask		= 0,
	.cra_type		= &crypto_blkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_u = {
		.blkcipher = {
			.min_keysize	= BF_MIN_KEY_SIZE,
			.max_keysize	= BF_MAX_KEY_SIZE,
			.ivsize		= BF_BLOCK_SIZE,
			.setkey		= blowfish_setkey,
			.encrypt	= ctr_crypt,
			.decrypt	= ctr_crypt,
		},
	},
} };
423 
424 static bool is_blacklisted_cpu(void)
425 {
426 	if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
427 		return false;
428 
429 	if (boot_cpu_data.x86 == 0x0f) {
430 		/*
431 		 * On Pentium 4, blowfish-x86_64 is slower than generic C
432 		 * implementation because use of 64bit rotates (which are really
433 		 * slow on P4). Therefore blacklist P4s.
434 		 */
435 		return true;
436 	}
437 
438 	return false;
439 }
440 
/* Module parameter: non-zero registers the algs even on blacklisted CPUs. */
static int force;
module_param(force, int, 0);
MODULE_PARM_DESC(force, "Force module load, ignore CPU blacklist");
444 
445 static int __init init(void)
446 {
447 	if (!force && is_blacklisted_cpu()) {
448 		printk(KERN_INFO
449 			"blowfish-x86_64: performance on this CPU "
450 			"would be suboptimal: disabling "
451 			"blowfish-x86_64.\n");
452 		return -ENODEV;
453 	}
454 
455 	return crypto_register_algs(bf_algs, ARRAY_SIZE(bf_algs));
456 }
457 
458 static void __exit fini(void)
459 {
460 	crypto_unregister_algs(bf_algs, ARRAY_SIZE(bf_algs));
461 }
462 
module_init(init);
module_exit(fini);

/* Module metadata; aliases let userspace request "blowfish" generically. */
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Blowfish Cipher Algorithm, asm optimized");
MODULE_ALIAS("blowfish");
MODULE_ALIAS("blowfish-asm");