/*
 * Cryptographic API.
 *
 * Support for VIA PadLock hardware crypto engine.
 *
 * Copyright (c) 2006  Michal Ludvig <michal@logix.cz>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 */

#include <crypto/algapi.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/cryptohash.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/scatterlist.h>
#include "padlock.h"

#define SHA1_DEFAULT_FALLBACK	"sha1-generic"
#define SHA1_DIGEST_SIZE        20
#define SHA1_HMAC_BLOCK_SIZE    64

#define SHA256_DEFAULT_FALLBACK "sha256-generic"
#define SHA256_DIGEST_SIZE      32
#define SHA256_HMAC_BLOCK_SIZE  64

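/*
 * Per-tfm context.  Input is collected into a one-page buffer (@data,
 * with @used bytes accumulated so far) and handed to the PadLock
 * microcode in one go at final time.  If more than a page arrives,
 * @bypass is set and all further processing goes to the software
 * @fallback hash instead.
 */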
struct padlock_sha_ctx {
	char		*data;
	size_t		used;
	int		bypass;
	void (*f_sha_padlock)(const char *in, char *out, int count);
	struct hash_desc fallback;
};

static inline struct padlock_sha_ctx *ctx(struct crypto_tfm *tfm)
{
	return crypto_tfm_ctx(tfm);
}

/* We'll need an aligned address on the stack. */
#define NEAREST_ALIGNED(ptr) \
	((void *)ALIGN((size_t)(ptr), PADLOCK_ALIGNMENT))

static struct crypto_alg sha1_alg, sha256_alg;

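/*
 * Switch this tfm over to the software fallback: initialise the
 * fallback hash, replay whatever has been buffered so far, and mark
 * the context so that later update/final calls use the fallback too.
 */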
static void padlock_sha_bypass(struct crypto_tfm *tfm)
{
	if (ctx(tfm)->bypass)
		return;

	crypto_hash_init(&ctx(tfm)->fallback);
	if (ctx(tfm)->data && ctx(tfm)->used) {
		struct scatterlist sg;

		sg_set_buf(&sg, ctx(tfm)->data, ctx(tfm)->used);
		crypto_hash_update(&ctx(tfm)->fallback, &sg, sg.length);
	}

	ctx(tfm)->used = 0;
	ctx(tfm)->bypass = 1;
}

static void padlock_sha_init(struct crypto_tfm *tfm)
{
	ctx(tfm)->used = 0;
	ctx(tfm)->bypass = 0;
}

static void padlock_sha_update(struct crypto_tfm *tfm,
			const uint8_t *data, unsigned int length)
{
	/* Our buffer is always one page. */
	if (unlikely(!ctx(tfm)->bypass &&
		     (ctx(tfm)->used + length > PAGE_SIZE)))
		padlock_sha_bypass(tfm);

	if (unlikely(ctx(tfm)->bypass)) {
		struct scatterlist sg;
		sg_set_buf(&sg, (uint8_t *)data, length);
		crypto_hash_update(&ctx(tfm)->fallback, &sg, length);
		return;
	}

	memcpy(ctx(tfm)->data + ctx(tfm)->used, data, length);
	ctx(tfm)->used += length;
}

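/*
 * Copy the digest out of the PadLock result buffer, byte-swapping each
 * 32-bit word into the big-endian layout that SHA-1/SHA-256 digests
 * use.
 */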
static inline void padlock_output_block(uint32_t *src,
			uint32_t *dst, size_t count)
{
	while (count--)
		*dst++ = swab32(*src++);
}

static void padlock_do_sha1(const char *in, char *out, int count)
{
	/* We can't store directly to *out as it may be unaligned. */
	/* Don't reduce the buffer size below 128 bytes!
	 * The PadLock microcode needs it that big. */
	char buf[128+16];
	char *result = NEAREST_ALIGNED(buf);

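	/* Standard SHA-1 initial hash values (FIPS 180-2). */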
	((uint32_t *)result)[0] = 0x67452301;
	((uint32_t *)result)[1] = 0xEFCDAB89;
	((uint32_t *)result)[2] = 0x98BADCFE;
	((uint32_t *)result)[3] = 0x10325476;
	((uint32_t *)result)[4] = 0xC3D2E1F0;

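	/*
	 * REP XSHA1: ESI = input message, EDI = hash state / result
	 * buffer, ECX = byte count, EAX = 0.  The count need not be
	 * block-aligned, so the microcode performs the final SHA
	 * padding itself.
	 */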
	asm volatile (".byte 0xf3,0x0f,0xa6,0xc8" /* rep xsha1 */
		      : "+S"(in), "+D"(result)
		      : "c"(count), "a"(0));

	padlock_output_block((uint32_t *)result, (uint32_t *)out, 5);
}

static void padlock_do_sha256(const char *in, char *out, int count)
{
	/* We can't store directly to *out as it may be unaligned. */
	/* Don't reduce the buffer size below 128 bytes!
	 * The PadLock microcode needs it that big. */
	char buf[128+16];
	char *result = NEAREST_ALIGNED(buf);

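	/* Standard SHA-256 initial hash values (FIPS 180-2). */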
	((uint32_t *)result)[0] = 0x6A09E667;
	((uint32_t *)result)[1] = 0xBB67AE85;
	((uint32_t *)result)[2] = 0x3C6EF372;
	((uint32_t *)result)[3] = 0xA54FF53A;
	((uint32_t *)result)[4] = 0x510E527F;
	((uint32_t *)result)[5] = 0x9B05688C;
	((uint32_t *)result)[6] = 0x1F83D9AB;
	((uint32_t *)result)[7] = 0x5BE0CD19;

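	/* Same register interface as REP XSHA1 above; this opcode
	 * selects the SHA-256 microcode. */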
	asm volatile (".byte 0xf3,0x0f,0xa6,0xd0" /* rep xsha256 */
		      : "+S"(in), "+D"(result)
		      : "c"(count), "a"(0));

	padlock_output_block((uint32_t *)result, (uint32_t *)out, 8);
}

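/*
 * Produce the digest: either finish the software fallback, or hand the
 * complete buffered message to the PadLock microcode in one operation.
 */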
static void padlock_sha_final(struct crypto_tfm *tfm, uint8_t *out)
{
	if (unlikely(ctx(tfm)->bypass)) {
		crypto_hash_final(&ctx(tfm)->fallback, out);
		ctx(tfm)->bypass = 0;
		return;
	}

	/* Pass the input buffer to the PadLock microcode... */
	ctx(tfm)->f_sha_padlock(ctx(tfm)->data, out, ctx(tfm)->used);

	ctx(tfm)->used = 0;
}

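/*
 * Allocate the one-page collection buffer and the software fallback
 * hash.  The fallback is requested by generic cra_name ("sha1" or
 * "sha256"); keeping CRYPTO_ALG_NEED_FALLBACK in the lookup mask
 * prevents the crypto core from handing back an implementation that
 * itself needs a fallback, such as this driver.
 */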
static int padlock_cra_init(struct crypto_tfm *tfm)
{
	const char *fallback_driver_name = tfm->__crt_alg->cra_name;
	struct crypto_hash *fallback_tfm;

	/* For now we allocate one page. This could
	 * be made configurable one day. */
	ctx(tfm)->data = (char *)__get_free_page(GFP_KERNEL);
	if (!ctx(tfm)->data)
		return -ENOMEM;

	/* Allocate a fallback and abort if it fails. */
	fallback_tfm = crypto_alloc_hash(fallback_driver_name, 0,
					 CRYPTO_ALG_ASYNC |
					 CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(fallback_tfm)) {
		printk(KERN_WARNING PFX "Fallback driver '%s' could not be loaded!\n",
		       fallback_driver_name);
		free_page((unsigned long)(ctx(tfm)->data));
		return PTR_ERR(fallback_tfm);
	}

	ctx(tfm)->fallback.tfm = fallback_tfm;
	return 0;
}

static int padlock_sha1_cra_init(struct crypto_tfm *tfm)
{
	ctx(tfm)->f_sha_padlock = padlock_do_sha1;

	return padlock_cra_init(tfm);
}

static int padlock_sha256_cra_init(struct crypto_tfm *tfm)
{
	ctx(tfm)->f_sha_padlock = padlock_do_sha256;

	return padlock_cra_init(tfm);
}

static void padlock_cra_exit(struct crypto_tfm *tfm)
{
	if (ctx(tfm)->data) {
		free_page((unsigned long)(ctx(tfm)->data));
		ctx(tfm)->data = NULL;
	}

	crypto_free_hash(ctx(tfm)->fallback.tfm);
	ctx(tfm)->fallback.tfm = NULL;
}

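/*
 * Algorithm descriptors.  CRYPTO_ALG_NEED_FALLBACK marks these as
 * implementations that require a software fallback, so callers that
 * cannot provide one will not select them.
 */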
static struct crypto_alg sha1_alg = {
	.cra_name		=	"sha1",
	.cra_driver_name	=	"sha1-padlock",
	.cra_priority		=	PADLOCK_CRA_PRIORITY,
	.cra_flags		=	CRYPTO_ALG_TYPE_DIGEST |
					CRYPTO_ALG_NEED_FALLBACK,
	.cra_blocksize		=	SHA1_HMAC_BLOCK_SIZE,
	.cra_ctxsize		=	sizeof(struct padlock_sha_ctx),
	.cra_module		=	THIS_MODULE,
	.cra_list		=	LIST_HEAD_INIT(sha1_alg.cra_list),
	.cra_init		=	padlock_sha1_cra_init,
	.cra_exit		=	padlock_cra_exit,
	.cra_u			=	{
		.digest = {
			.dia_digestsize	=	SHA1_DIGEST_SIZE,
			.dia_init	=	padlock_sha_init,
			.dia_update	=	padlock_sha_update,
			.dia_final	=	padlock_sha_final,
		}
	}
};

static struct crypto_alg sha256_alg = {
	.cra_name		=	"sha256",
	.cra_driver_name	=	"sha256-padlock",
	.cra_priority		=	PADLOCK_CRA_PRIORITY,
	.cra_flags		=	CRYPTO_ALG_TYPE_DIGEST |
					CRYPTO_ALG_NEED_FALLBACK,
	.cra_blocksize		=	SHA256_HMAC_BLOCK_SIZE,
	.cra_ctxsize		=	sizeof(struct padlock_sha_ctx),
	.cra_module		=	THIS_MODULE,
	.cra_list		=	LIST_HEAD_INIT(sha256_alg.cra_list),
	.cra_init		=	padlock_sha256_cra_init,
	.cra_exit		=	padlock_cra_exit,
	.cra_u			=	{
		.digest = {
			.dia_digestsize	=	SHA256_DIGEST_SIZE,
			.dia_init	=	padlock_sha_init,
			.dia_update	=	padlock_sha_update,
			.dia_final	=	padlock_sha_final,
		}
	}
};

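/*
 * Probe for the generic software hashes used as fallbacks and warn if
 * they cannot be loaded; padlock_cra_init() will fail later without
 * them.
 */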
static void __init padlock_sha_check_fallbacks(void)
{
	if (!crypto_has_hash("sha1", 0, CRYPTO_ALG_ASYNC |
					CRYPTO_ALG_NEED_FALLBACK))
		printk(KERN_WARNING PFX
		       "Couldn't load fallback module for sha1.\n");

	if (!crypto_has_hash("sha256", 0, CRYPTO_ALG_ASYNC |
					CRYPTO_ALG_NEED_FALLBACK))
		printk(KERN_WARNING PFX
		       "Couldn't load fallback module for sha256.\n");
}

static int __init padlock_init(void)
{
	int rc = -ENODEV;

	if (!cpu_has_phe) {
		printk(KERN_ERR PFX "VIA PadLock Hash Engine not detected.\n");
		return -ENODEV;
	}

	if (!cpu_has_phe_enabled) {
		printk(KERN_ERR PFX "VIA PadLock detected, but not enabled. Hmm, strange...\n");
		return -ENODEV;
	}

	padlock_sha_check_fallbacks();

	rc = crypto_register_alg(&sha1_alg);
	if (rc)
		goto out;

	rc = crypto_register_alg(&sha256_alg);
	if (rc)
		goto out_unreg1;

	printk(KERN_NOTICE PFX "Using VIA PadLock ACE for SHA1/SHA256 algorithms.\n");

	return 0;

out_unreg1:
	crypto_unregister_alg(&sha1_alg);
out:
	printk(KERN_ERR PFX "VIA PadLock SHA1/SHA256 initialization failed.\n");
	return rc;
}

static void __exit padlock_fini(void)
{
	crypto_unregister_alg(&sha1_alg);
	crypto_unregister_alg(&sha256_alg);
}

module_init(padlock_init);
module_exit(padlock_fini);

MODULE_DESCRIPTION("VIA PadLock SHA1/SHA256 algorithms support.");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Michal Ludvig");

MODULE_ALIAS("sha1-padlock");
MODULE_ALIAS("sha256-padlock");