/*
 * Cryptographic API.
 *
 * Support for VIA PadLock hardware crypto engine.
 *
 * Copyright (c) 2006  Michal Ludvig <michal@logix.cz>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 */

#include <crypto/internal/hash.h>
#include <crypto/padlock.h>
#include <crypto/sha.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/scatterlist.h>
#include <asm/i387.h>

struct padlock_sha_desc {
	struct shash_desc fallback;
};

struct padlock_sha_ctx {
	struct crypto_shash *fallback;
};
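
/*
 * The PadLock hash engine produces a digest only in one shot; it cannot
 * export or resume a partial hash.  Each transform therefore wraps a
 * software shash (whatever "sha1"/"sha256" implementation the lookup by
 * cra_name returns) and delegates init/update/export/import to it.
 * Only in finup/final is the exported intermediate state, plus the
 * still-unhashed tail, handed to the hardware to finish the digest.
 */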

static int padlock_sha_init(struct shash_desc *desc)
{
	struct padlock_sha_desc *dctx = shash_desc_ctx(desc);
	struct padlock_sha_ctx *ctx = crypto_shash_ctx(desc->tfm);

	dctx->fallback.tfm = ctx->fallback;
	dctx->fallback.flags = desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP;
	return crypto_shash_init(&dctx->fallback);
}

static int padlock_sha_update(struct shash_desc *desc,
			      const u8 *data, unsigned int length)
{
	struct padlock_sha_desc *dctx = shash_desc_ctx(desc);

	dctx->fallback.flags = desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP;
	return crypto_shash_update(&dctx->fallback, data, length);
}

static int padlock_sha_export(struct shash_desc *desc, void *out)
{
	struct padlock_sha_desc *dctx = shash_desc_ctx(desc);

	return crypto_shash_export(&dctx->fallback, out);
}

static int padlock_sha_import(struct shash_desc *desc, const void *in)
{
	struct padlock_sha_desc *dctx = shash_desc_ctx(desc);
	struct padlock_sha_ctx *ctx = crypto_shash_ctx(desc->tfm);

	dctx->fallback.tfm = ctx->fallback;
	dctx->fallback.flags = desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP;
	return crypto_shash_import(&dctx->fallback, in);
}

static inline void padlock_output_block(uint32_t *src,
					uint32_t *dst, size_t count)
{
	while (count--)
		*dst++ = swab32(*src++);
}
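
/*
 * The state words are kept as native-endian (little-endian on x86) u32s
 * while the digest format is big-endian bytes.  E.g. the SHA-1 initial
 * word 0x67452301 sits in memory as 01 23 45 67; swab32() turns it into
 * the digest byte order 67 45 23 01.
 */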

static int padlock_sha1_finup(struct shash_desc *desc, const u8 *in,
			      unsigned int count, u8 *out)
{
	/* We can't store directly to *out as it may be unaligned. */
	/* BTW Don't reduce the buffer size below 128 Bytes!
	 *     PadLock microcode needs it that big. */
	char buf[128 + PADLOCK_ALIGNMENT - STACK_ALIGN] __attribute__
		((aligned(STACK_ALIGN)));
	char *result = PTR_ALIGN(&buf[0], PADLOCK_ALIGNMENT);
	struct padlock_sha_desc *dctx = shash_desc_ctx(desc);
	struct sha1_state state;
	unsigned int space;
	unsigned int leftover;
	int ts_state;
	int err;

	dctx->fallback.flags = desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP;
	err = crypto_shash_export(&dctx->fallback, &state);
	if (err)
		goto out;

	if (state.count + count > ULONG_MAX)
		return crypto_shash_finup(&dctx->fallback, in, count, out);

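	/*
	 * Work out how many bytes of the final block the fallback has
	 * buffered.  ((count - 1) & 63) + 1 maps the tail length into
	 * 1..64 instead of 0..63, so a state that ends exactly on a
	 * block boundary (e.g. state.count == 64, buffer empty) yields
	 * leftover == 64 and space == 0: nothing needs stitching and
	 * the new data can go straight to the engine.  For
	 * state.count == 100, leftover == 36 and space == 28 bytes
	 * remain in the current block.
	 */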
	leftover = ((state.count - 1) & (SHA1_BLOCK_SIZE - 1)) + 1;
	space = SHA1_BLOCK_SIZE - leftover;
	if (space) {
		if (count > space) {
			err = crypto_shash_update(&dctx->fallback, in, space) ?:
			      crypto_shash_export(&dctx->fallback, &state);
			if (err)
				goto out;
			count -= space;
			in += space;
		} else {
			memcpy(state.buffer + leftover, in, count);
			in = state.buffer;
			count += leftover;
			state.count &= ~(SHA1_BLOCK_SIZE - 1);
		}
	}

	memcpy(result, &state.state, SHA1_DIGEST_SIZE);

	/* prevent taking the spurious DNA fault with padlock. */
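	/*
	 * Judging by the operand constraints below, the engine takes
	 * the intermediate state at EDI, the source data at ESI, the
	 * total message length in ECX and the number of bytes already
	 * folded into the state in EAX (0 means "start from the IV"),
	 * so it can generate the length padding itself.
	 */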
	ts_state = irq_ts_save();
	asm volatile (".byte 0xf3,0x0f,0xa6,0xc8" /* rep xsha1 */
		      :
		      : "c"((unsigned long)state.count + count),
			"a"((unsigned long)state.count),
			"S"(in), "D"(result));
	irq_ts_restore(ts_state);

	padlock_output_block((uint32_t *)result, (uint32_t *)out, 5);

out:
	return err;
}

static int padlock_sha1_final(struct shash_desc *desc, u8 *out)
{
	/* Dummy buffer; count is 0, so nothing is consumed from it. */
	u8 buf[4];

	return padlock_sha1_finup(desc, buf, 0, out);
}

static int padlock_sha256_finup(struct shash_desc *desc, const u8 *in,
				unsigned int count, u8 *out)
{
	/* We can't store directly to *out as it may be unaligned. */
	/* BTW Don't reduce the buffer size below 128 Bytes!
	 *     PadLock microcode needs it that big. */
	char buf[128 + PADLOCK_ALIGNMENT - STACK_ALIGN] __attribute__
		((aligned(STACK_ALIGN)));
	char *result = PTR_ALIGN(&buf[0], PADLOCK_ALIGNMENT);
	struct padlock_sha_desc *dctx = shash_desc_ctx(desc);
	struct sha256_state state;
	unsigned int space;
	unsigned int leftover;
	int ts_state;
	int err;

	dctx->fallback.flags = desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP;
	err = crypto_shash_export(&dctx->fallback, &state);
	if (err)
		goto out;

	if (state.count + count > ULONG_MAX)
		return crypto_shash_finup(&dctx->fallback, in, count, out);

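	/* Same block-boundary bookkeeping as in padlock_sha1_finup(). */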
	leftover = ((state.count - 1) & (SHA256_BLOCK_SIZE - 1)) + 1;
	space = SHA256_BLOCK_SIZE - leftover;
	if (space) {
		if (count > space) {
			err = crypto_shash_update(&dctx->fallback, in, space) ?:
			      crypto_shash_export(&dctx->fallback, &state);
			if (err)
				goto out;
			count -= space;
			in += space;
		} else {
			memcpy(state.buf + leftover, in, count);
			in = state.buf;
			count += leftover;
			state.count &= ~(SHA256_BLOCK_SIZE - 1);
		}
	}

	memcpy(result, &state.state, SHA256_DIGEST_SIZE);

	/* prevent taking the spurious DNA fault with padlock. */
	ts_state = irq_ts_save();
	asm volatile (".byte 0xf3,0x0f,0xa6,0xd0" /* rep xsha256 */
		      :
		      : "c"((unsigned long)state.count + count),
			"a"((unsigned long)state.count),
			"S"(in), "D"(result));
	irq_ts_restore(ts_state);

	padlock_output_block((uint32_t *)result, (uint32_t *)out, 8);

out:
	return err;
}

static int padlock_sha256_final(struct shash_desc *desc, u8 *out)
{
	/* Dummy buffer; count is 0, so nothing is consumed from it. */
	u8 buf[4];

	return padlock_sha256_finup(desc, buf, 0, out);
}

static int padlock_cra_init(struct crypto_tfm *tfm)
{
	struct crypto_shash *hash = __crypto_shash_cast(tfm);
	const char *fallback_driver_name = tfm->__crt_alg->cra_name;
	struct padlock_sha_ctx *ctx = crypto_tfm_ctx(tfm);
	struct crypto_shash *fallback_tfm;
	int err = -ENOMEM;

	/* Allocate a fallback and abort if it failed. */
	fallback_tfm = crypto_alloc_shash(fallback_driver_name, 0,
					  CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(fallback_tfm)) {
		printk(KERN_WARNING PFX "Fallback driver '%s' could not be loaded!\n",
		       fallback_driver_name);
		err = PTR_ERR(fallback_tfm);
		goto out;
	}

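	/*
	 * The fallback's descriptor is embedded in struct
	 * padlock_sha_desc, so this tfm's descriptor size has to grow
	 * at runtime by whatever the chosen fallback needs.
	 */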
	ctx->fallback = fallback_tfm;
	hash->descsize += crypto_shash_descsize(fallback_tfm);
	return 0;

out:
	return err;
}

static void padlock_cra_exit(struct crypto_tfm *tfm)
{
	struct padlock_sha_ctx *ctx = crypto_tfm_ctx(tfm);

	crypto_free_shash(ctx->fallback);
}

static struct shash_alg sha1_alg = {
	.digestsize	=	SHA1_DIGEST_SIZE,
	.init		=	padlock_sha_init,
	.update		=	padlock_sha_update,
	.finup		=	padlock_sha1_finup,
	.final		=	padlock_sha1_final,
	.export		=	padlock_sha_export,
	.import		=	padlock_sha_import,
	.descsize	=	sizeof(struct padlock_sha_desc),
	.statesize	=	sizeof(struct sha1_state),
	.base		=	{
		.cra_name		=	"sha1",
		.cra_driver_name	=	"sha1-padlock",
		.cra_priority		=	PADLOCK_CRA_PRIORITY,
		.cra_flags		=	CRYPTO_ALG_TYPE_SHASH |
						CRYPTO_ALG_NEED_FALLBACK,
		.cra_blocksize		=	SHA1_BLOCK_SIZE,
		.cra_ctxsize		=	sizeof(struct padlock_sha_ctx),
		.cra_module		=	THIS_MODULE,
		.cra_init		=	padlock_cra_init,
		.cra_exit		=	padlock_cra_exit,
	}
};

static struct shash_alg sha256_alg = {
	.digestsize	=	SHA256_DIGEST_SIZE,
	.init		=	padlock_sha_init,
	.update		=	padlock_sha_update,
	.finup		=	padlock_sha256_finup,
	.final		=	padlock_sha256_final,
	.export		=	padlock_sha_export,
	.import		=	padlock_sha_import,
	.descsize	=	sizeof(struct padlock_sha_desc),
	.statesize	=	sizeof(struct sha256_state),
	.base		=	{
		.cra_name		=	"sha256",
		.cra_driver_name	=	"sha256-padlock",
		.cra_priority		=	PADLOCK_CRA_PRIORITY,
		.cra_flags		=	CRYPTO_ALG_TYPE_SHASH |
						CRYPTO_ALG_NEED_FALLBACK,
		.cra_blocksize		=	SHA256_BLOCK_SIZE,
		.cra_ctxsize		=	sizeof(struct padlock_sha_ctx),
		.cra_module		=	THIS_MODULE,
		.cra_init		=	padlock_cra_init,
		.cra_exit		=	padlock_cra_exit,
	}
};

static int __init padlock_init(void)
{
	int rc = -ENODEV;

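	/*
	 * cpu_has_phe / cpu_has_phe_enabled test the VIA CPUID
	 * extended-feature bits for the PadLock Hash Engine being
	 * present and for it being switched on (firmware may leave it
	 * disabled).
	 */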
	if (!cpu_has_phe) {
		printk(KERN_NOTICE PFX "VIA PadLock Hash Engine not detected.\n");
		return -ENODEV;
	}

	if (!cpu_has_phe_enabled) {
		printk(KERN_NOTICE PFX "VIA PadLock detected, but not enabled. Hmm, strange...\n");
		return -ENODEV;
	}

	rc = crypto_register_shash(&sha1_alg);
	if (rc)
		goto out;

	rc = crypto_register_shash(&sha256_alg);
	if (rc)
		goto out_unreg1;

	printk(KERN_NOTICE PFX "Using VIA PadLock ACE for SHA1/SHA256 algorithms.\n");

	return 0;

out_unreg1:
	crypto_unregister_shash(&sha1_alg);
out:
	printk(KERN_ERR PFX "VIA PadLock SHA1/SHA256 initialization failed.\n");
	return rc;
}

static void __exit padlock_fini(void)
{
	crypto_unregister_shash(&sha1_alg);
	crypto_unregister_shash(&sha256_alg);
}

module_init(padlock_init);
module_exit(padlock_fini);

MODULE_DESCRIPTION("VIA PadLock SHA1/SHA256 algorithms support.");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Michal Ludvig");

MODULE_ALIAS("sha1-all");
MODULE_ALIAS("sha256-all");
MODULE_ALIAS("sha1-padlock");
MODULE_ALIAS("sha256-padlock");
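
/*
 * A minimal usage sketch (added, not part of the driver): hashing a
 * buffer through the generic "sha1" name, which resolves to this
 * driver when the PadLock engine is present, since PADLOCK_CRA_PRIORITY
 * outranks the software implementation.  padlock_sha_example() is a
 * hypothetical name for illustration only and assumes
 * #include <linux/slab.h> for kmalloc()/kfree().
 */
static int __maybe_unused padlock_sha_example(void)
{
	static const u8 msg[] = "abc";
	u8 digest[SHA1_DIGEST_SIZE];
	struct crypto_shash *tfm;
	struct shash_desc *desc;
	int err;

	tfm = crypto_alloc_shash("sha1", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	/* A shash descriptor is the header plus the tfm's descsize. */
	desc = kmalloc(sizeof(*desc) + crypto_shash_descsize(tfm),
		       GFP_KERNEL);
	if (!desc) {
		crypto_free_shash(tfm);
		return -ENOMEM;
	}
	desc->tfm = tfm;
	desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;

	/* One-shot init + update + final over the whole message. */
	err = crypto_shash_digest(desc, msg, sizeof(msg) - 1, digest);

	kfree(desc);
	crypto_free_shash(tfm);
	return err;
}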