xref: /openbmc/linux/drivers/crypto/padlock-sha.c (revision e8e0929d)
/*
 * Cryptographic API.
 *
 * Support for VIA PadLock hardware crypto engine.
 *
 * Copyright (c) 2006  Michal Ludvig <michal@logix.cz>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 */

#include <crypto/internal/hash.h>
#include <crypto/sha.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/scatterlist.h>
#include <asm/i387.h>
#include "padlock.h"

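/*
 * Per-request context.  The PadLock hash instructions only produce a
 * finalized (padded) digest, so all incremental state lives in a software
 * fallback descriptor embedded here; the fallback's own state is stored
 * behind this struct (see padlock_cra_init()).
 */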
struct padlock_sha_desc {
	struct shash_desc fallback;
};

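/* Per-tfm context: the software fallback transform ("sha1"/"sha256"). */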
struct padlock_sha_ctx {
	struct crypto_shash *fallback;
};

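/*
 * init and update simply drive the fallback; the hardware is invoked only
 * at finalization time, where it finishes the hash in one go from the
 * fallback's exported intermediate state.
 */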
static int padlock_sha_init(struct shash_desc *desc)
{
	struct padlock_sha_desc *dctx = shash_desc_ctx(desc);
	struct padlock_sha_ctx *ctx = crypto_shash_ctx(desc->tfm);

	dctx->fallback.tfm = ctx->fallback;
	dctx->fallback.flags = desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP;
	return crypto_shash_init(&dctx->fallback);
}

static int padlock_sha_update(struct shash_desc *desc,
			      const u8 *data, unsigned int length)
{
	struct padlock_sha_desc *dctx = shash_desc_ctx(desc);

	dctx->fallback.flags = desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP;
	return crypto_shash_update(&dctx->fallback, data, length);
}

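/* Byte-swap each 32-bit digest word into canonical big-endian order. */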
static inline void padlock_output_block(uint32_t *src,
					uint32_t *dst, size_t count)
{
	while (count--)
		*dst++ = swab32(*src++);
}

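/*
 * Finish the hash on the PadLock engine: export the fallback's
 * intermediate state, deal with any partial block (either flush it
 * through the fallback or hand it to the hardware together with the new
 * data), then let the engine compute the final padded digest starting
 * from the intermediate state seeded into 'result'.
 */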
static int padlock_sha1_finup(struct shash_desc *desc, const u8 *in,
			      unsigned int count, u8 *out)
{
	/* We can't store directly to *out as it may be unaligned. */
	/* BTW Don't reduce the buffer size below 128 Bytes!
	 *     PadLock microcode needs it that big. */
	char result[128] __attribute__ ((aligned(PADLOCK_ALIGNMENT)));
	struct padlock_sha_desc *dctx = shash_desc_ctx(desc);
	struct sha1_state state;
	unsigned int space;
	unsigned int leftover;
	int ts_state;
	int err;

	dctx->fallback.flags = desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP;
	err = crypto_shash_export(&dctx->fallback, &state);
	if (err)
		goto out;

	/* The engine takes the total length as an unsigned long;
	 * punt to the fallback if that would overflow. */
	if (state.count + count > ULONG_MAX)
		return crypto_shash_finup(&dctx->fallback, in, count, out);

	/* Bytes (1..SHA1_BLOCK_SIZE) buffered in the last, possibly
	 * partial, block of the fallback state. */
	leftover = ((state.count - 1) & (SHA1_BLOCK_SIZE - 1)) + 1;
	space = SHA1_BLOCK_SIZE - leftover;
	if (space) {
		if (count > space) {
			/* Flush the partial block through the fallback and
			 * re-export the now block-aligned state. */
			err = crypto_shash_update(&dctx->fallback, in, space) ?:
			      crypto_shash_export(&dctx->fallback, &state);
			if (err)
				goto out;
			count -= space;
			in += space;
		} else {
			/* Everything fits in the state buffer; feed the
			 * partial block and new data to the hardware
			 * together. */
			memcpy(state.buffer + leftover, in, count);
			in = state.buffer;
			count += leftover;
			state.count &= ~(SHA1_BLOCK_SIZE - 1);
		}
	}

	/* Seed the engine with the intermediate digest. */
	memcpy(result, &state.state, SHA1_DIGEST_SIZE);

	/* prevent taking the spurious DNA fault with padlock. */
	ts_state = irq_ts_save();
	/* rep xsha1: ECX = total byte count, EAX = bytes already hashed,
	 * ESI = remaining input, EDI = intermediate/final digest buffer. */
	asm volatile (".byte 0xf3,0x0f,0xa6,0xc8" /* rep xsha1 */
		      :
		      : "c"((unsigned long)state.count + count),
			"a"((unsigned long)state.count),
			"S"(in), "D"(result));
	irq_ts_restore(ts_state);

	padlock_output_block((uint32_t *)result, (uint32_t *)out, 5);

out:
	return err;
}

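/* buf is a dummy; with count == 0 the finup path never reads it. */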
static int padlock_sha1_final(struct shash_desc *desc, u8 *out)
{
	u8 buf[4];

	return padlock_sha1_finup(desc, buf, 0, out);
}

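/* Same scheme as the SHA-1 path, using sha256_state and rep xsha256. */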
static int padlock_sha256_finup(struct shash_desc *desc, const u8 *in,
				unsigned int count, u8 *out)
{
	/* We can't store directly to *out as it may be unaligned. */
	/* BTW Don't reduce the buffer size below 128 Bytes!
	 *     PadLock microcode needs it that big. */
	char result[128] __attribute__ ((aligned(PADLOCK_ALIGNMENT)));
	struct padlock_sha_desc *dctx = shash_desc_ctx(desc);
	struct sha256_state state;
	unsigned int space;
	unsigned int leftover;
	int ts_state;
	int err;

	dctx->fallback.flags = desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP;
	err = crypto_shash_export(&dctx->fallback, &state);
	if (err)
		goto out;

	if (state.count + count > ULONG_MAX)
		return crypto_shash_finup(&dctx->fallback, in, count, out);

	leftover = ((state.count - 1) & (SHA256_BLOCK_SIZE - 1)) + 1;
	space = SHA256_BLOCK_SIZE - leftover;
	if (space) {
		if (count > space) {
			err = crypto_shash_update(&dctx->fallback, in, space) ?:
			      crypto_shash_export(&dctx->fallback, &state);
			if (err)
				goto out;
			count -= space;
			in += space;
		} else {
			memcpy(state.buf + leftover, in, count);
			in = state.buf;
			count += leftover;
			/* Was SHA1_BLOCK_SIZE; same value (64), but use the
			 * SHA-256 constant in the SHA-256 path. */
			state.count &= ~(SHA256_BLOCK_SIZE - 1);
		}
	}

	memcpy(result, &state.state, SHA256_DIGEST_SIZE);

	/* prevent taking the spurious DNA fault with padlock. */
	ts_state = irq_ts_save();
	asm volatile (".byte 0xf3,0x0f,0xa6,0xd0" /* rep xsha256 */
		      :
		      : "c"((unsigned long)state.count + count),
			"a"((unsigned long)state.count),
			"S"(in), "D"(result));
	irq_ts_restore(ts_state);

	padlock_output_block((uint32_t *)result, (uint32_t *)out, 8);

out:
	return err;
}

static int padlock_sha256_final(struct shash_desc *desc, u8 *out)
{
	u8 buf[4];

	return padlock_sha256_finup(desc, buf, 0, out);
}

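/*
 * Allocate the software fallback.  CRYPTO_ALG_NEED_FALLBACK in the mask
 * keeps the lookup from selecting this driver itself, and descsize is
 * grown so the fallback's descriptor state fits behind padlock_sha_desc.
 */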
static int padlock_cra_init(struct crypto_tfm *tfm)
{
	struct crypto_shash *hash = __crypto_shash_cast(tfm);
	const char *fallback_driver_name = tfm->__crt_alg->cra_name;
	struct padlock_sha_ctx *ctx = crypto_tfm_ctx(tfm);
	struct crypto_shash *fallback_tfm;
	int err = -ENOMEM;

	/* Allocate a fallback and abort if it failed. */
	fallback_tfm = crypto_alloc_shash(fallback_driver_name, 0,
					  CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(fallback_tfm)) {
		printk(KERN_WARNING PFX "Fallback driver '%s' could not be loaded!\n",
		       fallback_driver_name);
		err = PTR_ERR(fallback_tfm);
		goto out;
	}

	ctx->fallback = fallback_tfm;
	hash->descsize += crypto_shash_descsize(fallback_tfm);
	return 0;

out:
	return err;
}

static void padlock_cra_exit(struct crypto_tfm *tfm)
{
	struct padlock_sha_ctx *ctx = crypto_tfm_ctx(tfm);

	crypto_free_shash(ctx->fallback);
}

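/* Register with PADLOCK_CRA_PRIORITY, above the generic C
 * implementations, so this driver is preferred whenever it is loaded. */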
static struct shash_alg sha1_alg = {
	.digestsize	=	SHA1_DIGEST_SIZE,
	.init		=	padlock_sha_init,
	.update		=	padlock_sha_update,
	.finup		=	padlock_sha1_finup,
	.final		=	padlock_sha1_final,
	.descsize	=	sizeof(struct padlock_sha_desc),
	.base		=	{
		.cra_name		=	"sha1",
		.cra_driver_name	=	"sha1-padlock",
		.cra_priority		=	PADLOCK_CRA_PRIORITY,
		.cra_flags		=	CRYPTO_ALG_TYPE_SHASH |
						CRYPTO_ALG_NEED_FALLBACK,
		.cra_blocksize		=	SHA1_BLOCK_SIZE,
		.cra_ctxsize		=	sizeof(struct padlock_sha_ctx),
		.cra_module		=	THIS_MODULE,
		.cra_init		=	padlock_cra_init,
		.cra_exit		=	padlock_cra_exit,
	}
};

static struct shash_alg sha256_alg = {
	.digestsize	=	SHA256_DIGEST_SIZE,
	.init		=	padlock_sha_init,
	.update		=	padlock_sha_update,
	.finup		=	padlock_sha256_finup,
	.final		=	padlock_sha256_final,
	.descsize	=	sizeof(struct padlock_sha_desc),
	.base		=	{
		.cra_name		=	"sha256",
		.cra_driver_name	=	"sha256-padlock",
		.cra_priority		=	PADLOCK_CRA_PRIORITY,
		.cra_flags		=	CRYPTO_ALG_TYPE_SHASH |
						CRYPTO_ALG_NEED_FALLBACK,
		.cra_blocksize		=	SHA256_BLOCK_SIZE,
		.cra_ctxsize		=	sizeof(struct padlock_sha_ctx),
		.cra_module		=	THIS_MODULE,
		.cra_init		=	padlock_cra_init,
		.cra_exit		=	padlock_cra_exit,
	}
};

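/* Bail out unless the CPU advertises the PadLock Hash Engine (PHE)
 * as both present and enabled. */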
static int __init padlock_init(void)
{
	int rc = -ENODEV;

	if (!cpu_has_phe) {
		printk(KERN_NOTICE PFX "VIA PadLock Hash Engine not detected.\n");
		return -ENODEV;
	}

	if (!cpu_has_phe_enabled) {
		printk(KERN_NOTICE PFX "VIA PadLock detected, but not enabled. Hmm, strange...\n");
		return -ENODEV;
	}

	rc = crypto_register_shash(&sha1_alg);
	if (rc)
		goto out;

	rc = crypto_register_shash(&sha256_alg);
	if (rc)
		goto out_unreg1;

	printk(KERN_NOTICE PFX "Using VIA PadLock ACE for SHA1/SHA256 algorithms.\n");

	return 0;

out_unreg1:
	crypto_unregister_shash(&sha1_alg);
out:
	printk(KERN_ERR PFX "VIA PadLock SHA1/SHA256 initialization failed.\n");
	return rc;
}

static void __exit padlock_fini(void)
{
	crypto_unregister_shash(&sha1_alg);
	crypto_unregister_shash(&sha256_alg);
}

module_init(padlock_init);
module_exit(padlock_fini);

MODULE_DESCRIPTION("VIA PadLock SHA1/SHA256 algorithms support.");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Michal Ludvig");

MODULE_ALIAS("sha1-all");
MODULE_ALIAS("sha256-all");
MODULE_ALIAS("sha1-padlock");
MODULE_ALIAS("sha256-padlock");