/*
 * Glue code for SHA-256 implementation for SPE instructions (PPC)
 *
 * Based on the generic implementation. The assembler module takes care
 * of the SPE registers so it can run from interrupt context.
 *
 * Copyright (c) 2015 Markus Stockhausen <stockhausen@collogia.de>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 */

#include <crypto/internal/hash.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/cryptohash.h>
#include <linux/types.h>
#include <crypto/sha.h>
#include <asm/byteorder.h>
#include <asm/switch_to.h>
#include <linux/hardirq.h>

/*
 * MAX_BYTES defines the number of bytes that are allowed to be processed
 * between preempt_disable() and preempt_enable(). SHA256 takes ~2,000
 * operations per 64 bytes. e500 cores can issue two arithmetic instructions
 * per clock cycle using one 32/64 bit unit (SU1) and one 32 bit unit (SU2).
 * Thus 1KB of input data will need an estimated maximum of 18,000 cycles,
 * headroom for cache misses included. Even with the low-end model clocked
 * at 667 MHz this amounts to a critical time window of less than 27us.
 *
 */
#define MAX_BYTES 1024
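
/*
 * Spelled out, the estimate above is: 1KB is 16 blocks of 64 bytes, i.e.
 * about 16 * 2,000 = 32,000 operations. Dual-issued on SU1/SU2 that is
 * roughly 16,000 cycles, or about 18,000 with cache-miss headroom. At
 * 667 MHz, 18,000 cycles / 667,000,000 Hz comes to roughly 27us.
 */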

extern void ppc_spe_sha256_transform(u32 *state, const u8 *src, u32 blocks);

static void spe_begin(void)
{
        /* We just start SPE operations and will save SPE registers later. */
        preempt_disable();
        enable_kernel_spe();
}

static void spe_end(void)
{
        disable_kernel_spe();
        /* reenable preemption */
        preempt_enable();
}

static inline void ppc_sha256_clear_context(struct sha256_state *sctx)
{
        int count = sizeof(struct sha256_state) >> 2;
        u32 *ptr = (u32 *)sctx;

        /* make sure we can clear the fast way */
        BUILD_BUG_ON(sizeof(struct sha256_state) % 4);
        do { *ptr++ = 0; } while (--count);
}

static int ppc_spe_sha256_init(struct shash_desc *desc)
{
        struct sha256_state *sctx = shash_desc_ctx(desc);

        sctx->state[0] = SHA256_H0;
        sctx->state[1] = SHA256_H1;
        sctx->state[2] = SHA256_H2;
        sctx->state[3] = SHA256_H3;
        sctx->state[4] = SHA256_H4;
        sctx->state[5] = SHA256_H5;
        sctx->state[6] = SHA256_H6;
        sctx->state[7] = SHA256_H7;
        sctx->count = 0;

        return 0;
}

static int ppc_spe_sha224_init(struct shash_desc *desc)
{
        struct sha256_state *sctx = shash_desc_ctx(desc);

        sctx->state[0] = SHA224_H0;
        sctx->state[1] = SHA224_H1;
        sctx->state[2] = SHA224_H2;
        sctx->state[3] = SHA224_H3;
        sctx->state[4] = SHA224_H4;
        sctx->state[5] = SHA224_H5;
        sctx->state[6] = SHA224_H6;
        sctx->state[7] = SHA224_H7;
        sctx->count = 0;

        return 0;
}

static int ppc_spe_sha256_update(struct shash_desc *desc, const u8 *data,
                        unsigned int len)
{
        struct sha256_state *sctx = shash_desc_ctx(desc);
        const unsigned int offset = sctx->count & 0x3f;
        const unsigned int avail = 64 - offset;
        unsigned int bytes;
        const u8 *src = data;

        if (avail > len) {
                sctx->count += len;
                memcpy((char *)sctx->buf + offset, src, len);
                return 0;
        }

        sctx->count += len;

        if (offset) {
                memcpy((char *)sctx->buf + offset, src, avail);

                spe_begin();
                ppc_spe_sha256_transform(sctx->state, (const u8 *)sctx->buf, 1);
                spe_end();

                len -= avail;
                src += avail;
        }

        while (len > 63) {
                /* cut input data into smaller blocks */
                bytes = (len > MAX_BYTES) ? MAX_BYTES : len;
                bytes = bytes & ~0x3f;

                spe_begin();
                ppc_spe_sha256_transform(sctx->state, src, bytes >> 6);
                spe_end();

                src += bytes;
                len -= bytes;
        }

        memcpy((char *)sctx->buf, src, len);
        return 0;
}
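
/*
 * Finalization below follows the standard SHA-224/256 padding scheme: a
 * 0x80 byte is appended after the buffered data, the block is zero-padded
 * up to byte 56 and the message length in bits is stored big-endian in the
 * last 8 bytes. If fewer than 9 bytes are free in the current block
 * (offset > 55, i.e. padlen < 0), that block is hashed first and the
 * length goes into an additional, otherwise zeroed block.
 */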

static int ppc_spe_sha256_final(struct shash_desc *desc, u8 *out)
{
        struct sha256_state *sctx = shash_desc_ctx(desc);
        const unsigned int offset = sctx->count & 0x3f;
        char *p = (char *)sctx->buf + offset;
        int padlen;
        __be64 *pbits = (__be64 *)(((char *)&sctx->buf) + 56);
        __be32 *dst = (__be32 *)out;

        padlen = 55 - offset;
        *p++ = 0x80;

        spe_begin();

        if (padlen < 0) {
                memset(p, 0x00, padlen + sizeof (u64));
                ppc_spe_sha256_transform(sctx->state, sctx->buf, 1);
                p = (char *)sctx->buf;
                padlen = 56;
        }

        memset(p, 0, padlen);
        *pbits = cpu_to_be64(sctx->count << 3);
        ppc_spe_sha256_transform(sctx->state, sctx->buf, 1);

        spe_end();

        dst[0] = cpu_to_be32(sctx->state[0]);
        dst[1] = cpu_to_be32(sctx->state[1]);
        dst[2] = cpu_to_be32(sctx->state[2]);
        dst[3] = cpu_to_be32(sctx->state[3]);
        dst[4] = cpu_to_be32(sctx->state[4]);
        dst[5] = cpu_to_be32(sctx->state[5]);
        dst[6] = cpu_to_be32(sctx->state[6]);
        dst[7] = cpu_to_be32(sctx->state[7]);

        ppc_sha256_clear_context(sctx);
        return 0;
}

static int ppc_spe_sha224_final(struct shash_desc *desc, u8 *out)
{
        u32 D[SHA256_DIGEST_SIZE >> 2];
        __be32 *dst = (__be32 *)out;

        ppc_spe_sha256_final(desc, (u8 *)D);

        /* avoid bytewise memcpy */
        dst[0] = D[0];
        dst[1] = D[1];
        dst[2] = D[2];
        dst[3] = D[3];
        dst[4] = D[4];
        dst[5] = D[5];
        dst[6] = D[6];

        /* clear sensitive data */
        memzero_explicit(D, SHA256_DIGEST_SIZE);
        return 0;
}

static int ppc_spe_sha256_export(struct shash_desc *desc, void *out)
{
        struct sha256_state *sctx = shash_desc_ctx(desc);

        memcpy(out, sctx, sizeof(*sctx));
        return 0;
}

static int ppc_spe_sha256_import(struct shash_desc *desc, const void *in)
{
        struct sha256_state *sctx = shash_desc_ctx(desc);

        memcpy(sctx, in, sizeof(*sctx));
        return 0;
}

static struct shash_alg algs[2] = { {
        .digestsize     = SHA256_DIGEST_SIZE,
        .init           = ppc_spe_sha256_init,
        .update         = ppc_spe_sha256_update,
        .final          = ppc_spe_sha256_final,
        .export         = ppc_spe_sha256_export,
        .import         = ppc_spe_sha256_import,
        .descsize       = sizeof(struct sha256_state),
        .statesize      = sizeof(struct sha256_state),
        .base           = {
                .cra_name        = "sha256",
                .cra_driver_name = "sha256-ppc-spe",
                .cra_priority    = 300,
                .cra_flags       = CRYPTO_ALG_TYPE_SHASH,
                .cra_blocksize   = SHA256_BLOCK_SIZE,
                .cra_module      = THIS_MODULE,
        }
}, {
        .digestsize     = SHA224_DIGEST_SIZE,
        .init           = ppc_spe_sha224_init,
        .update         = ppc_spe_sha256_update,
        .final          = ppc_spe_sha224_final,
        .export         = ppc_spe_sha256_export,
        .import         = ppc_spe_sha256_import,
        .descsize       = sizeof(struct sha256_state),
        .statesize      = sizeof(struct sha256_state),
        .base           = {
                .cra_name        = "sha224",
                .cra_driver_name = "sha224-ppc-spe",
                .cra_priority    = 300,
                .cra_flags       = CRYPTO_ALG_TYPE_SHASH,
                .cra_blocksize   = SHA224_BLOCK_SIZE,
                .cra_module      = THIS_MODULE,
        }
} };

static int __init ppc_spe_sha256_mod_init(void)
{
        return crypto_register_shashes(algs, ARRAY_SIZE(algs));
}

static void __exit ppc_spe_sha256_mod_fini(void)
{
        crypto_unregister_shashes(algs, ARRAY_SIZE(algs));
}

module_init(ppc_spe_sha256_mod_init);
module_exit(ppc_spe_sha256_mod_fini);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("SHA-224 and SHA-256 Secure Hash Algorithm, SPE optimized");

MODULE_ALIAS_CRYPTO("sha224");
MODULE_ALIAS_CRYPTO("sha224-ppc-spe");
MODULE_ALIAS_CRYPTO("sha256");
MODULE_ALIAS_CRYPTO("sha256-ppc-spe");
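
/*
 * Illustrative sketch, kept out of the build with #if 0: one way other
 * kernel code could drive the hashes registered above through the generic
 * shash API. The function name spe_sha256_demo() and the "abc" input are
 * made up for this example; requesting "sha256" only resolves to this
 * driver when its priority (300) wins the algorithm lookup. Details such
 * as the shash_desc flags field, which existed on kernels of this file's
 * vintage, may need adjusting.
 */
#if 0
static int spe_sha256_demo(void)
{
        struct crypto_shash *tfm;
        u8 digest[SHA256_DIGEST_SIZE];
        static const u8 msg[] = "abc";
        int ret;

        tfm = crypto_alloc_shash("sha256", 0, 0);
        if (IS_ERR(tfm))
                return PTR_ERR(tfm);

        {
                /* descriptor on the stack, sized for this transform */
                SHASH_DESC_ON_STACK(desc, tfm);

                desc->tfm = tfm;
                ret = crypto_shash_digest(desc, msg, sizeof(msg) - 1, digest);
        }

        crypto_free_shash(tfm);
        return ret;
}
#endif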