/*
 * Bit sliced AES using NEON instructions
 *
 * Copyright (C) 2017 Linaro Ltd <ard.biesheuvel@linaro.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <asm/neon.h>
#include <crypto/aes.h>
#include <crypto/cbc.h>
#include <crypto/internal/simd.h>
#include <crypto/internal/skcipher.h>
#include <crypto/xts.h>
#include <linux/module.h>

MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>");
MODULE_LICENSE("GPL v2");

MODULE_ALIAS_CRYPTO("ecb(aes)");
MODULE_ALIAS_CRYPTO("cbc(aes)");
MODULE_ALIAS_CRYPTO("ctr(aes)");
MODULE_ALIAS_CRYPTO("xts(aes)");

asmlinkage void aesbs_convert_key(u8 out[], u32 const rk[], int rounds);

asmlinkage void aesbs_ecb_encrypt(u8 out[], u8 const in[], u8 const rk[],
				  int rounds, int blocks);
asmlinkage void aesbs_ecb_decrypt(u8 out[], u8 const in[], u8 const rk[],
				  int rounds, int blocks);

asmlinkage void aesbs_cbc_decrypt(u8 out[], u8 const in[], u8 const rk[],
				  int rounds, int blocks, u8 iv[]);

asmlinkage void aesbs_ctr_encrypt(u8 out[], u8 const in[], u8 const rk[],
				  int rounds, int blocks, u8 ctr[], u8 final[]);

asmlinkage void aesbs_xts_encrypt(u8 out[], u8 const in[], u8 const rk[],
				  int rounds, int blocks, u8 iv[]);
asmlinkage void aesbs_xts_decrypt(u8 out[], u8 const in[], u8 const rk[],
				  int rounds, int blocks, u8 iv[]);

struct aesbs_ctx {
	int	rounds;
	/* worst case size of the bit-sliced round key schedule (AES-256) */
	u8	rk[13 * (8 * AES_BLOCK_SIZE) + 32] __aligned(AES_BLOCK_SIZE);
};

struct aesbs_cbc_ctx {
	struct aesbs_ctx	key;
	struct crypto_cipher	*enc_tfm;
};

struct aesbs_xts_ctx {
	struct aesbs_ctx	key;
	struct crypto_cipher	*tweak_tfm;
};

static int aesbs_setkey(struct crypto_skcipher *tfm, const u8 *in_key,
			unsigned int key_len)
{
	struct aesbs_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct crypto_aes_ctx rk;
	int err;

	err = crypto_aes_expand_key(&rk, in_key, key_len);
	if (err)
		return err;

	/* 10, 12 or 14 AES rounds for 128-, 192- or 256-bit keys */
	ctx->rounds = 6 + key_len / 4;

	kernel_neon_begin();
	aesbs_convert_key(ctx->rk, rk.key_enc, ctx->rounds);
	kernel_neon_end();

	return 0;
}

static int __ecb_crypt(struct skcipher_request *req,
		       void (*fn)(u8 out[], u8 const in[], u8 const rk[],
				  int rounds, int blocks))
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct aesbs_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct skcipher_walk walk;
	int err;

	/* non-sleeping walk: the loop below runs with the NEON unit claimed */
	err = skcipher_walk_virt(&walk, req, true);

	kernel_neon_begin();
	while (walk.nbytes >= AES_BLOCK_SIZE) {
		unsigned int blocks = walk.nbytes / AES_BLOCK_SIZE;

		if (walk.nbytes < walk.total)
			/* keep full strides of 8 blocks until the last chunk */
			blocks = round_down(blocks,
					    walk.stride / AES_BLOCK_SIZE);

		fn(walk.dst.virt.addr, walk.src.virt.addr, ctx->rk,
		   ctx->rounds, blocks);
		err = skcipher_walk_done(&walk,
					 walk.nbytes - blocks * AES_BLOCK_SIZE);
	}
	kernel_neon_end();

	return err;
}

static int ecb_encrypt(struct skcipher_request *req)
{
	return __ecb_crypt(req, aesbs_ecb_encrypt);
}

static int ecb_decrypt(struct skcipher_request *req)
{
	return __ecb_crypt(req, aesbs_ecb_decrypt);
}
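
/*
 * CBC is handled asymmetrically: encryption is inherently serial (each
 * block chains on the previous ciphertext block), so it cannot benefit
 * from the 8-way bit-sliced NEON code and falls back to a generic AES
 * cipher, one block at a time. Decryption has no such dependency and
 * uses the parallel NEON implementation.
 */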
static int aesbs_cbc_setkey(struct crypto_skcipher *tfm, const u8 *in_key,
			    unsigned int key_len)
{
	struct aesbs_cbc_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct crypto_aes_ctx rk;
	int err;

	err = crypto_aes_expand_key(&rk, in_key, key_len);
	if (err)
		return err;

	ctx->key.rounds = 6 + key_len / 4;

	kernel_neon_begin();
	aesbs_convert_key(ctx->key.rk, rk.key_enc, ctx->key.rounds);
	kernel_neon_end();

	/* the fallback cipher used for serial encryption needs the key too */
	return crypto_cipher_setkey(ctx->enc_tfm, in_key, key_len);
}

static void cbc_encrypt_one(struct crypto_skcipher *tfm, const u8 *src, u8 *dst)
{
	struct aesbs_cbc_ctx *ctx = crypto_skcipher_ctx(tfm);

	crypto_cipher_encrypt_one(ctx->enc_tfm, dst, src);
}

static int cbc_encrypt(struct skcipher_request *req)
{
	return crypto_cbc_encrypt_walk(req, cbc_encrypt_one);
}

static int cbc_decrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct aesbs_cbc_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct skcipher_walk walk;
	int err;

	err = skcipher_walk_virt(&walk, req, true);

	kernel_neon_begin();
	while (walk.nbytes >= AES_BLOCK_SIZE) {
		unsigned int blocks = walk.nbytes / AES_BLOCK_SIZE;

		if (walk.nbytes < walk.total)
			blocks = round_down(blocks,
					    walk.stride / AES_BLOCK_SIZE);

		aesbs_cbc_decrypt(walk.dst.virt.addr, walk.src.virt.addr,
				  ctx->key.rk, ctx->key.rounds, blocks,
				  walk.iv);
		err = skcipher_walk_done(&walk,
					 walk.nbytes - blocks * AES_BLOCK_SIZE);
	}
	kernel_neon_end();

	return err;
}

static int cbc_init(struct crypto_tfm *tfm)
{
	struct aesbs_cbc_ctx *ctx = crypto_tfm_ctx(tfm);

	ctx->enc_tfm = crypto_alloc_cipher("aes", 0, 0);
	if (IS_ERR(ctx->enc_tfm))
		return PTR_ERR(ctx->enc_tfm);
	return 0;
}

static void cbc_exit(struct crypto_tfm *tfm)
{
	struct aesbs_cbc_ctx *ctx = crypto_tfm_ctx(tfm);

	crypto_free_cipher(ctx->enc_tfm);
}

static int ctr_encrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct aesbs_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct skcipher_walk walk;
	u8 buf[AES_BLOCK_SIZE];
	int err;

	err = skcipher_walk_virt(&walk, req, true);

	kernel_neon_begin();
	while (walk.nbytes > 0) {
		unsigned int blocks = walk.nbytes / AES_BLOCK_SIZE;
		u8 *final = (walk.total % AES_BLOCK_SIZE) ? buf : NULL;

		if (walk.nbytes < walk.total) {
			blocks = round_down(blocks,
					    walk.stride / AES_BLOCK_SIZE);
			/* only emit the final keystream on the last chunk */
			final = NULL;
		}

		aesbs_ctr_encrypt(walk.dst.virt.addr, walk.src.virt.addr,
				  ctx->rk, ctx->rounds, blocks, walk.iv, final);

		if (final) {
			u8 *dst = walk.dst.virt.addr + blocks * AES_BLOCK_SIZE;
			u8 *src = walk.src.virt.addr + blocks * AES_BLOCK_SIZE;

			/* XOR the tail with the keystream of the final block */
			crypto_xor_cpy(dst, src, final,
				       walk.total % AES_BLOCK_SIZE);

			err = skcipher_walk_done(&walk, 0);
			break;
		}
		err = skcipher_walk_done(&walk,
					 walk.nbytes - blocks * AES_BLOCK_SIZE);
	}
	kernel_neon_end();

	return err;
}
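
/*
 * For XTS, the caller supplies a key of twice the AES key size: the
 * lower half keys the bulk cipher, while the upper half keys a plain
 * AES cipher that is only used to encrypt the IV into the initial
 * tweak. The per-block tweak updates are handled in the NEON asm.
 */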
static int aesbs_xts_setkey(struct crypto_skcipher *tfm, const u8 *in_key,
			    unsigned int key_len)
{
	struct aesbs_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
	int err;

	err = xts_verify_key(tfm, in_key, key_len);
	if (err)
		return err;

	/* the second half of the key is the tweak key */
	key_len /= 2;
	err = crypto_cipher_setkey(ctx->tweak_tfm, in_key + key_len, key_len);
	if (err)
		return err;

	return aesbs_setkey(tfm, in_key, key_len);
}

static int xts_init(struct crypto_tfm *tfm)
{
	struct aesbs_xts_ctx *ctx = crypto_tfm_ctx(tfm);

	ctx->tweak_tfm = crypto_alloc_cipher("aes", 0, 0);
	if (IS_ERR(ctx->tweak_tfm))
		return PTR_ERR(ctx->tweak_tfm);
	return 0;
}

static void xts_exit(struct crypto_tfm *tfm)
{
	struct aesbs_xts_ctx *ctx = crypto_tfm_ctx(tfm);

	crypto_free_cipher(ctx->tweak_tfm);
}

static int __xts_crypt(struct skcipher_request *req,
		       void (*fn)(u8 out[], u8 const in[], u8 const rk[],
				  int rounds, int blocks, u8 iv[]))
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct aesbs_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct skcipher_walk walk;
	int err;

	err = skcipher_walk_virt(&walk, req, true);

	/* encrypt the IV with the tweak key to produce the initial tweak */
	crypto_cipher_encrypt_one(ctx->tweak_tfm, walk.iv, walk.iv);

	kernel_neon_begin();
	while (walk.nbytes >= AES_BLOCK_SIZE) {
		unsigned int blocks = walk.nbytes / AES_BLOCK_SIZE;

		if (walk.nbytes < walk.total)
			blocks = round_down(blocks,
					    walk.stride / AES_BLOCK_SIZE);

		fn(walk.dst.virt.addr, walk.src.virt.addr, ctx->key.rk,
		   ctx->key.rounds, blocks, walk.iv);
		err = skcipher_walk_done(&walk,
					 walk.nbytes - blocks * AES_BLOCK_SIZE);
	}
	kernel_neon_end();

	return err;
}

static int xts_encrypt(struct skcipher_request *req)
{
	return __xts_crypt(req, aesbs_xts_encrypt);
}

static int xts_decrypt(struct skcipher_request *req)
{
	return __xts_crypt(req, aesbs_xts_decrypt);
}
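
/*
 * All algorithms below are registered with CRYPTO_ALG_INTERNAL and a
 * "__" name prefix, so they cannot be requested directly by users.
 * They are only reachable through the simd skcipher wrappers created
 * in aes_init(), which make sure the NEON unit is only touched from
 * contexts where it may be used.
 */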
static struct skcipher_alg aes_algs[] = { {
	.base.cra_name		= "__ecb(aes)",
	.base.cra_driver_name	= "__ecb-aes-neonbs",
	.base.cra_priority	= 250,
	.base.cra_blocksize	= AES_BLOCK_SIZE,
	.base.cra_ctxsize	= sizeof(struct aesbs_ctx),
	.base.cra_module	= THIS_MODULE,
	.base.cra_flags		= CRYPTO_ALG_INTERNAL,

	.min_keysize		= AES_MIN_KEY_SIZE,
	.max_keysize		= AES_MAX_KEY_SIZE,
	.walksize		= 8 * AES_BLOCK_SIZE,
	.setkey			= aesbs_setkey,
	.encrypt		= ecb_encrypt,
	.decrypt		= ecb_decrypt,
}, {
	.base.cra_name		= "__cbc(aes)",
	.base.cra_driver_name	= "__cbc-aes-neonbs",
	.base.cra_priority	= 250,
	.base.cra_blocksize	= AES_BLOCK_SIZE,
	.base.cra_ctxsize	= sizeof(struct aesbs_cbc_ctx),
	.base.cra_module	= THIS_MODULE,
	.base.cra_flags		= CRYPTO_ALG_INTERNAL,
	.base.cra_init		= cbc_init,
	.base.cra_exit		= cbc_exit,

	.min_keysize		= AES_MIN_KEY_SIZE,
	.max_keysize		= AES_MAX_KEY_SIZE,
	.walksize		= 8 * AES_BLOCK_SIZE,
	.ivsize			= AES_BLOCK_SIZE,
	.setkey			= aesbs_cbc_setkey,
	.encrypt		= cbc_encrypt,
	.decrypt		= cbc_decrypt,
}, {
	.base.cra_name		= "__ctr(aes)",
	.base.cra_driver_name	= "__ctr-aes-neonbs",
	.base.cra_priority	= 250,
	.base.cra_blocksize	= 1,
	.base.cra_ctxsize	= sizeof(struct aesbs_ctx),
	.base.cra_module	= THIS_MODULE,
	.base.cra_flags		= CRYPTO_ALG_INTERNAL,

	.min_keysize		= AES_MIN_KEY_SIZE,
	.max_keysize		= AES_MAX_KEY_SIZE,
	.chunksize		= AES_BLOCK_SIZE,
	.walksize		= 8 * AES_BLOCK_SIZE,
	.ivsize			= AES_BLOCK_SIZE,
	.setkey			= aesbs_setkey,
	.encrypt		= ctr_encrypt,
	.decrypt		= ctr_encrypt,
}, {
	.base.cra_name		= "__xts(aes)",
	.base.cra_driver_name	= "__xts-aes-neonbs",
	.base.cra_priority	= 250,
	.base.cra_blocksize	= AES_BLOCK_SIZE,
	.base.cra_ctxsize	= sizeof(struct aesbs_xts_ctx),
	.base.cra_module	= THIS_MODULE,
	.base.cra_flags		= CRYPTO_ALG_INTERNAL,
	.base.cra_init		= xts_init,
	.base.cra_exit		= xts_exit,

	.min_keysize		= 2 * AES_MIN_KEY_SIZE,
	.max_keysize		= 2 * AES_MAX_KEY_SIZE,
	.walksize		= 8 * AES_BLOCK_SIZE,
	.ivsize			= AES_BLOCK_SIZE,
	.setkey			= aesbs_xts_setkey,
	.encrypt		= xts_encrypt,
	.decrypt		= xts_decrypt,
} };

static struct simd_skcipher_alg *aes_simd_algs[ARRAY_SIZE(aes_algs)];

static void aes_exit(void)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(aes_simd_algs); i++)
		if (aes_simd_algs[i])
			simd_skcipher_free(aes_simd_algs[i]);

	crypto_unregister_skciphers(aes_algs, ARRAY_SIZE(aes_algs));
}

static int __init aes_init(void)
{
	struct simd_skcipher_alg *simd;
	const char *basename;
	const char *algname;
	const char *drvname;
	int err;
	int i;

	if (!(elf_hwcap & HWCAP_NEON))
		return -ENODEV;

	err = crypto_register_skciphers(aes_algs, ARRAY_SIZE(aes_algs));
	if (err)
		return err;

	for (i = 0; i < ARRAY_SIZE(aes_algs); i++) {
		if (!(aes_algs[i].base.cra_flags & CRYPTO_ALG_INTERNAL))
			continue;

		algname = aes_algs[i].base.cra_name + 2;	/* skip "__" */
		drvname = aes_algs[i].base.cra_driver_name + 2;
		basename = aes_algs[i].base.cra_driver_name;
		simd = simd_skcipher_create_compat(algname, drvname, basename);
		err = PTR_ERR(simd);
		if (IS_ERR(simd))
			goto unregister_simds;

		aes_simd_algs[i] = simd;
	}
	return 0;

unregister_simds:
	aes_exit();
	return err;
}

late_initcall(aes_init);
module_exit(aes_exit);