/*
 * ARM NEON accelerated ChaCha and XChaCha stream ciphers,
 * including ChaCha20 (RFC7539)
 *
 * Copyright (C) 2016 - 2017 Linaro, Ltd. <ard.biesheuvel@linaro.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Based on:
 * ChaCha20 256-bit cipher algorithm, RFC7539, SIMD glue code
 *
 * Copyright (C) 2015 Martin Willi
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <crypto/algapi.h>
#include <crypto/chacha.h>
#include <crypto/internal/skcipher.h>
#include <linux/kernel.h>
#include <linux/module.h>

#include <asm/hwcap.h>
#include <asm/neon.h>
#include <asm/simd.h>

asmlinkage void chacha_block_xor_neon(u32 *state, u8 *dst, const u8 *src,
				      int nrounds);
asmlinkage void chacha_4block_xor_neon(u32 *state, u8 *dst, const u8 *src,
				       int nrounds, int bytes);
asmlinkage void hchacha_block_neon(const u32 *state, u32 *out, int nrounds);

/*
 * Process the data in chunks of up to five ChaCha blocks. Chunks larger than
 * one block go through the multi-block NEON routine; a final chunk of at most
 * one block is bounced through a stack buffer so partial blocks are handled
 * safely. state[12] is the ChaCha block counter.
 */
static void chacha_doneon(u32 *state, u8 *dst, const u8 *src,
			  int bytes, int nrounds)
{
	while (bytes > 0) {
		int l = min(bytes, CHACHA_BLOCK_SIZE * 5);

		if (l <= CHACHA_BLOCK_SIZE) {
			u8 buf[CHACHA_BLOCK_SIZE];

			memcpy(buf, src, l);
			chacha_block_xor_neon(state, buf, buf, nrounds);
			memcpy(dst, buf, l);
			state[12] += 1;
			break;
		}
		chacha_4block_xor_neon(state, dst, src, nrounds, l);
		bytes -= CHACHA_BLOCK_SIZE * 5;
		src += CHACHA_BLOCK_SIZE * 5;
		dst += CHACHA_BLOCK_SIZE * 5;
		state[12] += 5;
	}
}

static int chacha_neon_stream_xor(struct skcipher_request *req,
				  struct chacha_ctx *ctx, u8 *iv)
{
	struct skcipher_walk walk;
	u32 state[16];
	int err;

	err = skcipher_walk_virt(&walk, req, false);

	crypto_chacha_init(state, ctx, iv);

	while (walk.nbytes > 0) {
		unsigned int nbytes = walk.nbytes;

		/* process whole multiples of the walk stride except on the final step */
		if (nbytes < walk.total)
			nbytes = rounddown(nbytes, walk.stride);

		kernel_neon_begin();
		chacha_doneon(state, walk.dst.virt.addr, walk.src.virt.addr,
			      nbytes, ctx->nrounds);
		kernel_neon_end();
		err = skcipher_walk_done(&walk, walk.nbytes - nbytes);
	}

	return err;
}

static int chacha_neon(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct chacha_ctx *ctx = crypto_skcipher_ctx(tfm);

	/* fall back to the generic code for short requests or when SIMD is unusable */
	if (req->cryptlen <= CHACHA_BLOCK_SIZE || !may_use_simd())
		return crypto_chacha_crypt(req);

	return chacha_neon_stream_xor(req, ctx, req->iv);
}

static int xchacha_neon(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct chacha_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct chacha_ctx subctx;
	u32 state[16];
	u8 real_iv[16];

	if (req->cryptlen <= CHACHA_BLOCK_SIZE || !may_use_simd())
		return crypto_xchacha_crypt(req);

	/*
	 * Derive the XChaCha subkey with HChaCha, using the ChaCha key and
	 * the first 128 bits of the IV.
	 */
	crypto_chacha_init(state, ctx, req->iv);

	kernel_neon_begin();
	hchacha_block_neon(state, subctx.key, ctx->nrounds);
	kernel_neon_end();
	subctx.nrounds = ctx->nrounds;

	/* construct the 16-byte IV for the inner ChaCha from the remaining IV bytes */
	memcpy(&real_iv[0], req->iv + 24, 8);
	memcpy(&real_iv[8], req->iv + 16, 8);
	return chacha_neon_stream_xor(req, &subctx, real_iv);
}

static struct skcipher_alg algs[] = {
	{
		.base.cra_name		= "chacha20",
		.base.cra_driver_name	= "chacha20-neon",
		.base.cra_priority	= 300,
		.base.cra_blocksize	= 1,
		.base.cra_ctxsize	= sizeof(struct chacha_ctx),
		.base.cra_module	= THIS_MODULE,

		.min_keysize		= CHACHA_KEY_SIZE,
		.max_keysize		= CHACHA_KEY_SIZE,
		.ivsize			= CHACHA_IV_SIZE,
		.chunksize		= CHACHA_BLOCK_SIZE,
		.walksize		= 5 * CHACHA_BLOCK_SIZE,
		.setkey			= crypto_chacha20_setkey,
		.encrypt		= chacha_neon,
		.decrypt		= chacha_neon,
	}, {
		.base.cra_name		= "xchacha20",
		.base.cra_driver_name	= "xchacha20-neon",
		.base.cra_priority	= 300,
		.base.cra_blocksize	= 1,
		.base.cra_ctxsize	= sizeof(struct chacha_ctx),
		.base.cra_module	= THIS_MODULE,

		.min_keysize		= CHACHA_KEY_SIZE,
		.max_keysize		= CHACHA_KEY_SIZE,
		.ivsize			= XCHACHA_IV_SIZE,
		.chunksize		= CHACHA_BLOCK_SIZE,
		.walksize		= 5 * CHACHA_BLOCK_SIZE,
		.setkey			= crypto_chacha20_setkey,
		.encrypt		= xchacha_neon,
		.decrypt		= xchacha_neon,
	}, {
		.base.cra_name		= "xchacha12",
		.base.cra_driver_name	= "xchacha12-neon",
		.base.cra_priority	= 300,
		.base.cra_blocksize	= 1,
		.base.cra_ctxsize	= sizeof(struct chacha_ctx),
		.base.cra_module	= THIS_MODULE,

		.min_keysize		= CHACHA_KEY_SIZE,
		.max_keysize		= CHACHA_KEY_SIZE,
		.ivsize			= XCHACHA_IV_SIZE,
		.chunksize		= CHACHA_BLOCK_SIZE,
		.walksize		= 5 * CHACHA_BLOCK_SIZE,
		.setkey			= crypto_chacha12_setkey,
		.encrypt		= xchacha_neon,
		.decrypt		= xchacha_neon,
	}
};

static int __init chacha_simd_mod_init(void)
{
	if (!(elf_hwcap & HWCAP_ASIMD))
		return -ENODEV;

	return crypto_register_skciphers(algs, ARRAY_SIZE(algs));
}

static void __exit chacha_simd_mod_fini(void)
{
	crypto_unregister_skciphers(algs, ARRAY_SIZE(algs));
}

module_init(chacha_simd_mod_init);
module_exit(chacha_simd_mod_fini);

MODULE_DESCRIPTION("ChaCha and XChaCha stream ciphers (NEON accelerated)");
MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS_CRYPTO("chacha20");
MODULE_ALIAS_CRYPTO("chacha20-neon");
MODULE_ALIAS_CRYPTO("xchacha20");
MODULE_ALIAS_CRYPTO("xchacha20-neon");
MODULE_ALIAS_CRYPTO("xchacha12");
MODULE_ALIAS_CRYPTO("xchacha12-neon");