/*
 * CTR: Counter mode
 *
 * (C) Copyright IBM Corp. 2007 - Joy Latten <latten@us.ibm.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 */

#include <crypto/algapi.h>
#include <crypto/ctr.h>
#include <crypto/internal/skcipher.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>

struct crypto_ctr_ctx {
	struct crypto_cipher *child;
};

struct crypto_rfc3686_ctx {
	struct crypto_skcipher *child;
	u8 nonce[CTR_RFC3686_NONCE_SIZE];
};

struct crypto_rfc3686_req_ctx {
	u8 iv[CTR_RFC3686_BLOCK_SIZE];
	struct skcipher_request subreq CRYPTO_MINALIGN_ATTR;
};

static int crypto_ctr_setkey(struct crypto_tfm *parent, const u8 *key,
			     unsigned int keylen)
{
	struct crypto_ctr_ctx *ctx = crypto_tfm_ctx(parent);
	struct crypto_cipher *child = ctx->child;
	int err;

	crypto_cipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
	crypto_cipher_set_flags(child, crypto_tfm_get_flags(parent) &
				       CRYPTO_TFM_REQ_MASK);
	err = crypto_cipher_setkey(child, key, keylen);
	crypto_tfm_set_flags(parent, crypto_cipher_get_flags(child) &
				     CRYPTO_TFM_RES_MASK);

	return err;
}

static void crypto_ctr_crypt_final(struct blkcipher_walk *walk,
				   struct crypto_cipher *tfm)
{
	unsigned int bsize = crypto_cipher_blocksize(tfm);
	unsigned long alignmask = crypto_cipher_alignmask(tfm);
	u8 *ctrblk = walk->iv;
	u8 tmp[MAX_CIPHER_BLOCKSIZE + MAX_CIPHER_ALIGNMASK];
	u8 *keystream = PTR_ALIGN(tmp + 0, alignmask + 1);
	u8 *src = walk->src.virt.addr;
	u8 *dst = walk->dst.virt.addr;
	unsigned int nbytes = walk->nbytes;

	crypto_cipher_encrypt_one(tfm, keystream, ctrblk);
	crypto_xor_cpy(dst, keystream, src, nbytes);

	crypto_inc(ctrblk, bsize);
}

static int crypto_ctr_crypt_segment(struct blkcipher_walk *walk,
				    struct crypto_cipher *tfm)
{
	void (*fn)(struct crypto_tfm *, u8 *, const u8 *) =
		crypto_cipher_alg(tfm)->cia_encrypt;
	unsigned int bsize = crypto_cipher_blocksize(tfm);
	u8 *ctrblk = walk->iv;
	u8 *src = walk->src.virt.addr;
	u8 *dst = walk->dst.virt.addr;
	unsigned int nbytes = walk->nbytes;

	do {
		/* create keystream */
		fn(crypto_cipher_tfm(tfm), dst, ctrblk);
		crypto_xor(dst, src, bsize);

		/* increment counter in counterblock */
		crypto_inc(ctrblk, bsize);

		src += bsize;
		dst += bsize;
	} while ((nbytes -= bsize) >= bsize);

	return nbytes;
}

static int crypto_ctr_crypt_inplace(struct blkcipher_walk *walk,
				    struct crypto_cipher *tfm)
{
	void (*fn)(struct crypto_tfm *, u8 *, const u8 *) =
		crypto_cipher_alg(tfm)->cia_encrypt;
	unsigned int bsize = crypto_cipher_blocksize(tfm);
	unsigned long alignmask = crypto_cipher_alignmask(tfm);
	unsigned int nbytes = walk->nbytes;
	u8 *ctrblk = walk->iv;
	u8 *src = walk->src.virt.addr;
	u8 tmp[MAX_CIPHER_BLOCKSIZE + MAX_CIPHER_ALIGNMASK];
	u8 *keystream = PTR_ALIGN(tmp + 0, alignmask + 1);

	do {
		/* create keystream */
		fn(crypto_cipher_tfm(tfm), keystream, ctrblk);
		crypto_xor(src, keystream, bsize);

		/* increment counter in counterblock */
		crypto_inc(ctrblk, bsize);

		src += bsize;
	} while ((nbytes -= bsize) >= bsize);

	return nbytes;
}
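/*
 * Top-level CTR walk: full blocks are processed by the segment or
 * in-place helpers above, depending on whether source and destination
 * buffers overlap; any trailing partial block is handled by
 * crypto_ctr_crypt_final(), which truncates the last keystream block.
 * Since CTR only ever runs the block cipher forward, encryption and
 * decryption share this routine.
 */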
static int crypto_ctr_crypt(struct blkcipher_desc *desc,
			    struct scatterlist *dst, struct scatterlist *src,
			    unsigned int nbytes)
{
	struct blkcipher_walk walk;
	struct crypto_blkcipher *tfm = desc->tfm;
	struct crypto_ctr_ctx *ctx = crypto_blkcipher_ctx(tfm);
	struct crypto_cipher *child = ctx->child;
	unsigned int bsize = crypto_cipher_blocksize(child);
	int err;

	blkcipher_walk_init(&walk, dst, src, nbytes);
	err = blkcipher_walk_virt_block(desc, &walk, bsize);

	while (walk.nbytes >= bsize) {
		if (walk.src.virt.addr == walk.dst.virt.addr)
			nbytes = crypto_ctr_crypt_inplace(&walk, child);
		else
			nbytes = crypto_ctr_crypt_segment(&walk, child);

		err = blkcipher_walk_done(desc, &walk, nbytes);
	}

	if (walk.nbytes) {
		crypto_ctr_crypt_final(&walk, child);
		err = blkcipher_walk_done(desc, &walk, 0);
	}

	return err;
}

static int crypto_ctr_init_tfm(struct crypto_tfm *tfm)
{
	struct crypto_instance *inst = (void *)tfm->__crt_alg;
	struct crypto_spawn *spawn = crypto_instance_ctx(inst);
	struct crypto_ctr_ctx *ctx = crypto_tfm_ctx(tfm);
	struct crypto_cipher *cipher;

	cipher = crypto_spawn_cipher(spawn);
	if (IS_ERR(cipher))
		return PTR_ERR(cipher);

	ctx->child = cipher;

	return 0;
}

static void crypto_ctr_exit_tfm(struct crypto_tfm *tfm)
{
	struct crypto_ctr_ctx *ctx = crypto_tfm_ctx(tfm);

	crypto_free_cipher(ctx->child);
}
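/*
 * Instantiation of the "ctr" template: the wrapped primitive must be a
 * plain block cipher whose block size is a multiple of 4 bytes (the
 * checks below enforce this so that crypto_inc() can operate on
 * aligned 32-bit words).  The resulting blkcipher advertises a block
 * size of 1, i.e. it behaves as a stream cipher, and its IV is one
 * full counter block of the underlying cipher.
 */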
static struct crypto_instance *crypto_ctr_alloc(struct rtattr **tb)
{
	struct crypto_instance *inst;
	struct crypto_attr_type *algt;
	struct crypto_alg *alg;
	u32 mask;
	int err;

	err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_BLKCIPHER);
	if (err)
		return ERR_PTR(err);

	algt = crypto_get_attr_type(tb);
	if (IS_ERR(algt))
		return ERR_CAST(algt);

	mask = CRYPTO_ALG_TYPE_MASK |
		crypto_requires_off(algt->type, algt->mask,
				    CRYPTO_ALG_NEED_FALLBACK);

	alg = crypto_attr_alg(tb[1], CRYPTO_ALG_TYPE_CIPHER, mask);
	if (IS_ERR(alg))
		return ERR_CAST(alg);

	/* Block size must be >= 4 bytes. */
	err = -EINVAL;
	if (alg->cra_blocksize < 4)
		goto out_put_alg;

	/* If this is false we'd fail the alignment of crypto_inc. */
	if (alg->cra_blocksize % 4)
		goto out_put_alg;

	inst = crypto_alloc_instance("ctr", alg);
	if (IS_ERR(inst))
		goto out;

	inst->alg.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER;
	inst->alg.cra_priority = alg->cra_priority;
	inst->alg.cra_blocksize = 1;
	inst->alg.cra_alignmask = alg->cra_alignmask;
	inst->alg.cra_type = &crypto_blkcipher_type;

	inst->alg.cra_blkcipher.ivsize = alg->cra_blocksize;
	inst->alg.cra_blkcipher.min_keysize = alg->cra_cipher.cia_min_keysize;
	inst->alg.cra_blkcipher.max_keysize = alg->cra_cipher.cia_max_keysize;

	inst->alg.cra_ctxsize = sizeof(struct crypto_ctr_ctx);

	inst->alg.cra_init = crypto_ctr_init_tfm;
	inst->alg.cra_exit = crypto_ctr_exit_tfm;

	inst->alg.cra_blkcipher.setkey = crypto_ctr_setkey;
	inst->alg.cra_blkcipher.encrypt = crypto_ctr_crypt;
	inst->alg.cra_blkcipher.decrypt = crypto_ctr_crypt;

out:
	crypto_mod_put(alg);
	return inst;

out_put_alg:
	inst = ERR_PTR(err);
	goto out;
}

static void crypto_ctr_free(struct crypto_instance *inst)
{
	crypto_drop_spawn(crypto_instance_ctx(inst));
	kfree(inst);
}

static struct crypto_template crypto_ctr_tmpl = {
	.name = "ctr",
	.alloc = crypto_ctr_alloc,
	.free = crypto_ctr_free,
	.module = THIS_MODULE,
};
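/*
 * rfc3686(ctr(...)): CTR mode for IPsec ESP as specified in RFC 3686.
 * The 16-byte counter block handed to the inner CTR algorithm is laid
 * out as
 *
 *	nonce (4 bytes) | per-request IV (8 bytes) | counter (4 bytes)
 *
 * The big-endian counter starts at 1 for each request, and the nonce
 * is taken from the last CTR_RFC3686_NONCE_SIZE bytes of the key
 * supplied by the user.
 */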
static int crypto_rfc3686_setkey(struct crypto_skcipher *parent,
				 const u8 *key, unsigned int keylen)
{
	struct crypto_rfc3686_ctx *ctx = crypto_skcipher_ctx(parent);
	struct crypto_skcipher *child = ctx->child;
	int err;

	/* the nonce is stored in bytes at end of key */
	if (keylen < CTR_RFC3686_NONCE_SIZE)
		return -EINVAL;

	memcpy(ctx->nonce, key + (keylen - CTR_RFC3686_NONCE_SIZE),
	       CTR_RFC3686_NONCE_SIZE);

	keylen -= CTR_RFC3686_NONCE_SIZE;

	crypto_skcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
	crypto_skcipher_set_flags(child, crypto_skcipher_get_flags(parent) &
					 CRYPTO_TFM_REQ_MASK);
	err = crypto_skcipher_setkey(child, key, keylen);
	crypto_skcipher_set_flags(parent, crypto_skcipher_get_flags(child) &
					  CRYPTO_TFM_RES_MASK);

	return err;
}

static int crypto_rfc3686_crypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct crypto_rfc3686_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct crypto_skcipher *child = ctx->child;
	unsigned long align = crypto_skcipher_alignmask(tfm);
	struct crypto_rfc3686_req_ctx *rctx =
		(void *)PTR_ALIGN((u8 *)skcipher_request_ctx(req), align + 1);
	struct skcipher_request *subreq = &rctx->subreq;
	u8 *iv = rctx->iv;

	/* set up counter block */
	memcpy(iv, ctx->nonce, CTR_RFC3686_NONCE_SIZE);
	memcpy(iv + CTR_RFC3686_NONCE_SIZE, req->iv, CTR_RFC3686_IV_SIZE);

	/* initialize counter portion of counter block */
	*(__be32 *)(iv + CTR_RFC3686_NONCE_SIZE + CTR_RFC3686_IV_SIZE) =
		cpu_to_be32(1);

	skcipher_request_set_tfm(subreq, child);
	skcipher_request_set_callback(subreq, req->base.flags,
				      req->base.complete, req->base.data);
	skcipher_request_set_crypt(subreq, req->src, req->dst,
				   req->cryptlen, iv);

	return crypto_skcipher_encrypt(subreq);
}

static int crypto_rfc3686_init_tfm(struct crypto_skcipher *tfm)
{
	struct skcipher_instance *inst = skcipher_alg_instance(tfm);
	struct crypto_skcipher_spawn *spawn = skcipher_instance_ctx(inst);
	struct crypto_rfc3686_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct crypto_skcipher *cipher;
	unsigned long align;
	unsigned int reqsize;

	cipher = crypto_spawn_skcipher(spawn);
	if (IS_ERR(cipher))
		return PTR_ERR(cipher);

	ctx->child = cipher;

	align = crypto_skcipher_alignmask(tfm);
	align &= ~(crypto_tfm_ctx_alignment() - 1);
	reqsize = align + sizeof(struct crypto_rfc3686_req_ctx) +
		  crypto_skcipher_reqsize(cipher);
	crypto_skcipher_set_reqsize(tfm, reqsize);

	return 0;
}

static void crypto_rfc3686_exit_tfm(struct crypto_skcipher *tfm)
{
	struct crypto_rfc3686_ctx *ctx = crypto_skcipher_ctx(tfm);

	crypto_free_skcipher(ctx->child);
}

static void crypto_rfc3686_free(struct skcipher_instance *inst)
{
	struct crypto_skcipher_spawn *spawn = skcipher_instance_ctx(inst);

	crypto_drop_skcipher(spawn);
	kfree(inst);
}
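/*
 * Example usage, as a sketch: a caller allocates an instance such as
 * "rfc3686(ctr(aes))" and appends the 4-byte nonce to the cipher key,
 * so AES-128 takes a 20-byte key:
 *
 *	struct crypto_skcipher *tfm;
 *
 *	tfm = crypto_alloc_skcipher("rfc3686(ctr(aes))", 0, 0);
 *	if (IS_ERR(tfm))
 *		return PTR_ERR(tfm);
 *	err = crypto_skcipher_setkey(tfm, key, 16 + CTR_RFC3686_NONCE_SIZE);
 */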
static int crypto_rfc3686_create(struct crypto_template *tmpl,
				 struct rtattr **tb)
{
	struct crypto_attr_type *algt;
	struct skcipher_instance *inst;
	struct skcipher_alg *alg;
	struct crypto_skcipher_spawn *spawn;
	const char *cipher_name;
	u32 mask;
	int err;

	algt = crypto_get_attr_type(tb);
	if (IS_ERR(algt))
		return PTR_ERR(algt);

	if ((algt->type ^ CRYPTO_ALG_TYPE_SKCIPHER) & algt->mask)
		return -EINVAL;

	cipher_name = crypto_attr_alg_name(tb[1]);
	if (IS_ERR(cipher_name))
		return PTR_ERR(cipher_name);

	inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL);
	if (!inst)
		return -ENOMEM;

	mask = crypto_requires_sync(algt->type, algt->mask) |
		crypto_requires_off(algt->type, algt->mask,
				    CRYPTO_ALG_NEED_FALLBACK);

	spawn = skcipher_instance_ctx(inst);

	crypto_set_skcipher_spawn(spawn, skcipher_crypto_instance(inst));
	err = crypto_grab_skcipher(spawn, cipher_name, 0, mask);
	if (err)
		goto err_free_inst;

	alg = crypto_spawn_skcipher_alg(spawn);

	/* We only support 16-byte blocks. */
	err = -EINVAL;
	if (crypto_skcipher_alg_ivsize(alg) != CTR_RFC3686_BLOCK_SIZE)
		goto err_drop_spawn;

	/* Not a stream cipher? */
	if (alg->base.cra_blocksize != 1)
		goto err_drop_spawn;

	err = -ENAMETOOLONG;
	if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME,
		     "rfc3686(%s)", alg->base.cra_name) >= CRYPTO_MAX_ALG_NAME)
		goto err_drop_spawn;
	if (snprintf(inst->alg.base.cra_driver_name, CRYPTO_MAX_ALG_NAME,
		     "rfc3686(%s)", alg->base.cra_driver_name) >=
	    CRYPTO_MAX_ALG_NAME)
		goto err_drop_spawn;

	inst->alg.base.cra_priority = alg->base.cra_priority;
	inst->alg.base.cra_blocksize = 1;
	inst->alg.base.cra_alignmask = alg->base.cra_alignmask;

	inst->alg.base.cra_flags = alg->base.cra_flags & CRYPTO_ALG_ASYNC;

	inst->alg.ivsize = CTR_RFC3686_IV_SIZE;
	inst->alg.chunksize = crypto_skcipher_alg_chunksize(alg);
	inst->alg.min_keysize = crypto_skcipher_alg_min_keysize(alg) +
				CTR_RFC3686_NONCE_SIZE;
	inst->alg.max_keysize = crypto_skcipher_alg_max_keysize(alg) +
				CTR_RFC3686_NONCE_SIZE;

	inst->alg.setkey = crypto_rfc3686_setkey;
	inst->alg.encrypt = crypto_rfc3686_crypt;
	inst->alg.decrypt = crypto_rfc3686_crypt;

	inst->alg.base.cra_ctxsize = sizeof(struct crypto_rfc3686_ctx);

	inst->alg.init = crypto_rfc3686_init_tfm;
	inst->alg.exit = crypto_rfc3686_exit_tfm;

	inst->free = crypto_rfc3686_free;

	err = skcipher_register_instance(tmpl, inst);
	if (err)
		goto err_drop_spawn;

out:
	return err;

err_drop_spawn:
	crypto_drop_skcipher(spawn);
err_free_inst:
	kfree(inst);
	goto out;
}

static struct crypto_template crypto_rfc3686_tmpl = {
	.name = "rfc3686",
	.create = crypto_rfc3686_create,
	.module = THIS_MODULE,
};

static int __init crypto_ctr_module_init(void)
{
	int err;

	err = crypto_register_template(&crypto_ctr_tmpl);
	if (err)
		goto out;

	err = crypto_register_template(&crypto_rfc3686_tmpl);
	if (err)
		goto out_drop_ctr;

out:
	return err;

out_drop_ctr:
	crypto_unregister_template(&crypto_ctr_tmpl);
	goto out;
}

static void __exit crypto_ctr_module_exit(void)
{
	crypto_unregister_template(&crypto_rfc3686_tmpl);
	crypto_unregister_template(&crypto_ctr_tmpl);
}

module_init(crypto_ctr_module_init);
module_exit(crypto_ctr_module_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("CTR Counter block mode");
MODULE_ALIAS_CRYPTO("rfc3686");
MODULE_ALIAS_CRYPTO("ctr");