// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * CTR: Counter mode
 *
 * (C) Copyright IBM Corp. 2007 - Joy Latten <latten@us.ibm.com>
 */
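
/*
 * Example use (a sketch, not part of the original file): both templates
 * registered here are reached through the regular skcipher API, e.g.:
 *
 *	struct crypto_skcipher *tfm;
 *
 *	tfm = crypto_alloc_skcipher("ctr(aes)", 0, 0);
 *	if (IS_ERR(tfm))
 *		return PTR_ERR(tfm);
 *	err = crypto_skcipher_setkey(tfm, key, 16);
 *	...
 *	crypto_free_skcipher(tfm);
 *
 * "rfc3686(ctr(aes))" selects the RFC 3686 wrapper registered below,
 * whose key carries a trailing 4-byte nonce.
 */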

#include <crypto/algapi.h>
#include <crypto/ctr.h>
#include <crypto/internal/skcipher.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>

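/*
 * tfm context for the rfc3686 template: the inner "ctr(...)" skcipher
 * and the 4-byte nonce taken from the tail of the key.
 */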
struct crypto_rfc3686_ctx {
	struct crypto_skcipher *child;
	u8 nonce[CTR_RFC3686_NONCE_SIZE];
};

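/*
 * Per-request context: the full 16-byte counter block plus the
 * subrequest forwarded to the inner skcipher.  CRYPTO_MINALIGN_ATTR
 * keeps the subrequest (and the child's request context behind it)
 * suitably aligned.
 */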
struct crypto_rfc3686_req_ctx {
	u8 iv[CTR_RFC3686_BLOCK_SIZE];
	struct skcipher_request subreq CRYPTO_MINALIGN_ATTR;
};

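/*
 * Handle the final, partial block: generate one block of keystream into
 * an aligned stack buffer and XOR only walk->nbytes bytes of it into the
 * destination.
 */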
static void crypto_ctr_crypt_final(struct skcipher_walk *walk,
				   struct crypto_cipher *tfm)
{
	unsigned int bsize = crypto_cipher_blocksize(tfm);
	unsigned long alignmask = crypto_cipher_alignmask(tfm);
	u8 *ctrblk = walk->iv;
	u8 tmp[MAX_CIPHER_BLOCKSIZE + MAX_CIPHER_ALIGNMASK];
	u8 *keystream = PTR_ALIGN(tmp + 0, alignmask + 1);
	u8 *src = walk->src.virt.addr;
	u8 *dst = walk->dst.virt.addr;
	unsigned int nbytes = walk->nbytes;

	crypto_cipher_encrypt_one(tfm, keystream, ctrblk);
	crypto_xor_cpy(dst, keystream, src, nbytes);

	crypto_inc(ctrblk, bsize);
}

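/*
 * Process full blocks when source and destination differ: the keystream
 * is generated directly into dst and then XORed with src in place.
 * Returns the number of bytes left over (less than one block).
 */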
static int crypto_ctr_crypt_segment(struct skcipher_walk *walk,
				    struct crypto_cipher *tfm)
{
	void (*fn)(struct crypto_tfm *, u8 *, const u8 *) =
		   crypto_cipher_alg(tfm)->cia_encrypt;
	unsigned int bsize = crypto_cipher_blocksize(tfm);
	u8 *ctrblk = walk->iv;
	u8 *src = walk->src.virt.addr;
	u8 *dst = walk->dst.virt.addr;
	unsigned int nbytes = walk->nbytes;

	do {
		/* create keystream */
		fn(crypto_cipher_tfm(tfm), dst, ctrblk);
		crypto_xor(dst, src, bsize);

		/* increment counter in counterblock */
		crypto_inc(ctrblk, bsize);

		src += bsize;
		dst += bsize;
	} while ((nbytes -= bsize) >= bsize);

	return nbytes;
}

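/*
 * Process full blocks in place: here the keystream must go through an
 * aligned temporary buffer, since writing it straight to dst would
 * clobber the source data before the XOR.  Returns the number of bytes
 * left over (less than one block).
 */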
static int crypto_ctr_crypt_inplace(struct skcipher_walk *walk,
				    struct crypto_cipher *tfm)
{
	void (*fn)(struct crypto_tfm *, u8 *, const u8 *) =
		   crypto_cipher_alg(tfm)->cia_encrypt;
	unsigned int bsize = crypto_cipher_blocksize(tfm);
	unsigned long alignmask = crypto_cipher_alignmask(tfm);
	unsigned int nbytes = walk->nbytes;
	u8 *ctrblk = walk->iv;
	u8 *src = walk->src.virt.addr;
	u8 tmp[MAX_CIPHER_BLOCKSIZE + MAX_CIPHER_ALIGNMASK];
	u8 *keystream = PTR_ALIGN(tmp + 0, alignmask + 1);

	do {
		/* create keystream */
		fn(crypto_cipher_tfm(tfm), keystream, ctrblk);
		crypto_xor(src, keystream, bsize);

		/* increment counter in counterblock */
		crypto_inc(ctrblk, bsize);

		src += bsize;
	} while ((nbytes -= bsize) >= bsize);

	return nbytes;
}

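/*
 * Walk the request and dispatch to the in-place or out-of-place helper;
 * encryption and decryption are the same operation in CTR mode, so this
 * serves as both.
 */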
static int crypto_ctr_crypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct crypto_cipher *cipher = skcipher_cipher_simple(tfm);
	const unsigned int bsize = crypto_cipher_blocksize(cipher);
	struct skcipher_walk walk;
	unsigned int nbytes;
	int err;

	err = skcipher_walk_virt(&walk, req, false);

	while (walk.nbytes >= bsize) {
		if (walk.src.virt.addr == walk.dst.virt.addr)
			nbytes = crypto_ctr_crypt_inplace(&walk, cipher);
		else
			nbytes = crypto_ctr_crypt_segment(&walk, cipher);

		err = skcipher_walk_done(&walk, nbytes);
	}

	if (walk.nbytes) {
		crypto_ctr_crypt_final(&walk, cipher);
		err = skcipher_walk_done(&walk, 0);
	}

	return err;
}

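/*
 * Instantiate "ctr(cipher)" around a single block cipher using the
 * simple skcipher template helpers.
 */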
static int crypto_ctr_create(struct crypto_template *tmpl, struct rtattr **tb)
{
	struct skcipher_instance *inst;
	struct crypto_alg *alg;
	int err;

	inst = skcipher_alloc_instance_simple(tmpl, tb);
	if (IS_ERR(inst))
		return PTR_ERR(inst);

	alg = skcipher_ialg_simple(inst);

	/* Block size must be >= 4 bytes. */
	err = -EINVAL;
	if (alg->cra_blocksize < 4)
		goto out_free_inst;

	/*
	 * The block size must also be a multiple of 4 bytes, so that the
	 * 32-bit words crypto_inc() operates on stay aligned.
	 */
	if (alg->cra_blocksize % 4)
		goto out_free_inst;

	/* CTR mode is a stream cipher. */
	inst->alg.base.cra_blocksize = 1;

	/*
	 * To simplify the implementation, configure the skcipher walk to only
	 * give a partial block at the very end, never earlier.
	 */
	inst->alg.chunksize = alg->cra_blocksize;

	inst->alg.encrypt = crypto_ctr_crypt;
	inst->alg.decrypt = crypto_ctr_crypt;

	err = skcipher_register_instance(tmpl, inst);
	if (err) {
out_free_inst:
		inst->free(inst);
	}

	return err;
}

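/*
 * An RFC 3686 key is the raw cipher key with a 4-byte nonce appended:
 * peel the nonce off the tail and hand the remaining key material to the
 * inner "ctr(...)" tfm.
 */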
static int crypto_rfc3686_setkey(struct crypto_skcipher *parent,
				 const u8 *key, unsigned int keylen)
{
	struct crypto_rfc3686_ctx *ctx = crypto_skcipher_ctx(parent);
	struct crypto_skcipher *child = ctx->child;
	int err;

	/* the nonce is stored at the end of the key */
	if (keylen < CTR_RFC3686_NONCE_SIZE)
		return -EINVAL;

	memcpy(ctx->nonce, key + (keylen - CTR_RFC3686_NONCE_SIZE),
	       CTR_RFC3686_NONCE_SIZE);

	keylen -= CTR_RFC3686_NONCE_SIZE;

	crypto_skcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
	crypto_skcipher_set_flags(child, crypto_skcipher_get_flags(parent) &
					 CRYPTO_TFM_REQ_MASK);
	err = crypto_skcipher_setkey(child, key, keylen);
	crypto_skcipher_set_flags(parent, crypto_skcipher_get_flags(child) &
					  CRYPTO_TFM_RES_MASK);

	return err;
}

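/*
 * Build the 16-byte counter block laid out as in RFC 3686:
 *
 *	| nonce (4) | per-request IV (8) | counter (4, big endian, = 1) |
 *
 * and forward the request to the inner "ctr(...)" skcipher with that
 * block as its IV.
 */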
static int crypto_rfc3686_crypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct crypto_rfc3686_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct crypto_skcipher *child = ctx->child;
	unsigned long align = crypto_skcipher_alignmask(tfm);
	struct crypto_rfc3686_req_ctx *rctx =
		(void *)PTR_ALIGN((u8 *)skcipher_request_ctx(req), align + 1);
	struct skcipher_request *subreq = &rctx->subreq;
	u8 *iv = rctx->iv;

	/* set up counter block */
	memcpy(iv, ctx->nonce, CTR_RFC3686_NONCE_SIZE);
	memcpy(iv + CTR_RFC3686_NONCE_SIZE, req->iv, CTR_RFC3686_IV_SIZE);

	/* initialize counter portion of counter block */
	*(__be32 *)(iv + CTR_RFC3686_NONCE_SIZE + CTR_RFC3686_IV_SIZE) =
		cpu_to_be32(1);

	skcipher_request_set_tfm(subreq, child);
	skcipher_request_set_callback(subreq, req->base.flags,
				      req->base.complete, req->base.data);
	skcipher_request_set_crypt(subreq, req->src, req->dst,
				   req->cryptlen, iv);

	return crypto_skcipher_encrypt(subreq);
}

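/*
 * Instantiate the child tfm and size the request context so that the
 * aligned subrequest (plus the child's own request context behind it)
 * always fits, wherever skcipher_request_ctx() lands.
 */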
static int crypto_rfc3686_init_tfm(struct crypto_skcipher *tfm)
{
	struct skcipher_instance *inst = skcipher_alg_instance(tfm);
	struct crypto_skcipher_spawn *spawn = skcipher_instance_ctx(inst);
	struct crypto_rfc3686_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct crypto_skcipher *cipher;
	unsigned long align;
	unsigned int reqsize;

	cipher = crypto_spawn_skcipher(spawn);
	if (IS_ERR(cipher))
		return PTR_ERR(cipher);

	ctx->child = cipher;

	align = crypto_skcipher_alignmask(tfm);
	align &= ~(crypto_tfm_ctx_alignment() - 1);
	reqsize = align + sizeof(struct crypto_rfc3686_req_ctx) +
		  crypto_skcipher_reqsize(cipher);
	crypto_skcipher_set_reqsize(tfm, reqsize);

	return 0;
}

static void crypto_rfc3686_exit_tfm(struct crypto_skcipher *tfm)
{
	struct crypto_rfc3686_ctx *ctx = crypto_skcipher_ctx(tfm);

	crypto_free_skcipher(ctx->child);
}

static void crypto_rfc3686_free(struct skcipher_instance *inst)
{
	struct crypto_skcipher_spawn *spawn = skcipher_instance_ctx(inst);

	crypto_drop_skcipher(spawn);
	kfree(inst);
}

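/*
 * Instantiate "rfc3686(...)" around an existing CTR-mode skcipher: grab
 * the inner algorithm by name, check that it really is a stream cipher
 * with a 16-byte counter block, and register the wrapper with the nonce
 * size folded into its key sizes.
 */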
static int crypto_rfc3686_create(struct crypto_template *tmpl,
				 struct rtattr **tb)
{
	struct crypto_attr_type *algt;
	struct skcipher_instance *inst;
	struct skcipher_alg *alg;
	struct crypto_skcipher_spawn *spawn;
	const char *cipher_name;
	u32 mask;
	int err;

	algt = crypto_get_attr_type(tb);
	if (IS_ERR(algt))
		return PTR_ERR(algt);

	if ((algt->type ^ CRYPTO_ALG_TYPE_SKCIPHER) & algt->mask)
		return -EINVAL;

	cipher_name = crypto_attr_alg_name(tb[1]);
	if (IS_ERR(cipher_name))
		return PTR_ERR(cipher_name);

	inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL);
	if (!inst)
		return -ENOMEM;

	mask = crypto_requires_sync(algt->type, algt->mask) |
		crypto_requires_off(algt->type, algt->mask,
				    CRYPTO_ALG_NEED_FALLBACK);

	spawn = skcipher_instance_ctx(inst);

	crypto_set_skcipher_spawn(spawn, skcipher_crypto_instance(inst));
	err = crypto_grab_skcipher(spawn, cipher_name, 0, mask);
	if (err)
		goto err_free_inst;

	alg = crypto_spawn_skcipher_alg(spawn);

	/* We only support 16-byte blocks. */
	err = -EINVAL;
	if (crypto_skcipher_alg_ivsize(alg) != CTR_RFC3686_BLOCK_SIZE)
		goto err_drop_spawn;

	/* Not a stream cipher? */
	if (alg->base.cra_blocksize != 1)
		goto err_drop_spawn;

	err = -ENAMETOOLONG;
	if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME,
		     "rfc3686(%s)", alg->base.cra_name) >= CRYPTO_MAX_ALG_NAME)
		goto err_drop_spawn;
	if (snprintf(inst->alg.base.cra_driver_name, CRYPTO_MAX_ALG_NAME,
		     "rfc3686(%s)", alg->base.cra_driver_name) >=
	    CRYPTO_MAX_ALG_NAME)
		goto err_drop_spawn;

	inst->alg.base.cra_priority = alg->base.cra_priority;
	inst->alg.base.cra_blocksize = 1;
	inst->alg.base.cra_alignmask = alg->base.cra_alignmask;

	inst->alg.base.cra_flags = alg->base.cra_flags & CRYPTO_ALG_ASYNC;

	inst->alg.ivsize = CTR_RFC3686_IV_SIZE;
	inst->alg.chunksize = crypto_skcipher_alg_chunksize(alg);
	inst->alg.min_keysize = crypto_skcipher_alg_min_keysize(alg) +
				CTR_RFC3686_NONCE_SIZE;
	inst->alg.max_keysize = crypto_skcipher_alg_max_keysize(alg) +
				CTR_RFC3686_NONCE_SIZE;

	inst->alg.setkey = crypto_rfc3686_setkey;
	inst->alg.encrypt = crypto_rfc3686_crypt;
	inst->alg.decrypt = crypto_rfc3686_crypt;

	inst->alg.base.cra_ctxsize = sizeof(struct crypto_rfc3686_ctx);

	inst->alg.init = crypto_rfc3686_init_tfm;
	inst->alg.exit = crypto_rfc3686_exit_tfm;

	inst->free = crypto_rfc3686_free;

	err = skcipher_register_instance(tmpl, inst);
	if (err)
		goto err_drop_spawn;

out:
	return err;

err_drop_spawn:
	crypto_drop_skcipher(spawn);
err_free_inst:
	kfree(inst);
	goto out;
}

static struct crypto_template crypto_ctr_tmpls[] = {
	{
		.name = "ctr",
		.create = crypto_ctr_create,
		.module = THIS_MODULE,
	}, {
		.name = "rfc3686",
		.create = crypto_rfc3686_create,
		.module = THIS_MODULE,
	},
};

static int __init crypto_ctr_module_init(void)
{
	return crypto_register_templates(crypto_ctr_tmpls,
					 ARRAY_SIZE(crypto_ctr_tmpls));
}

static void __exit crypto_ctr_module_exit(void)
{
	crypto_unregister_templates(crypto_ctr_tmpls,
				    ARRAY_SIZE(crypto_ctr_tmpls));
}

subsys_initcall(crypto_ctr_module_init);
module_exit(crypto_ctr_module_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("CTR block cipher mode of operation");
MODULE_ALIAS_CRYPTO("rfc3686");
MODULE_ALIAS_CRYPTO("ctr");