/* xref: /openbmc/linux/crypto/skcipher.c (revision 6724ed7f) */
/*
 * Symmetric key cipher operations.
 *
 * Generic encrypt/decrypt wrapper for ciphers, handles operations across
 * multiple page boundaries by using temporary blocks.  In user context,
 * the kernel is given a chance to schedule us once per page.
 *
 * Copyright (c) 2015 Herbert Xu <herbert@gondor.apana.org.au>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 */

#include <crypto/internal/aead.h>
#include <crypto/internal/skcipher.h>
#include <crypto/scatterwalk.h>
#include <linux/bug.h>
#include <linux/cryptouser.h>
#include <linux/compiler.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/rtnetlink.h>
#include <linux/seq_file.h>
#include <net/netlink.h>

#include "internal.h"

enum {
	SKCIPHER_WALK_PHYS = 1 << 0,	/* walk in physical-address (async) mode */
	SKCIPHER_WALK_SLOW = 1 << 1,	/* chunk bounced through an aligned buffer */
	SKCIPHER_WALK_COPY = 1 << 2,	/* chunk staged in walk->page, copied back */
	SKCIPHER_WALK_DIFF = 1 << 3,	/* src and dst are mapped separately */
	SKCIPHER_WALK_SLEEP = 1 << 4,	/* sleeping allocations are allowed */
};

struct skcipher_walk_buffer {
	struct list_head entry;
	struct scatter_walk dst;
	unsigned int len;
	u8 *data;
	u8 buffer[];
};

static int skcipher_walk_next(struct skcipher_walk *walk);

static inline void skcipher_unmap(struct scatter_walk *walk, void *vaddr)
{
	if (PageHighMem(scatterwalk_page(walk)))
		kunmap_atomic(vaddr);
}

static inline void *skcipher_map(struct scatter_walk *walk)
{
	struct page *page = scatterwalk_page(walk);

	return (PageHighMem(page) ? kmap_atomic(page) : page_address(page)) +
	       offset_in_page(walk->offset);
}

static inline void skcipher_map_src(struct skcipher_walk *walk)
{
	walk->src.virt.addr = skcipher_map(&walk->in);
}

static inline void skcipher_map_dst(struct skcipher_walk *walk)
{
	walk->dst.virt.addr = skcipher_map(&walk->out);
}

static inline void skcipher_unmap_src(struct skcipher_walk *walk)
{
	skcipher_unmap(&walk->in, walk->src.virt.addr);
}

static inline void skcipher_unmap_dst(struct skcipher_walk *walk)
{
	skcipher_unmap(&walk->out, walk->dst.virt.addr);
}

static inline gfp_t skcipher_walk_gfp(struct skcipher_walk *walk)
{
	return walk->flags & SKCIPHER_WALK_SLEEP ? GFP_KERNEL : GFP_ATOMIC;
}

/*
 * Return a pointer to a spot of @len bytes that does not straddle a page
 * boundary: either @start itself, or the start of the page that the range
 * would spill into.  The caller must ensure the underlying buffer is large
 * enough for this adjustment.
 */
static inline u8 *skcipher_get_spot(u8 *start, unsigned int len)
{
	u8 *end_page = (u8 *)(((unsigned long)(start + len - 1)) & PAGE_MASK);

	return max(start, end_page);
}

static int skcipher_done_slow(struct skcipher_walk *walk, unsigned int bsize)
{
	u8 *addr;

	addr = (u8 *)ALIGN((unsigned long)walk->buffer, walk->alignmask + 1);
	addr = skcipher_get_spot(addr, bsize);
	scatterwalk_copychunks(addr, &walk->out, bsize,
			       (walk->flags & SKCIPHER_WALK_PHYS) ? 2 : 1);
	return 0;
}

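/*
 * skcipher_walk_done() is called by the cipher implementation once it has
 * processed the current chunk.  @err is the number of bytes left
 * unprocessed (or a negative error code).  It writes back any bounce
 * buffers, advances the scatterlist walks and maps the next chunk, or
 * finishes the walk when nothing remains.
 */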
int skcipher_walk_done(struct skcipher_walk *walk, int err)
{
	unsigned int n = walk->nbytes - err;
	unsigned int nbytes;

	nbytes = walk->total - n;

	if (unlikely(err < 0)) {
		nbytes = 0;
		n = 0;
	} else if (likely(!(walk->flags & (SKCIPHER_WALK_PHYS |
					   SKCIPHER_WALK_SLOW |
					   SKCIPHER_WALK_COPY |
					   SKCIPHER_WALK_DIFF)))) {
unmap_src:
		skcipher_unmap_src(walk);
	} else if (walk->flags & SKCIPHER_WALK_DIFF) {
		skcipher_unmap_dst(walk);
		goto unmap_src;
	} else if (walk->flags & SKCIPHER_WALK_COPY) {
		skcipher_map_dst(walk);
		memcpy(walk->dst.virt.addr, walk->page, n);
		skcipher_unmap_dst(walk);
	} else if (unlikely(walk->flags & SKCIPHER_WALK_SLOW)) {
		if (WARN_ON(err)) {
			err = -EINVAL;
			nbytes = 0;
		} else
			n = skcipher_done_slow(walk, n);
	}

	if (err > 0)
		err = 0;

	walk->total = nbytes;
	walk->nbytes = nbytes;

	scatterwalk_advance(&walk->in, n);
	scatterwalk_advance(&walk->out, n);
	scatterwalk_done(&walk->in, 0, nbytes);
	scatterwalk_done(&walk->out, 1, nbytes);

	if (nbytes) {
		crypto_yield(walk->flags & SKCIPHER_WALK_SLEEP ?
			     CRYPTO_TFM_REQ_MAY_SLEEP : 0);
		return skcipher_walk_next(walk);
	}

	/* Short-circuit for the common/fast path. */
	if (!((unsigned long)walk->buffer | (unsigned long)walk->page))
		goto out;

	if (walk->flags & SKCIPHER_WALK_PHYS)
		goto out;

	if (walk->iv != walk->oiv)
		memcpy(walk->oiv, walk->iv, walk->ivsize);
	if (walk->buffer != walk->page)
		kfree(walk->buffer);
	if (walk->page)
		free_page((unsigned long)walk->page);

out:
	return err;
}
EXPORT_SYMBOL_GPL(skcipher_walk_done);

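/*
 * Finish an asynchronous (physical-address) walk: flush any queued bounce
 * buffers to their destination scatterlists, free them, and copy the IV
 * back if it was bounced.  On error the queued data is simply discarded.
 */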
void skcipher_walk_complete(struct skcipher_walk *walk, int err)
{
	struct skcipher_walk_buffer *p, *tmp;

	list_for_each_entry_safe(p, tmp, &walk->buffers, entry) {
		u8 *data;

		if (err)
			goto done;

		data = p->data;
		if (!data) {
			data = PTR_ALIGN(&p->buffer[0], walk->alignmask + 1);
			data = skcipher_get_spot(data, walk->stride);
		}

		scatterwalk_copychunks(data, &p->dst, p->len, 1);

		if (offset_in_page(p->data) + p->len + walk->stride >
		    PAGE_SIZE)
			free_page((unsigned long)p->data);

done:
		list_del(&p->entry);
		kfree(p);
	}

	if (!err && walk->iv != walk->oiv)
		memcpy(walk->oiv, walk->iv, walk->ivsize);
	if (walk->buffer != walk->page)
		kfree(walk->buffer);
	if (walk->page)
		free_page((unsigned long)walk->page);
}
EXPORT_SYMBOL_GPL(skcipher_walk_complete);

static void skcipher_queue_write(struct skcipher_walk *walk,
				 struct skcipher_walk_buffer *p)
{
	p->dst = walk->out;
	list_add_tail(&p->entry, &walk->buffers);
}

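/*
 * Slow path: the current chunk is smaller than the walk stride or cannot be
 * mapped directly, so process it via an aligned bounce buffer that is
 * guaranteed not to straddle a page boundary.
 */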
static int skcipher_next_slow(struct skcipher_walk *walk, unsigned int bsize)
{
	bool phys = walk->flags & SKCIPHER_WALK_PHYS;
	unsigned alignmask = walk->alignmask;
	struct skcipher_walk_buffer *p;
	unsigned a;
	unsigned n;
	u8 *buffer;
	void *v;

	if (!phys) {
		if (!walk->buffer)
			walk->buffer = walk->page;
		buffer = walk->buffer;
		if (buffer)
			goto ok;
	}

	/* Start with the minimum alignment of kmalloc. */
	a = crypto_tfm_ctx_alignment() - 1;
	n = bsize;

	if (phys) {
		/* Calculate the minimum alignment of p->buffer. */
		a &= (sizeof(*p) ^ (sizeof(*p) - 1)) >> 1;
		n += sizeof(*p);
	}

	/* Minimum size to align p->buffer by alignmask. */
	n += alignmask & ~a;

	/* Minimum size to ensure p->buffer does not straddle a page. */
	n += (bsize - 1) & ~(alignmask | a);

	v = kzalloc(n, skcipher_walk_gfp(walk));
	if (!v)
		return skcipher_walk_done(walk, -ENOMEM);

	if (phys) {
		p = v;
		p->len = bsize;
		skcipher_queue_write(walk, p);
		buffer = p->buffer;
	} else {
		walk->buffer = v;
		buffer = v;
	}

ok:
	walk->dst.virt.addr = PTR_ALIGN(buffer, alignmask + 1);
	walk->dst.virt.addr = skcipher_get_spot(walk->dst.virt.addr, bsize);
	walk->src.virt.addr = walk->dst.virt.addr;

	scatterwalk_copychunks(walk->src.virt.addr, &walk->in, bsize, 0);

	walk->nbytes = bsize;
	walk->flags |= SKCIPHER_WALK_SLOW;

	return 0;
}

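/*
 * Copy path: source and/or destination are misaligned, so stage the data in
 * walk->page and let skcipher_walk_done() (or skcipher_walk_complete() for
 * physical walks) copy the result back out.
 */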
static int skcipher_next_copy(struct skcipher_walk *walk)
{
	struct skcipher_walk_buffer *p;
	u8 *tmp = walk->page;

	skcipher_map_src(walk);
	memcpy(tmp, walk->src.virt.addr, walk->nbytes);
	skcipher_unmap_src(walk);

	walk->src.virt.addr = tmp;
	walk->dst.virt.addr = tmp;

	if (!(walk->flags & SKCIPHER_WALK_PHYS))
		return 0;

	p = kmalloc(sizeof(*p), skcipher_walk_gfp(walk));
	if (!p)
		return -ENOMEM;

	p->data = walk->page;
	p->len = walk->nbytes;
	skcipher_queue_write(walk, p);

	if (offset_in_page(walk->page) + walk->nbytes + walk->stride >
	    PAGE_SIZE)
		walk->page = NULL;
	else
		walk->page += walk->nbytes;

	return 0;
}

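/*
 * Fast path: map the source and destination pages directly.  If they are
 * not the same area, flag the walk so that both mappings are torn down in
 * skcipher_walk_done().
 */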
static int skcipher_next_fast(struct skcipher_walk *walk)
{
	unsigned long diff;

	walk->src.phys.page = scatterwalk_page(&walk->in);
	walk->src.phys.offset = offset_in_page(walk->in.offset);
	walk->dst.phys.page = scatterwalk_page(&walk->out);
	walk->dst.phys.offset = offset_in_page(walk->out.offset);

	if (walk->flags & SKCIPHER_WALK_PHYS)
		return 0;

	diff = walk->src.phys.offset - walk->dst.phys.offset;
	diff |= walk->src.virt.page - walk->dst.virt.page;

	skcipher_map_src(walk);
	walk->dst.virt.addr = walk->src.virt.addr;

	if (diff) {
		walk->flags |= SKCIPHER_WALK_DIFF;
		skcipher_map_dst(walk);
	}

	return 0;
}

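/*
 * Advance to the next chunk of the request, picking the fast, copy or slow
 * path depending on how much contiguous data is available and how it is
 * aligned.
 */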
static int skcipher_walk_next(struct skcipher_walk *walk)
{
	unsigned int bsize;
	unsigned int n;
	int err;

	walk->flags &= ~(SKCIPHER_WALK_SLOW | SKCIPHER_WALK_COPY |
			 SKCIPHER_WALK_DIFF);

	n = walk->total;
	bsize = min(walk->stride, max(n, walk->blocksize));
	n = scatterwalk_clamp(&walk->in, n);
	n = scatterwalk_clamp(&walk->out, n);

	if (unlikely(n < bsize)) {
		if (unlikely(walk->total < walk->blocksize))
			return skcipher_walk_done(walk, -EINVAL);

slow_path:
		err = skcipher_next_slow(walk, bsize);
		goto set_phys_lowmem;
	}

	if (unlikely((walk->in.offset | walk->out.offset) & walk->alignmask)) {
		if (!walk->page) {
			gfp_t gfp = skcipher_walk_gfp(walk);

			walk->page = (void *)__get_free_page(gfp);
			if (!walk->page)
				goto slow_path;
		}

		walk->nbytes = min_t(unsigned, n,
				     PAGE_SIZE - offset_in_page(walk->page));
		walk->flags |= SKCIPHER_WALK_COPY;
		err = skcipher_next_copy(walk);
		goto set_phys_lowmem;
	}

	walk->nbytes = n;

	return skcipher_next_fast(walk);

set_phys_lowmem:
	if (!err && (walk->flags & SKCIPHER_WALK_PHYS)) {
		walk->src.phys.page = virt_to_page(walk->src.virt.addr);
		walk->dst.phys.page = virt_to_page(walk->dst.virt.addr);
		walk->src.phys.offset &= PAGE_SIZE - 1;
		walk->dst.phys.offset &= PAGE_SIZE - 1;
	}
	return err;
}
EXPORT_SYMBOL_GPL(skcipher_walk_next);

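/*
 * The caller supplied an IV that does not satisfy the algorithm's alignment
 * mask; copy it into a suitably aligned buffer for the duration of the walk.
 */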
static int skcipher_copy_iv(struct skcipher_walk *walk)
{
	unsigned a = crypto_tfm_ctx_alignment() - 1;
	unsigned alignmask = walk->alignmask;
	unsigned ivsize = walk->ivsize;
	unsigned bs = walk->stride;
	unsigned aligned_bs;
	unsigned size;
	u8 *iv;

	/* ALIGN() takes the alignment itself, not the mask. */
	aligned_bs = ALIGN(bs, alignmask + 1);

	/* Minimum size to align buffer by alignmask. */
	size = alignmask & ~a;

	if (walk->flags & SKCIPHER_WALK_PHYS)
		size += ivsize;
	else {
		size += aligned_bs + ivsize;

		/* Minimum size to ensure buffer does not straddle a page. */
		size += (bs - 1) & ~(alignmask | a);
	}

	walk->buffer = kmalloc(size, skcipher_walk_gfp(walk));
	if (!walk->buffer)
		return -ENOMEM;

	iv = PTR_ALIGN(walk->buffer, alignmask + 1);
	iv = skcipher_get_spot(iv, bs) + aligned_bs;

	walk->iv = memcpy(iv, walk->iv, walk->ivsize);
	return 0;
}

static int skcipher_walk_first(struct skcipher_walk *walk)
{
	if (WARN_ON_ONCE(in_irq()))
		return -EDEADLK;

	walk->buffer = NULL;
	if (unlikely(((unsigned long)walk->iv & walk->alignmask))) {
		int err = skcipher_copy_iv(walk);
		if (err)
			return err;
	}

	walk->page = NULL;
	walk->nbytes = walk->total;

	return skcipher_walk_next(walk);
}

static int skcipher_walk_skcipher(struct skcipher_walk *walk,
				  struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);

	walk->total = req->cryptlen;
	walk->nbytes = 0;
	walk->iv = req->iv;
	walk->oiv = req->iv;

	if (unlikely(!walk->total))
		return 0;

	scatterwalk_start(&walk->in, req->src);
	scatterwalk_start(&walk->out, req->dst);

	walk->flags &= ~SKCIPHER_WALK_SLEEP;
	walk->flags |= req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
		       SKCIPHER_WALK_SLEEP : 0;

	walk->blocksize = crypto_skcipher_blocksize(tfm);
	walk->stride = crypto_skcipher_walksize(tfm);
	walk->ivsize = crypto_skcipher_ivsize(tfm);
	walk->alignmask = crypto_skcipher_alignmask(tfm);

	return skcipher_walk_first(walk);
}

int skcipher_walk_virt(struct skcipher_walk *walk,
		       struct skcipher_request *req, bool atomic)
{
	int err;

	walk->flags &= ~SKCIPHER_WALK_PHYS;

	err = skcipher_walk_skcipher(walk, req);

	walk->flags &= atomic ? ~SKCIPHER_WALK_SLEEP : ~0;

	return err;
}
EXPORT_SYMBOL_GPL(skcipher_walk_virt);
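
/*
 * Typical use of the virtual-address walk by a cipher implementation (a
 * minimal sketch only, not part of this file; mycipher_crypt_segment() and
 * MYCIPHER_BLOCK_SIZE are hypothetical stand-ins for the driver's own
 * per-segment helper and block size):
 *
 *	err = skcipher_walk_virt(&walk, req, false);
 *	while (walk.nbytes) {
 *		unsigned int n = walk.nbytes;
 *
 *		n -= n % MYCIPHER_BLOCK_SIZE;
 *		mycipher_crypt_segment(walk.dst.virt.addr, walk.src.virt.addr,
 *				       n, walk.iv);
 *		err = skcipher_walk_done(&walk, walk.nbytes - n);
 *	}
 *	return err;
 */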

void skcipher_walk_atomise(struct skcipher_walk *walk)
{
	walk->flags &= ~SKCIPHER_WALK_SLEEP;
}
EXPORT_SYMBOL_GPL(skcipher_walk_atomise);

int skcipher_walk_async(struct skcipher_walk *walk,
			struct skcipher_request *req)
{
	walk->flags |= SKCIPHER_WALK_PHYS;

	INIT_LIST_HEAD(&walk->buffers);

	return skcipher_walk_skcipher(walk, req);
}
EXPORT_SYMBOL_GPL(skcipher_walk_async);

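/*
 * Common setup for walking the ciphertext/plaintext portion of an AEAD
 * request: skip over the associated data in both scatterlists before
 * starting the walk proper.
 */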
static int skcipher_walk_aead_common(struct skcipher_walk *walk,
				     struct aead_request *req, bool atomic)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	int err;

	walk->nbytes = 0;
	walk->iv = req->iv;
	walk->oiv = req->iv;

	if (unlikely(!walk->total))
		return 0;

	walk->flags &= ~SKCIPHER_WALK_PHYS;

	scatterwalk_start(&walk->in, req->src);
	scatterwalk_start(&walk->out, req->dst);

	scatterwalk_copychunks(NULL, &walk->in, req->assoclen, 2);
	scatterwalk_copychunks(NULL, &walk->out, req->assoclen, 2);

	scatterwalk_done(&walk->in, 0, walk->total);
	scatterwalk_done(&walk->out, 0, walk->total);

	if (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP)
		walk->flags |= SKCIPHER_WALK_SLEEP;
	else
		walk->flags &= ~SKCIPHER_WALK_SLEEP;

	walk->blocksize = crypto_aead_blocksize(tfm);
	walk->stride = crypto_aead_chunksize(tfm);
	walk->ivsize = crypto_aead_ivsize(tfm);
	walk->alignmask = crypto_aead_alignmask(tfm);

	err = skcipher_walk_first(walk);

	if (atomic)
		walk->flags &= ~SKCIPHER_WALK_SLEEP;

	return err;
}

int skcipher_walk_aead(struct skcipher_walk *walk, struct aead_request *req,
		       bool atomic)
{
	walk->total = req->cryptlen;

	return skcipher_walk_aead_common(walk, req, atomic);
}
EXPORT_SYMBOL_GPL(skcipher_walk_aead);

int skcipher_walk_aead_encrypt(struct skcipher_walk *walk,
			       struct aead_request *req, bool atomic)
{
	walk->total = req->cryptlen;

	return skcipher_walk_aead_common(walk, req, atomic);
}
EXPORT_SYMBOL_GPL(skcipher_walk_aead_encrypt);

int skcipher_walk_aead_decrypt(struct skcipher_walk *walk,
			       struct aead_request *req, bool atomic)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);

	walk->total = req->cryptlen - crypto_aead_authsize(tfm);

	return skcipher_walk_aead_common(walk, req, atomic);
}
EXPORT_SYMBOL_GPL(skcipher_walk_aead_decrypt);

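/*
 * Everything below implements the crypto_skcipher front end, including the
 * compatibility wrappers that let legacy blkcipher and ablkcipher
 * algorithms be used through the skcipher API.
 */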
static unsigned int crypto_skcipher_extsize(struct crypto_alg *alg)
{
	if (alg->cra_type == &crypto_blkcipher_type)
		return sizeof(struct crypto_blkcipher *);

	if (alg->cra_type == &crypto_ablkcipher_type ||
	    alg->cra_type == &crypto_givcipher_type)
		return sizeof(struct crypto_ablkcipher *);

	return crypto_alg_extsize(alg);
}

static int skcipher_setkey_blkcipher(struct crypto_skcipher *tfm,
				     const u8 *key, unsigned int keylen)
{
	struct crypto_blkcipher **ctx = crypto_skcipher_ctx(tfm);
	struct crypto_blkcipher *blkcipher = *ctx;
	int err;

	crypto_blkcipher_clear_flags(blkcipher, ~0);
	crypto_blkcipher_set_flags(blkcipher, crypto_skcipher_get_flags(tfm) &
					      CRYPTO_TFM_REQ_MASK);
	err = crypto_blkcipher_setkey(blkcipher, key, keylen);
	crypto_skcipher_set_flags(tfm, crypto_blkcipher_get_flags(blkcipher) &
				       CRYPTO_TFM_RES_MASK);

	return err;
}

static int skcipher_crypt_blkcipher(struct skcipher_request *req,
				    int (*crypt)(struct blkcipher_desc *,
						 struct scatterlist *,
						 struct scatterlist *,
						 unsigned int))
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct crypto_blkcipher **ctx = crypto_skcipher_ctx(tfm);
	struct blkcipher_desc desc = {
		.tfm = *ctx,
		.info = req->iv,
		.flags = req->base.flags,
	};

	return crypt(&desc, req->dst, req->src, req->cryptlen);
}

static int skcipher_encrypt_blkcipher(struct skcipher_request *req)
{
	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
	struct crypto_tfm *tfm = crypto_skcipher_tfm(skcipher);
	struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher;

	return skcipher_crypt_blkcipher(req, alg->encrypt);
}

static int skcipher_decrypt_blkcipher(struct skcipher_request *req)
{
	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
	struct crypto_tfm *tfm = crypto_skcipher_tfm(skcipher);
	struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher;

	return skcipher_crypt_blkcipher(req, alg->decrypt);
}

static void crypto_exit_skcipher_ops_blkcipher(struct crypto_tfm *tfm)
{
	struct crypto_blkcipher **ctx = crypto_tfm_ctx(tfm);

	crypto_free_blkcipher(*ctx);
}

static int crypto_init_skcipher_ops_blkcipher(struct crypto_tfm *tfm)
{
	struct crypto_alg *calg = tfm->__crt_alg;
	struct crypto_skcipher *skcipher = __crypto_skcipher_cast(tfm);
	struct crypto_blkcipher **ctx = crypto_tfm_ctx(tfm);
	struct crypto_blkcipher *blkcipher;
	struct crypto_tfm *btfm;

	if (!crypto_mod_get(calg))
		return -EAGAIN;

	btfm = __crypto_alloc_tfm(calg, CRYPTO_ALG_TYPE_BLKCIPHER,
					CRYPTO_ALG_TYPE_MASK);
	if (IS_ERR(btfm)) {
		crypto_mod_put(calg);
		return PTR_ERR(btfm);
	}

	blkcipher = __crypto_blkcipher_cast(btfm);
	*ctx = blkcipher;
	tfm->exit = crypto_exit_skcipher_ops_blkcipher;

	skcipher->setkey = skcipher_setkey_blkcipher;
	skcipher->encrypt = skcipher_encrypt_blkcipher;
	skcipher->decrypt = skcipher_decrypt_blkcipher;

	skcipher->ivsize = crypto_blkcipher_ivsize(blkcipher);
	skcipher->keysize = calg->cra_blkcipher.max_keysize;

	return 0;
}

static int skcipher_setkey_ablkcipher(struct crypto_skcipher *tfm,
				      const u8 *key, unsigned int keylen)
{
	struct crypto_ablkcipher **ctx = crypto_skcipher_ctx(tfm);
	struct crypto_ablkcipher *ablkcipher = *ctx;
	int err;

	crypto_ablkcipher_clear_flags(ablkcipher, ~0);
	crypto_ablkcipher_set_flags(ablkcipher,
				    crypto_skcipher_get_flags(tfm) &
				    CRYPTO_TFM_REQ_MASK);
	err = crypto_ablkcipher_setkey(ablkcipher, key, keylen);
	crypto_skcipher_set_flags(tfm,
				  crypto_ablkcipher_get_flags(ablkcipher) &
				  CRYPTO_TFM_RES_MASK);

	return err;
}

static int skcipher_crypt_ablkcipher(struct skcipher_request *req,
				     int (*crypt)(struct ablkcipher_request *))
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct crypto_ablkcipher **ctx = crypto_skcipher_ctx(tfm);
	struct ablkcipher_request *subreq = skcipher_request_ctx(req);

	ablkcipher_request_set_tfm(subreq, *ctx);
	ablkcipher_request_set_callback(subreq, skcipher_request_flags(req),
					req->base.complete, req->base.data);
	ablkcipher_request_set_crypt(subreq, req->src, req->dst, req->cryptlen,
				     req->iv);

	return crypt(subreq);
}

static int skcipher_encrypt_ablkcipher(struct skcipher_request *req)
{
	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
	struct crypto_tfm *tfm = crypto_skcipher_tfm(skcipher);
	struct ablkcipher_alg *alg = &tfm->__crt_alg->cra_ablkcipher;

	return skcipher_crypt_ablkcipher(req, alg->encrypt);
}

static int skcipher_decrypt_ablkcipher(struct skcipher_request *req)
{
	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
	struct crypto_tfm *tfm = crypto_skcipher_tfm(skcipher);
	struct ablkcipher_alg *alg = &tfm->__crt_alg->cra_ablkcipher;

	return skcipher_crypt_ablkcipher(req, alg->decrypt);
}

static void crypto_exit_skcipher_ops_ablkcipher(struct crypto_tfm *tfm)
{
	struct crypto_ablkcipher **ctx = crypto_tfm_ctx(tfm);

	crypto_free_ablkcipher(*ctx);
}

static int crypto_init_skcipher_ops_ablkcipher(struct crypto_tfm *tfm)
{
	struct crypto_alg *calg = tfm->__crt_alg;
	struct crypto_skcipher *skcipher = __crypto_skcipher_cast(tfm);
	struct crypto_ablkcipher **ctx = crypto_tfm_ctx(tfm);
	struct crypto_ablkcipher *ablkcipher;
	struct crypto_tfm *abtfm;

	if (!crypto_mod_get(calg))
		return -EAGAIN;

	abtfm = __crypto_alloc_tfm(calg, 0, 0);
	if (IS_ERR(abtfm)) {
		crypto_mod_put(calg);
		return PTR_ERR(abtfm);
	}

	ablkcipher = __crypto_ablkcipher_cast(abtfm);
	*ctx = ablkcipher;
	tfm->exit = crypto_exit_skcipher_ops_ablkcipher;

	skcipher->setkey = skcipher_setkey_ablkcipher;
	skcipher->encrypt = skcipher_encrypt_ablkcipher;
	skcipher->decrypt = skcipher_decrypt_ablkcipher;

	skcipher->ivsize = crypto_ablkcipher_ivsize(ablkcipher);
	skcipher->reqsize = crypto_ablkcipher_reqsize(ablkcipher) +
			    sizeof(struct ablkcipher_request);
	skcipher->keysize = calg->cra_ablkcipher.max_keysize;

	return 0;
}

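/*
 * The key buffer handed to us does not satisfy the algorithm's alignment
 * mask; copy it into a temporary aligned buffer before calling the
 * algorithm's ->setkey().
 */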
static int skcipher_setkey_unaligned(struct crypto_skcipher *tfm,
				     const u8 *key, unsigned int keylen)
{
	unsigned long alignmask = crypto_skcipher_alignmask(tfm);
	struct skcipher_alg *cipher = crypto_skcipher_alg(tfm);
	u8 *buffer, *alignbuffer;
	unsigned long absize;
	int ret;

	absize = keylen + alignmask;
	buffer = kmalloc(absize, GFP_ATOMIC);
	if (!buffer)
		return -ENOMEM;

	alignbuffer = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1);
	memcpy(alignbuffer, key, keylen);
	ret = cipher->setkey(tfm, alignbuffer, keylen);
	kzfree(buffer);
	return ret;
}

static int skcipher_setkey(struct crypto_skcipher *tfm, const u8 *key,
			   unsigned int keylen)
{
	struct skcipher_alg *cipher = crypto_skcipher_alg(tfm);
	unsigned long alignmask = crypto_skcipher_alignmask(tfm);

	if (keylen < cipher->min_keysize || keylen > cipher->max_keysize) {
		crypto_skcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}

	if ((unsigned long)key & alignmask)
		return skcipher_setkey_unaligned(tfm, key, keylen);

	return cipher->setkey(tfm, key, keylen);
}

static void crypto_skcipher_exit_tfm(struct crypto_tfm *tfm)
{
	struct crypto_skcipher *skcipher = __crypto_skcipher_cast(tfm);
	struct skcipher_alg *alg = crypto_skcipher_alg(skcipher);

	alg->exit(skcipher);
}

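/*
 * Set up a new skcipher transform, dispatching to the blkcipher/ablkcipher
 * compatibility paths when the underlying algorithm is of a legacy type.
 */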
static int crypto_skcipher_init_tfm(struct crypto_tfm *tfm)
{
	struct crypto_skcipher *skcipher = __crypto_skcipher_cast(tfm);
	struct skcipher_alg *alg = crypto_skcipher_alg(skcipher);

	if (tfm->__crt_alg->cra_type == &crypto_blkcipher_type)
		return crypto_init_skcipher_ops_blkcipher(tfm);

	if (tfm->__crt_alg->cra_type == &crypto_ablkcipher_type ||
	    tfm->__crt_alg->cra_type == &crypto_givcipher_type)
		return crypto_init_skcipher_ops_ablkcipher(tfm);

	skcipher->setkey = skcipher_setkey;
	skcipher->encrypt = alg->encrypt;
	skcipher->decrypt = alg->decrypt;
	skcipher->ivsize = alg->ivsize;
	skcipher->keysize = alg->max_keysize;

	if (alg->exit)
		skcipher->base.exit = crypto_skcipher_exit_tfm;

	if (alg->init)
		return alg->init(skcipher);

	return 0;
}

static void crypto_skcipher_free_instance(struct crypto_instance *inst)
{
	struct skcipher_instance *skcipher =
		container_of(inst, struct skcipher_instance, s.base);

	skcipher->free(skcipher);
}

static void crypto_skcipher_show(struct seq_file *m, struct crypto_alg *alg)
	__maybe_unused;
static void crypto_skcipher_show(struct seq_file *m, struct crypto_alg *alg)
{
	struct skcipher_alg *skcipher = container_of(alg, struct skcipher_alg,
						     base);

	seq_printf(m, "type         : skcipher\n");
	seq_printf(m, "async        : %s\n",
		   alg->cra_flags & CRYPTO_ALG_ASYNC ?  "yes" : "no");
	seq_printf(m, "blocksize    : %u\n", alg->cra_blocksize);
	seq_printf(m, "min keysize  : %u\n", skcipher->min_keysize);
	seq_printf(m, "max keysize  : %u\n", skcipher->max_keysize);
	seq_printf(m, "ivsize       : %u\n", skcipher->ivsize);
	seq_printf(m, "chunksize    : %u\n", skcipher->chunksize);
	seq_printf(m, "walksize     : %u\n", skcipher->walksize);
}

#ifdef CONFIG_NET
static int crypto_skcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
{
	struct crypto_report_blkcipher rblkcipher;
	struct skcipher_alg *skcipher = container_of(alg, struct skcipher_alg,
						     base);

	strncpy(rblkcipher.type, "skcipher", sizeof(rblkcipher.type));
	strncpy(rblkcipher.geniv, "<none>", sizeof(rblkcipher.geniv));

	rblkcipher.blocksize = alg->cra_blocksize;
	rblkcipher.min_keysize = skcipher->min_keysize;
	rblkcipher.max_keysize = skcipher->max_keysize;
	rblkcipher.ivsize = skcipher->ivsize;

	if (nla_put(skb, CRYPTOCFGA_REPORT_BLKCIPHER,
		    sizeof(struct crypto_report_blkcipher), &rblkcipher))
		goto nla_put_failure;
	return 0;

nla_put_failure:
	return -EMSGSIZE;
}
#else
static int crypto_skcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
{
	return -ENOSYS;
}
#endif

static const struct crypto_type crypto_skcipher_type2 = {
	.extsize = crypto_skcipher_extsize,
	.init_tfm = crypto_skcipher_init_tfm,
	.free = crypto_skcipher_free_instance,
#ifdef CONFIG_PROC_FS
	.show = crypto_skcipher_show,
#endif
	.report = crypto_skcipher_report,
	.maskclear = ~CRYPTO_ALG_TYPE_MASK,
	.maskset = CRYPTO_ALG_TYPE_BLKCIPHER_MASK,
	.type = CRYPTO_ALG_TYPE_SKCIPHER,
	.tfmsize = offsetof(struct crypto_skcipher, base),
};

int crypto_grab_skcipher(struct crypto_skcipher_spawn *spawn,
			  const char *name, u32 type, u32 mask)
{
	spawn->base.frontend = &crypto_skcipher_type2;
	return crypto_grab_spawn(&spawn->base, name, type, mask);
}
EXPORT_SYMBOL_GPL(crypto_grab_skcipher);

struct crypto_skcipher *crypto_alloc_skcipher(const char *alg_name,
					      u32 type, u32 mask)
{
	return crypto_alloc_tfm(alg_name, &crypto_skcipher_type2, type, mask);
}
EXPORT_SYMBOL_GPL(crypto_alloc_skcipher);
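
/*
 * Sketch of how a caller allocates and drives a cipher through this API
 * (error handling trimmed; "cbc(aes)" and the buffer names are only
 * examples, and the crypto_wait_req() helpers are assumed to be available
 * in this tree):
 *
 *	struct crypto_skcipher *tfm;
 *	struct skcipher_request *req;
 *	DECLARE_CRYPTO_WAIT(wait);
 *
 *	tfm = crypto_alloc_skcipher("cbc(aes)", 0, 0);
 *	crypto_skcipher_setkey(tfm, key, keylen);
 *	req = skcipher_request_alloc(tfm, GFP_KERNEL);
 *	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP,
 *				      crypto_req_done, &wait);
 *	skcipher_request_set_crypt(req, src_sg, dst_sg, len, iv);
 *	crypto_wait_req(crypto_skcipher_encrypt(req), &wait);
 *	skcipher_request_free(req);
 *	crypto_free_skcipher(tfm);
 */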

int crypto_has_skcipher2(const char *alg_name, u32 type, u32 mask)
{
	return crypto_type_has_alg(alg_name, &crypto_skcipher_type2,
				   type, mask);
}
EXPORT_SYMBOL_GPL(crypto_has_skcipher2);

static int skcipher_prepare_alg(struct skcipher_alg *alg)
{
	struct crypto_alg *base = &alg->base;

	if (alg->ivsize > PAGE_SIZE / 8 || alg->chunksize > PAGE_SIZE / 8 ||
	    alg->walksize > PAGE_SIZE / 8)
		return -EINVAL;

	if (!alg->chunksize)
		alg->chunksize = base->cra_blocksize;
	if (!alg->walksize)
		alg->walksize = alg->chunksize;

	base->cra_type = &crypto_skcipher_type2;
	base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK;
	base->cra_flags |= CRYPTO_ALG_TYPE_SKCIPHER;

	return 0;
}

int crypto_register_skcipher(struct skcipher_alg *alg)
{
	struct crypto_alg *base = &alg->base;
	int err;

	err = skcipher_prepare_alg(alg);
	if (err)
		return err;

	return crypto_register_alg(base);
}
EXPORT_SYMBOL_GPL(crypto_register_skcipher);
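
/*
 * A provider registers its algorithm by filling in a struct skcipher_alg,
 * typically from module init (sketch only; the values and mycipher_*
 * callbacks are placeholders, not a real cipher):
 *
 *	static struct skcipher_alg mycipher_alg = {
 *		.base.cra_name		= "cbc(mycipher)",
 *		.base.cra_driver_name	= "cbc-mycipher-generic",
 *		.base.cra_priority	= 100,
 *		.base.cra_blocksize	= 16,
 *		.base.cra_ctxsize	= sizeof(struct mycipher_ctx),
 *		.base.cra_module	= THIS_MODULE,
 *		.min_keysize		= 16,
 *		.max_keysize		= 32,
 *		.ivsize			= 16,
 *		.setkey			= mycipher_setkey,
 *		.encrypt		= mycipher_encrypt,
 *		.decrypt		= mycipher_decrypt,
 *	};
 *
 *	err = crypto_register_skcipher(&mycipher_alg);
 */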

void crypto_unregister_skcipher(struct skcipher_alg *alg)
{
	crypto_unregister_alg(&alg->base);
}
EXPORT_SYMBOL_GPL(crypto_unregister_skcipher);

int crypto_register_skciphers(struct skcipher_alg *algs, int count)
{
	int i, ret;

	for (i = 0; i < count; i++) {
		ret = crypto_register_skcipher(&algs[i]);
		if (ret)
			goto err;
	}

	return 0;

err:
	for (--i; i >= 0; --i)
		crypto_unregister_skcipher(&algs[i]);

	return ret;
}
EXPORT_SYMBOL_GPL(crypto_register_skciphers);

void crypto_unregister_skciphers(struct skcipher_alg *algs, int count)
{
	int i;

	for (i = count - 1; i >= 0; --i)
		crypto_unregister_skcipher(&algs[i]);
}
EXPORT_SYMBOL_GPL(crypto_unregister_skciphers);

int skcipher_register_instance(struct crypto_template *tmpl,
			       struct skcipher_instance *inst)
{
	int err;

	err = skcipher_prepare_alg(&inst->alg);
	if (err)
		return err;

	return crypto_register_instance(tmpl, skcipher_crypto_instance(inst));
}
EXPORT_SYMBOL_GPL(skcipher_register_instance);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Symmetric key cipher type");