/*
 * Symmetric key cipher operations.
 *
 * Generic encrypt/decrypt wrapper for ciphers, handles operations across
 * multiple page boundaries by using temporary blocks.  In user context,
 * the kernel is given a chance to schedule us once per page.
 *
 * Copyright (c) 2015 Herbert Xu <herbert@gondor.apana.org.au>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 */

#include <crypto/internal/aead.h>
#include <crypto/internal/skcipher.h>
#include <crypto/scatterwalk.h>
#include <linux/bug.h>
#include <linux/cryptouser.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/rtnetlink.h>
#include <linux/seq_file.h>
#include <net/netlink.h>

#include "internal.h"

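/*
 * Internal walk state flags:
 *
 * PHYS  - the walk returns page/offset pairs instead of mapped virtual
 *	   addresses; writes that go through bounce buffers are queued and
 *	   only flushed by skcipher_walk_complete().
 * SLOW  - the current chunk straddles a scatterlist element, so it is
 *	   bounced through an allocated buffer and copied back in
 *	   skcipher_walk_done().
 * COPY  - the current chunk is copied through walk->page to satisfy the
 *	   algorithm's alignment mask.
 * DIFF  - source and destination live on different pages and are mapped
 *	   separately.
 * SLEEP - the request allows sleeping, so GFP_KERNEL allocations and
 *	   voluntary rescheduling may be used.
 */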
enum {
	SKCIPHER_WALK_PHYS = 1 << 0,
	SKCIPHER_WALK_SLOW = 1 << 1,
	SKCIPHER_WALK_COPY = 1 << 2,
	SKCIPHER_WALK_DIFF = 1 << 3,
	SKCIPHER_WALK_SLEEP = 1 << 4,
};

struct skcipher_walk_buffer {
	struct list_head entry;
	struct scatter_walk dst;
	unsigned int len;
	u8 *data;
	u8 buffer[];
};

static int skcipher_walk_next(struct skcipher_walk *walk);

static inline void skcipher_unmap(struct scatter_walk *walk, void *vaddr)
{
	if (PageHighMem(scatterwalk_page(walk)))
		kunmap_atomic(vaddr);
}

static inline void *skcipher_map(struct scatter_walk *walk)
{
	struct page *page = scatterwalk_page(walk);

	return (PageHighMem(page) ? kmap_atomic(page) : page_address(page)) +
	       offset_in_page(walk->offset);
}

static inline void skcipher_map_src(struct skcipher_walk *walk)
{
	walk->src.virt.addr = skcipher_map(&walk->in);
}

static inline void skcipher_map_dst(struct skcipher_walk *walk)
{
	walk->dst.virt.addr = skcipher_map(&walk->out);
}

static inline void skcipher_unmap_src(struct skcipher_walk *walk)
{
	skcipher_unmap(&walk->in, walk->src.virt.addr);
}

static inline void skcipher_unmap_dst(struct skcipher_walk *walk)
{
	skcipher_unmap(&walk->out, walk->dst.virt.addr);
}

static inline gfp_t skcipher_walk_gfp(struct skcipher_walk *walk)
{
	return walk->flags & SKCIPHER_WALK_SLEEP ? GFP_KERNEL : GFP_ATOMIC;
}

/* Get a spot of the specified length that does not straddle a page.
 * The caller needs to ensure that there is enough space for this operation.
 */
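/*
 * Example (illustrative, assuming PAGE_SIZE == 4096): for start == ...0ff0
 * and len == 32, start + len - 1 == ...100f, so end_page == ...1000 and the
 * returned spot is moved up to the page boundary.  This is why callers
 * over-allocate by up to len - 1 extra bytes.
 */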
static inline u8 *skcipher_get_spot(u8 *start, unsigned int len)
{
	u8 *end_page = (u8 *)(((unsigned long)(start + len - 1)) & PAGE_MASK);

	return max(start, end_page);
}

static int skcipher_done_slow(struct skcipher_walk *walk, unsigned int bsize)
{
	u8 *addr;

	addr = (u8 *)ALIGN((unsigned long)walk->buffer, walk->alignmask + 1);
	addr = skcipher_get_spot(addr, bsize);
	scatterwalk_copychunks(addr, &walk->out, bsize,
			       (walk->flags & SKCIPHER_WALK_PHYS) ? 2 : 1);
	return 0;
}

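/*
 * skcipher_walk_done() is called by the cipher implementation after it has
 * processed the current chunk.  @err is either a negative error code or the
 * number of bytes at the end of the chunk that were left unprocessed; the
 * walk is advanced accordingly and the next chunk is mapped if any data
 * remains.
 */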
int skcipher_walk_done(struct skcipher_walk *walk, int err)
{
	unsigned int n = walk->nbytes - err;
	unsigned int nbytes;

	nbytes = walk->total - n;

	if (unlikely(err < 0)) {
		nbytes = 0;
		n = 0;
	} else if (likely(!(walk->flags & (SKCIPHER_WALK_PHYS |
					   SKCIPHER_WALK_SLOW |
					   SKCIPHER_WALK_COPY |
					   SKCIPHER_WALK_DIFF)))) {
unmap_src:
		skcipher_unmap_src(walk);
	} else if (walk->flags & SKCIPHER_WALK_DIFF) {
		skcipher_unmap_dst(walk);
		goto unmap_src;
	} else if (walk->flags & SKCIPHER_WALK_COPY) {
		skcipher_map_dst(walk);
		memcpy(walk->dst.virt.addr, walk->page, n);
		skcipher_unmap_dst(walk);
	} else if (unlikely(walk->flags & SKCIPHER_WALK_SLOW)) {
		if (WARN_ON(err)) {
			err = -EINVAL;
			nbytes = 0;
		} else
			n = skcipher_done_slow(walk, n);
	}

	if (err > 0)
		err = 0;

	walk->total = nbytes;
	walk->nbytes = nbytes;

	scatterwalk_advance(&walk->in, n);
	scatterwalk_advance(&walk->out, n);
	scatterwalk_done(&walk->in, 0, nbytes);
	scatterwalk_done(&walk->out, 1, nbytes);

	if (nbytes) {
		crypto_yield(walk->flags & SKCIPHER_WALK_SLEEP ?
			     CRYPTO_TFM_REQ_MAY_SLEEP : 0);
		return skcipher_walk_next(walk);
	}

	/* Short-circuit for the common/fast path. */
	if (!((unsigned long)walk->buffer | (unsigned long)walk->page))
		goto out;

	if (walk->flags & SKCIPHER_WALK_PHYS)
		goto out;

	if (walk->iv != walk->oiv)
		memcpy(walk->oiv, walk->iv, walk->ivsize);
	if (walk->buffer != walk->page)
		kfree(walk->buffer);
	if (walk->page)
		free_page((unsigned long)walk->page);

out:
	return err;
}
EXPORT_SYMBOL_GPL(skcipher_walk_done);

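/*
 * For asynchronous (SKCIPHER_WALK_PHYS) walks, writes that had to go through
 * bounce buffers are only queued on walk->buffers; skcipher_walk_complete()
 * flushes them to the destination scatterlist once the operation has
 * actually finished, or discards them on error.
 */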
void skcipher_walk_complete(struct skcipher_walk *walk, int err)
{
	struct skcipher_walk_buffer *p, *tmp;

	list_for_each_entry_safe(p, tmp, &walk->buffers, entry) {
		u8 *data;

		if (err)
			goto done;

		data = p->data;
		if (!data) {
			data = PTR_ALIGN(&p->buffer[0], walk->alignmask + 1);
			data = skcipher_get_spot(data, walk->chunksize);
		}

		scatterwalk_copychunks(data, &p->dst, p->len, 1);

		if (offset_in_page(p->data) + p->len + walk->chunksize >
		    PAGE_SIZE)
			free_page((unsigned long)p->data);

done:
		list_del(&p->entry);
		kfree(p);
	}

	if (!err && walk->iv != walk->oiv)
		memcpy(walk->oiv, walk->iv, walk->ivsize);
	if (walk->buffer != walk->page)
		kfree(walk->buffer);
	if (walk->page)
		free_page((unsigned long)walk->page);
}
EXPORT_SYMBOL_GPL(skcipher_walk_complete);

static void skcipher_queue_write(struct skcipher_walk *walk,
				 struct skcipher_walk_buffer *p)
{
	p->dst = walk->out;
	list_add_tail(&p->entry, &walk->buffers);
}

static int skcipher_next_slow(struct skcipher_walk *walk, unsigned int bsize)
{
	bool phys = walk->flags & SKCIPHER_WALK_PHYS;
	unsigned alignmask = walk->alignmask;
	struct skcipher_walk_buffer *p;
	unsigned a;
	unsigned n;
	u8 *buffer;
	void *v;

	if (!phys) {
		if (!walk->buffer)
			walk->buffer = walk->page;
		buffer = walk->buffer;
		if (buffer)
			goto ok;
	}

	/* Start with the minimum alignment of kmalloc. */
	a = crypto_tfm_ctx_alignment() - 1;
	n = bsize;

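	/*
	 * Note: in the async case p->buffer sits at offset sizeof(*p) from
	 * the kmalloc'd pointer, so the alignment that can be assumed for it
	 * is limited by the lowest set bit of sizeof(*p); that is what the
	 * (x ^ (x - 1)) >> 1 expression below computes.
	 */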
	if (phys) {
		/* Calculate the minimum alignment of p->buffer. */
		a &= (sizeof(*p) ^ (sizeof(*p) - 1)) >> 1;
		n += sizeof(*p);
	}

	/* Minimum size to align p->buffer by alignmask. */
	n += alignmask & ~a;

	/* Minimum size to ensure p->buffer does not straddle a page. */
	n += (bsize - 1) & ~(alignmask | a);

	v = kzalloc(n, skcipher_walk_gfp(walk));
	if (!v)
		return skcipher_walk_done(walk, -ENOMEM);

	if (phys) {
		p = v;
		p->len = bsize;
		skcipher_queue_write(walk, p);
		buffer = p->buffer;
	} else {
		walk->buffer = v;
		buffer = v;
	}

ok:
	walk->dst.virt.addr = PTR_ALIGN(buffer, alignmask + 1);
	walk->dst.virt.addr = skcipher_get_spot(walk->dst.virt.addr, bsize);
	walk->src.virt.addr = walk->dst.virt.addr;

	scatterwalk_copychunks(walk->src.virt.addr, &walk->in, bsize, 0);

	walk->nbytes = bsize;
	walk->flags |= SKCIPHER_WALK_SLOW;

	return 0;
}

static int skcipher_next_copy(struct skcipher_walk *walk)
{
	struct skcipher_walk_buffer *p;
	u8 *tmp = walk->page;

	skcipher_map_src(walk);
	memcpy(tmp, walk->src.virt.addr, walk->nbytes);
	skcipher_unmap_src(walk);

	walk->src.virt.addr = tmp;
	walk->dst.virt.addr = tmp;

	if (!(walk->flags & SKCIPHER_WALK_PHYS))
		return 0;

	p = kmalloc(sizeof(*p), skcipher_walk_gfp(walk));
	if (!p)
		return -ENOMEM;

	p->data = walk->page;
	p->len = walk->nbytes;
	skcipher_queue_write(walk, p);

	if (offset_in_page(walk->page) + walk->nbytes + walk->chunksize >
	    PAGE_SIZE)
		walk->page = NULL;
	else
		walk->page += walk->nbytes;

	return 0;
}

static int skcipher_next_fast(struct skcipher_walk *walk)
{
	unsigned long diff;

	walk->src.phys.page = scatterwalk_page(&walk->in);
	walk->src.phys.offset = offset_in_page(walk->in.offset);
	walk->dst.phys.page = scatterwalk_page(&walk->out);
	walk->dst.phys.offset = offset_in_page(walk->out.offset);

	if (walk->flags & SKCIPHER_WALK_PHYS)
		return 0;

	diff = walk->src.phys.offset - walk->dst.phys.offset;
	diff |= walk->src.virt.page - walk->dst.virt.page;

	skcipher_map_src(walk);
	walk->dst.virt.addr = walk->src.virt.addr;

	if (diff) {
		walk->flags |= SKCIPHER_WALK_DIFF;
		skcipher_map_dst(walk);
	}

	return 0;
}

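/*
 * Pick the next chunk to hand to the cipher: the slow (bounce buffer) path
 * when fewer than bsize contiguous bytes are available in either
 * scatterlist, the copy path when the source or destination violates the
 * algorithm's alignment mask, and the fast (in-place mapping) path
 * otherwise.
 */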
static int skcipher_walk_next(struct skcipher_walk *walk)
{
	unsigned int bsize;
	unsigned int n;
	int err;

	walk->flags &= ~(SKCIPHER_WALK_SLOW | SKCIPHER_WALK_COPY |
			 SKCIPHER_WALK_DIFF);

	n = walk->total;
	bsize = min(walk->chunksize, max(n, walk->blocksize));
	n = scatterwalk_clamp(&walk->in, n);
	n = scatterwalk_clamp(&walk->out, n);

	if (unlikely(n < bsize)) {
		if (unlikely(walk->total < walk->blocksize))
			return skcipher_walk_done(walk, -EINVAL);

slow_path:
		err = skcipher_next_slow(walk, bsize);
		goto set_phys_lowmem;
	}

	if (unlikely((walk->in.offset | walk->out.offset) & walk->alignmask)) {
		if (!walk->page) {
			gfp_t gfp = skcipher_walk_gfp(walk);

			walk->page = (void *)__get_free_page(gfp);
			if (!walk->page)
				goto slow_path;
		}

		walk->nbytes = min_t(unsigned, n,
				     PAGE_SIZE - offset_in_page(walk->page));
		walk->flags |= SKCIPHER_WALK_COPY;
		err = skcipher_next_copy(walk);
		goto set_phys_lowmem;
	}

	walk->nbytes = n;

	return skcipher_next_fast(walk);

set_phys_lowmem:
	if (!err && (walk->flags & SKCIPHER_WALK_PHYS)) {
		walk->src.phys.page = virt_to_page(walk->src.virt.addr);
		walk->dst.phys.page = virt_to_page(walk->dst.virt.addr);
		walk->src.phys.offset &= PAGE_SIZE - 1;
		walk->dst.phys.offset &= PAGE_SIZE - 1;
	}
	return err;
}
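/*
 * If the caller's IV does not satisfy the algorithm's alignment mask, bounce
 * it into a freshly allocated, suitably aligned buffer so that the cipher
 * implementation can rely on the advertised alignment.
 */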
static int skcipher_copy_iv(struct skcipher_walk *walk)
{
	unsigned a = crypto_tfm_ctx_alignment() - 1;
	unsigned alignmask = walk->alignmask;
	unsigned ivsize = walk->ivsize;
	unsigned bs = walk->chunksize;
	unsigned aligned_bs;
	unsigned size;
	u8 *iv;

	aligned_bs = ALIGN(bs, alignmask + 1);

	/* Minimum size to align buffer by alignmask. */
	size = alignmask & ~a;

	if (walk->flags & SKCIPHER_WALK_PHYS)
		size += ivsize;
	else {
		size += aligned_bs + ivsize;

		/* Minimum size to ensure buffer does not straddle a page. */
		size += (bs - 1) & ~(alignmask | a);
	}

	walk->buffer = kmalloc(size, skcipher_walk_gfp(walk));
	if (!walk->buffer)
		return -ENOMEM;

	iv = PTR_ALIGN(walk->buffer, alignmask + 1);
	iv = skcipher_get_spot(iv, bs) + aligned_bs;

	walk->iv = memcpy(iv, walk->iv, walk->ivsize);
	return 0;
}

static int skcipher_walk_first(struct skcipher_walk *walk)
{
	walk->nbytes = 0;

	if (WARN_ON_ONCE(in_irq()))
		return -EDEADLK;

	if (unlikely(!walk->total))
		return 0;

	walk->buffer = NULL;
	if (unlikely(((unsigned long)walk->iv & walk->alignmask))) {
		int err = skcipher_copy_iv(walk);
		if (err)
			return err;
	}

	walk->page = NULL;
	walk->nbytes = walk->total;

	return skcipher_walk_next(walk);
}

static int skcipher_walk_skcipher(struct skcipher_walk *walk,
				  struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);

	scatterwalk_start(&walk->in, req->src);
	scatterwalk_start(&walk->out, req->dst);

	walk->total = req->cryptlen;
	walk->iv = req->iv;
	walk->oiv = req->iv;

	walk->flags &= ~SKCIPHER_WALK_SLEEP;
	walk->flags |= req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
		       SKCIPHER_WALK_SLEEP : 0;

	walk->blocksize = crypto_skcipher_blocksize(tfm);
	walk->chunksize = crypto_skcipher_chunksize(tfm);
	walk->ivsize = crypto_skcipher_ivsize(tfm);
	walk->alignmask = crypto_skcipher_alignmask(tfm);

	return skcipher_walk_first(walk);
}

int skcipher_walk_virt(struct skcipher_walk *walk,
		       struct skcipher_request *req, bool atomic)
{
	int err;

	walk->flags &= ~SKCIPHER_WALK_PHYS;

	err = skcipher_walk_skcipher(walk, req);

	walk->flags &= atomic ? ~SKCIPHER_WALK_SLEEP : ~0;

	return err;
}
EXPORT_SYMBOL_GPL(skcipher_walk_virt);
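/*
 * Typical use of skcipher_walk_virt() together with skcipher_walk_done()
 * from a synchronous cipher implementation (illustrative sketch only; the
 * toy_*() names are hypothetical and tail handling is algorithm specific):
 *
 *	struct skcipher_walk walk;
 *	unsigned int nbytes;
 *	int err;
 *
 *	err = skcipher_walk_virt(&walk, req, false);
 *	while ((nbytes = walk.nbytes) != 0) {
 *		unsigned int n = nbytes - (nbytes % bsize);
 *
 *		toy_crypt_blocks(ctx, walk.dst.virt.addr,
 *				 walk.src.virt.addr, n);
 *		err = skcipher_walk_done(&walk, nbytes - n);
 *	}
 *	return err;
 */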

void skcipher_walk_atomise(struct skcipher_walk *walk)
{
	walk->flags &= ~SKCIPHER_WALK_SLEEP;
}
EXPORT_SYMBOL_GPL(skcipher_walk_atomise);

int skcipher_walk_async(struct skcipher_walk *walk,
			struct skcipher_request *req)
{
	walk->flags |= SKCIPHER_WALK_PHYS;

	INIT_LIST_HEAD(&walk->buffers);

	return skcipher_walk_skcipher(walk, req);
}
EXPORT_SYMBOL_GPL(skcipher_walk_async);

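/*
 * The AEAD variants first skip req->assoclen bytes of associated data in
 * both the source and destination scatterlists, so the walk only covers the
 * text that is actually encrypted or decrypted.
 */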
static int skcipher_walk_aead_common(struct skcipher_walk *walk,
				     struct aead_request *req, bool atomic)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	int err;

	walk->flags &= ~SKCIPHER_WALK_PHYS;

	scatterwalk_start(&walk->in, req->src);
	scatterwalk_start(&walk->out, req->dst);

	scatterwalk_copychunks(NULL, &walk->in, req->assoclen, 2);
	scatterwalk_copychunks(NULL, &walk->out, req->assoclen, 2);

	walk->iv = req->iv;
	walk->oiv = req->iv;

	if (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP)
		walk->flags |= SKCIPHER_WALK_SLEEP;
	else
		walk->flags &= ~SKCIPHER_WALK_SLEEP;

	walk->blocksize = crypto_aead_blocksize(tfm);
	walk->chunksize = crypto_aead_chunksize(tfm);
	walk->ivsize = crypto_aead_ivsize(tfm);
	walk->alignmask = crypto_aead_alignmask(tfm);

	err = skcipher_walk_first(walk);

	if (atomic)
		walk->flags &= ~SKCIPHER_WALK_SLEEP;

	return err;
}

int skcipher_walk_aead(struct skcipher_walk *walk, struct aead_request *req,
		       bool atomic)
{
	walk->total = req->cryptlen;

	return skcipher_walk_aead_common(walk, req, atomic);
}
EXPORT_SYMBOL_GPL(skcipher_walk_aead);

int skcipher_walk_aead_encrypt(struct skcipher_walk *walk,
			       struct aead_request *req, bool atomic)
{
	walk->total = req->cryptlen;

	return skcipher_walk_aead_common(walk, req, atomic);
}
EXPORT_SYMBOL_GPL(skcipher_walk_aead_encrypt);

int skcipher_walk_aead_decrypt(struct skcipher_walk *walk,
			       struct aead_request *req, bool atomic)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);

	walk->total = req->cryptlen - crypto_aead_authsize(tfm);

	return skcipher_walk_aead_common(walk, req, atomic);
}
EXPORT_SYMBOL_GPL(skcipher_walk_aead_decrypt);

static unsigned int crypto_skcipher_extsize(struct crypto_alg *alg)
{
	if (alg->cra_type == &crypto_blkcipher_type)
		return sizeof(struct crypto_blkcipher *);

	if (alg->cra_type == &crypto_ablkcipher_type ||
	    alg->cra_type == &crypto_givcipher_type)
		return sizeof(struct crypto_ablkcipher *);

	return crypto_alg_extsize(alg);
}

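/*
 * Compatibility glue: algorithms still registered through the legacy
 * blkcipher and ablkcipher interfaces are wrapped here so that they can be
 * used transparently through the skcipher API.  The skcipher context then
 * only stores a pointer to the underlying legacy transform.
 */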
static int skcipher_setkey_blkcipher(struct crypto_skcipher *tfm,
				     const u8 *key, unsigned int keylen)
{
	struct crypto_blkcipher **ctx = crypto_skcipher_ctx(tfm);
	struct crypto_blkcipher *blkcipher = *ctx;
	int err;

	crypto_blkcipher_clear_flags(blkcipher, ~0);
	crypto_blkcipher_set_flags(blkcipher, crypto_skcipher_get_flags(tfm) &
					      CRYPTO_TFM_REQ_MASK);
	err = crypto_blkcipher_setkey(blkcipher, key, keylen);
	crypto_skcipher_set_flags(tfm, crypto_blkcipher_get_flags(blkcipher) &
				       CRYPTO_TFM_RES_MASK);

	return err;
}

static int skcipher_crypt_blkcipher(struct skcipher_request *req,
				    int (*crypt)(struct blkcipher_desc *,
						 struct scatterlist *,
						 struct scatterlist *,
						 unsigned int))
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct crypto_blkcipher **ctx = crypto_skcipher_ctx(tfm);
	struct blkcipher_desc desc = {
		.tfm = *ctx,
		.info = req->iv,
		.flags = req->base.flags,
	};

	return crypt(&desc, req->dst, req->src, req->cryptlen);
}

static int skcipher_encrypt_blkcipher(struct skcipher_request *req)
{
	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
	struct crypto_tfm *tfm = crypto_skcipher_tfm(skcipher);
	struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher;

	return skcipher_crypt_blkcipher(req, alg->encrypt);
}

static int skcipher_decrypt_blkcipher(struct skcipher_request *req)
{
	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
	struct crypto_tfm *tfm = crypto_skcipher_tfm(skcipher);
	struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher;

	return skcipher_crypt_blkcipher(req, alg->decrypt);
}

static void crypto_exit_skcipher_ops_blkcipher(struct crypto_tfm *tfm)
{
	struct crypto_blkcipher **ctx = crypto_tfm_ctx(tfm);

	crypto_free_blkcipher(*ctx);
}

static int crypto_init_skcipher_ops_blkcipher(struct crypto_tfm *tfm)
{
	struct crypto_alg *calg = tfm->__crt_alg;
	struct crypto_skcipher *skcipher = __crypto_skcipher_cast(tfm);
	struct crypto_blkcipher **ctx = crypto_tfm_ctx(tfm);
	struct crypto_blkcipher *blkcipher;
	struct crypto_tfm *btfm;

	if (!crypto_mod_get(calg))
		return -EAGAIN;

	btfm = __crypto_alloc_tfm(calg, CRYPTO_ALG_TYPE_BLKCIPHER,
					CRYPTO_ALG_TYPE_MASK);
	if (IS_ERR(btfm)) {
		crypto_mod_put(calg);
		return PTR_ERR(btfm);
	}

	blkcipher = __crypto_blkcipher_cast(btfm);
	*ctx = blkcipher;
	tfm->exit = crypto_exit_skcipher_ops_blkcipher;

	skcipher->setkey = skcipher_setkey_blkcipher;
	skcipher->encrypt = skcipher_encrypt_blkcipher;
	skcipher->decrypt = skcipher_decrypt_blkcipher;

	skcipher->ivsize = crypto_blkcipher_ivsize(blkcipher);
	skcipher->keysize = calg->cra_blkcipher.max_keysize;

	return 0;
}

static int skcipher_setkey_ablkcipher(struct crypto_skcipher *tfm,
				      const u8 *key, unsigned int keylen)
{
	struct crypto_ablkcipher **ctx = crypto_skcipher_ctx(tfm);
	struct crypto_ablkcipher *ablkcipher = *ctx;
	int err;

	crypto_ablkcipher_clear_flags(ablkcipher, ~0);
	crypto_ablkcipher_set_flags(ablkcipher,
				    crypto_skcipher_get_flags(tfm) &
				    CRYPTO_TFM_REQ_MASK);
	err = crypto_ablkcipher_setkey(ablkcipher, key, keylen);
	crypto_skcipher_set_flags(tfm,
				  crypto_ablkcipher_get_flags(ablkcipher) &
				  CRYPTO_TFM_RES_MASK);

	return err;
}

static int skcipher_crypt_ablkcipher(struct skcipher_request *req,
				     int (*crypt)(struct ablkcipher_request *))
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct crypto_ablkcipher **ctx = crypto_skcipher_ctx(tfm);
	struct ablkcipher_request *subreq = skcipher_request_ctx(req);

	ablkcipher_request_set_tfm(subreq, *ctx);
	ablkcipher_request_set_callback(subreq, skcipher_request_flags(req),
					req->base.complete, req->base.data);
	ablkcipher_request_set_crypt(subreq, req->src, req->dst, req->cryptlen,
				     req->iv);

	return crypt(subreq);
}

static int skcipher_encrypt_ablkcipher(struct skcipher_request *req)
{
	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
	struct crypto_tfm *tfm = crypto_skcipher_tfm(skcipher);
	struct ablkcipher_alg *alg = &tfm->__crt_alg->cra_ablkcipher;

	return skcipher_crypt_ablkcipher(req, alg->encrypt);
}

static int skcipher_decrypt_ablkcipher(struct skcipher_request *req)
{
	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
	struct crypto_tfm *tfm = crypto_skcipher_tfm(skcipher);
	struct ablkcipher_alg *alg = &tfm->__crt_alg->cra_ablkcipher;

	return skcipher_crypt_ablkcipher(req, alg->decrypt);
}

static void crypto_exit_skcipher_ops_ablkcipher(struct crypto_tfm *tfm)
{
	struct crypto_ablkcipher **ctx = crypto_tfm_ctx(tfm);

	crypto_free_ablkcipher(*ctx);
}

static int crypto_init_skcipher_ops_ablkcipher(struct crypto_tfm *tfm)
{
	struct crypto_alg *calg = tfm->__crt_alg;
	struct crypto_skcipher *skcipher = __crypto_skcipher_cast(tfm);
	struct crypto_ablkcipher **ctx = crypto_tfm_ctx(tfm);
	struct crypto_ablkcipher *ablkcipher;
	struct crypto_tfm *abtfm;

	if (!crypto_mod_get(calg))
		return -EAGAIN;

	abtfm = __crypto_alloc_tfm(calg, 0, 0);
	if (IS_ERR(abtfm)) {
		crypto_mod_put(calg);
		return PTR_ERR(abtfm);
	}

	ablkcipher = __crypto_ablkcipher_cast(abtfm);
	*ctx = ablkcipher;
	tfm->exit = crypto_exit_skcipher_ops_ablkcipher;

	skcipher->setkey = skcipher_setkey_ablkcipher;
	skcipher->encrypt = skcipher_encrypt_ablkcipher;
	skcipher->decrypt = skcipher_decrypt_ablkcipher;

	skcipher->ivsize = crypto_ablkcipher_ivsize(ablkcipher);
	skcipher->reqsize = crypto_ablkcipher_reqsize(ablkcipher) +
			    sizeof(struct ablkcipher_request);
	skcipher->keysize = calg->cra_ablkcipher.max_keysize;

	return 0;
}

static void crypto_skcipher_exit_tfm(struct crypto_tfm *tfm)
{
	struct crypto_skcipher *skcipher = __crypto_skcipher_cast(tfm);
	struct skcipher_alg *alg = crypto_skcipher_alg(skcipher);

	alg->exit(skcipher);
}

static int crypto_skcipher_init_tfm(struct crypto_tfm *tfm)
{
	struct crypto_skcipher *skcipher = __crypto_skcipher_cast(tfm);
	struct skcipher_alg *alg = crypto_skcipher_alg(skcipher);

	if (tfm->__crt_alg->cra_type == &crypto_blkcipher_type)
		return crypto_init_skcipher_ops_blkcipher(tfm);

	if (tfm->__crt_alg->cra_type == &crypto_ablkcipher_type ||
	    tfm->__crt_alg->cra_type == &crypto_givcipher_type)
		return crypto_init_skcipher_ops_ablkcipher(tfm);

	skcipher->setkey = alg->setkey;
	skcipher->encrypt = alg->encrypt;
	skcipher->decrypt = alg->decrypt;
	skcipher->ivsize = alg->ivsize;
	skcipher->keysize = alg->max_keysize;

	if (alg->exit)
		skcipher->base.exit = crypto_skcipher_exit_tfm;

	if (alg->init)
		return alg->init(skcipher);

	return 0;
}

static void crypto_skcipher_free_instance(struct crypto_instance *inst)
{
	struct skcipher_instance *skcipher =
		container_of(inst, struct skcipher_instance, s.base);

	skcipher->free(skcipher);
}

static void crypto_skcipher_show(struct seq_file *m, struct crypto_alg *alg)
	__attribute__ ((unused));
static void crypto_skcipher_show(struct seq_file *m, struct crypto_alg *alg)
{
	struct skcipher_alg *skcipher = container_of(alg, struct skcipher_alg,
						     base);

	seq_printf(m, "type         : skcipher\n");
	seq_printf(m, "async        : %s\n",
		   alg->cra_flags & CRYPTO_ALG_ASYNC ?  "yes" : "no");
	seq_printf(m, "blocksize    : %u\n", alg->cra_blocksize);
	seq_printf(m, "min keysize  : %u\n", skcipher->min_keysize);
	seq_printf(m, "max keysize  : %u\n", skcipher->max_keysize);
	seq_printf(m, "ivsize       : %u\n", skcipher->ivsize);
	seq_printf(m, "chunksize    : %u\n", skcipher->chunksize);
}

#ifdef CONFIG_NET
static int crypto_skcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
{
	struct crypto_report_blkcipher rblkcipher;
	struct skcipher_alg *skcipher = container_of(alg, struct skcipher_alg,
						     base);

	strncpy(rblkcipher.type, "skcipher", sizeof(rblkcipher.type));
	strncpy(rblkcipher.geniv, "<none>", sizeof(rblkcipher.geniv));

	rblkcipher.blocksize = alg->cra_blocksize;
	rblkcipher.min_keysize = skcipher->min_keysize;
	rblkcipher.max_keysize = skcipher->max_keysize;
	rblkcipher.ivsize = skcipher->ivsize;

	if (nla_put(skb, CRYPTOCFGA_REPORT_BLKCIPHER,
		    sizeof(struct crypto_report_blkcipher), &rblkcipher))
		goto nla_put_failure;
	return 0;

nla_put_failure:
	return -EMSGSIZE;
}
#else
static int crypto_skcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
{
	return -ENOSYS;
}
#endif

static const struct crypto_type crypto_skcipher_type2 = {
	.extsize = crypto_skcipher_extsize,
	.init_tfm = crypto_skcipher_init_tfm,
	.free = crypto_skcipher_free_instance,
#ifdef CONFIG_PROC_FS
	.show = crypto_skcipher_show,
#endif
	.report = crypto_skcipher_report,
	.maskclear = ~CRYPTO_ALG_TYPE_MASK,
	.maskset = CRYPTO_ALG_TYPE_BLKCIPHER_MASK,
	.type = CRYPTO_ALG_TYPE_SKCIPHER,
	.tfmsize = offsetof(struct crypto_skcipher, base),
};

int crypto_grab_skcipher(struct crypto_skcipher_spawn *spawn,
			  const char *name, u32 type, u32 mask)
{
	spawn->base.frontend = &crypto_skcipher_type2;
	return crypto_grab_spawn(&spawn->base, name, type, mask);
}
EXPORT_SYMBOL_GPL(crypto_grab_skcipher);

struct crypto_skcipher *crypto_alloc_skcipher(const char *alg_name,
					      u32 type, u32 mask)
{
	return crypto_alloc_tfm(alg_name, &crypto_skcipher_type2, type, mask);
}
EXPORT_SYMBOL_GPL(crypto_alloc_skcipher);
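/*
 * Allocating and using a transform from kernel code, as a rough sketch
 * (error handling abbreviated; key, iv, sg_in, sg_out and len stand in for
 * caller-provided data, and a real caller must also handle -EINPROGRESS or
 * -EBUSY from asynchronous implementations):
 *
 *	struct crypto_skcipher *tfm;
 *	struct skcipher_request *req;
 *
 *	tfm = crypto_alloc_skcipher("cbc(aes)", 0, 0);
 *	err = crypto_skcipher_setkey(tfm, key, keylen);
 *	req = skcipher_request_alloc(tfm, GFP_KERNEL);
 *	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP,
 *				      NULL, NULL);
 *	skcipher_request_set_crypt(req, sg_in, sg_out, len, iv);
 *	err = crypto_skcipher_encrypt(req);
 *	skcipher_request_free(req);
 *	crypto_free_skcipher(tfm);
 */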

int crypto_has_skcipher2(const char *alg_name, u32 type, u32 mask)
{
	return crypto_type_has_alg(alg_name, &crypto_skcipher_type2,
				   type, mask);
}
EXPORT_SYMBOL_GPL(crypto_has_skcipher2);

static int skcipher_prepare_alg(struct skcipher_alg *alg)
{
	struct crypto_alg *base = &alg->base;

	if (alg->ivsize > PAGE_SIZE / 8 || alg->chunksize > PAGE_SIZE / 8)
		return -EINVAL;

	if (!alg->chunksize)
		alg->chunksize = base->cra_blocksize;

	base->cra_type = &crypto_skcipher_type2;
	base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK;
	base->cra_flags |= CRYPTO_ALG_TYPE_SKCIPHER;

	return 0;
}

int crypto_register_skcipher(struct skcipher_alg *alg)
{
	struct crypto_alg *base = &alg->base;
	int err;

	err = skcipher_prepare_alg(alg);
	if (err)
		return err;

	return crypto_register_alg(base);
}
EXPORT_SYMBOL_GPL(crypto_register_skcipher);
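/*
 * Registration sketch for a new skcipher algorithm (illustrative only; the
 * toy_* and TOY_* identifiers are hypothetical):
 *
 *	static struct skcipher_alg toy_alg = {
 *		.base.cra_name		= "cbc(toy)",
 *		.base.cra_driver_name	= "cbc-toy-generic",
 *		.base.cra_priority	= 100,
 *		.base.cra_blocksize	= TOY_BLOCK_SIZE,
 *		.base.cra_ctxsize	= sizeof(struct toy_ctx),
 *		.base.cra_module	= THIS_MODULE,
 *		.min_keysize		= TOY_MIN_KEY_SIZE,
 *		.max_keysize		= TOY_MAX_KEY_SIZE,
 *		.ivsize			= TOY_BLOCK_SIZE,
 *		.setkey			= toy_setkey,
 *		.encrypt		= toy_encrypt,
 *		.decrypt		= toy_decrypt,
 *	};
 *
 * followed by crypto_register_skcipher(&toy_alg) from the module's init
 * function and crypto_unregister_skcipher(&toy_alg) on exit.
 */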

void crypto_unregister_skcipher(struct skcipher_alg *alg)
{
	crypto_unregister_alg(&alg->base);
}
EXPORT_SYMBOL_GPL(crypto_unregister_skcipher);

int crypto_register_skciphers(struct skcipher_alg *algs, int count)
{
	int i, ret;

	for (i = 0; i < count; i++) {
		ret = crypto_register_skcipher(&algs[i]);
		if (ret)
			goto err;
	}

	return 0;

err:
	for (--i; i >= 0; --i)
		crypto_unregister_skcipher(&algs[i]);

	return ret;
}
EXPORT_SYMBOL_GPL(crypto_register_skciphers);

void crypto_unregister_skciphers(struct skcipher_alg *algs, int count)
{
	int i;

	for (i = count - 1; i >= 0; --i)
		crypto_unregister_skcipher(&algs[i]);
}
EXPORT_SYMBOL_GPL(crypto_unregister_skciphers);

int skcipher_register_instance(struct crypto_template *tmpl,
			       struct skcipher_instance *inst)
{
	int err;

	err = skcipher_prepare_alg(&inst->alg);
	if (err)
		return err;

	return crypto_register_instance(tmpl, skcipher_crypto_instance(inst));
}
EXPORT_SYMBOL_GPL(skcipher_register_instance);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Symmetric key cipher type");