// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Symmetric key cipher operations.
 *
 * Generic encrypt/decrypt wrapper for ciphers, handles operations across
 * multiple page boundaries by using temporary blocks. In user context,
 * the kernel is given a chance to schedule us once per page.
 *
 * Copyright (c) 2015 Herbert Xu <herbert@gondor.apana.org.au>
 */

#include <crypto/internal/aead.h>
#include <crypto/internal/cipher.h>
#include <crypto/internal/skcipher.h>
#include <crypto/scatterwalk.h>
#include <linux/bug.h>
#include <linux/cryptouser.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <net/netlink.h>

#include "internal.h"

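/*
 * Internal walk state flags:
 *
 * PHYS:  the walk operates on physical pages (skcipher_walk_async());
 *        queued writebacks are flushed by skcipher_walk_complete().
 * SLOW:  the current step uses an aligned bounce buffer; the result is
 *        copied back to the destination in skcipher_walk_done().
 * COPY:  the current step processes data in walk->page and copies it
 *        back to the (unaligned) destination afterwards.
 * DIFF:  source and destination are mapped separately.
 * SLEEP: the walk may sleep, so GFP_KERNEL allocations are allowed.
 */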
enum {
	SKCIPHER_WALK_PHYS = 1 << 0,
	SKCIPHER_WALK_SLOW = 1 << 1,
	SKCIPHER_WALK_COPY = 1 << 2,
	SKCIPHER_WALK_DIFF = 1 << 3,
	SKCIPHER_WALK_SLEEP = 1 << 4,
};

struct skcipher_walk_buffer {
	struct list_head entry;
	struct scatter_walk dst;
	unsigned int len;
	u8 *data;
	u8 buffer[];
};

static int skcipher_walk_next(struct skcipher_walk *walk);

static inline void skcipher_map_src(struct skcipher_walk *walk)
{
	walk->src.virt.addr = scatterwalk_map(&walk->in);
}

static inline void skcipher_map_dst(struct skcipher_walk *walk)
{
	walk->dst.virt.addr = scatterwalk_map(&walk->out);
}

static inline void skcipher_unmap_src(struct skcipher_walk *walk)
{
	scatterwalk_unmap(walk->src.virt.addr);
}

static inline void skcipher_unmap_dst(struct skcipher_walk *walk)
{
	scatterwalk_unmap(walk->dst.virt.addr);
}

static inline gfp_t skcipher_walk_gfp(struct skcipher_walk *walk)
{
	return walk->flags & SKCIPHER_WALK_SLEEP ? GFP_KERNEL : GFP_ATOMIC;
}

/* Get a spot of the specified length that does not straddle a page.
 * The caller needs to ensure that there is enough space for this operation.
 */
static inline u8 *skcipher_get_spot(u8 *start, unsigned int len)
{
	u8 *end_page = (u8 *)(((unsigned long)(start + len - 1)) & PAGE_MASK);

	return max(start, end_page);
}

static inline struct skcipher_alg *__crypto_skcipher_alg(
	struct crypto_alg *alg)
{
	return container_of(alg, struct skcipher_alg, base);
}

static inline struct crypto_istat_cipher *skcipher_get_stat(
	struct skcipher_alg *alg)
{
#ifdef CONFIG_CRYPTO_STATS
	return &alg->stat;
#else
	return NULL;
#endif
}

static inline int crypto_skcipher_errstat(struct skcipher_alg *alg, int err)
{
	struct crypto_istat_cipher *istat = skcipher_get_stat(alg);

	if (!IS_ENABLED(CONFIG_CRYPTO_STATS))
		return err;

	if (err && err != -EINPROGRESS && err != -EBUSY)
		atomic64_inc(&istat->err_cnt);

	return err;
}

static int skcipher_done_slow(struct skcipher_walk *walk, unsigned int bsize)
{
	u8 *addr;

	addr = (u8 *)ALIGN((unsigned long)walk->buffer, walk->alignmask + 1);
	addr = skcipher_get_spot(addr, bsize);
	scatterwalk_copychunks(addr, &walk->out, bsize,
			       (walk->flags & SKCIPHER_WALK_PHYS) ? 2 : 1);
	return 0;
}

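/*
 * skcipher_walk_done() is called by algorithm implementations after each
 * step of the walk.  A non-negative @err is the number of bytes of the
 * current chunk that were left unprocessed; a negative @err aborts the
 * walk.  It unmaps or flushes the buffers of the completed step, advances
 * both scatterwalks, and either sets up the next chunk or finalizes the
 * walk (restoring the caller's IV buffer and freeing bounce pages).
 */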
int skcipher_walk_done(struct skcipher_walk *walk, int err)
{
	unsigned int n = walk->nbytes;
	unsigned int nbytes = 0;

	if (!n)
		goto finish;

	if (likely(err >= 0)) {
		n -= err;
		nbytes = walk->total - n;
	}

	if (likely(!(walk->flags & (SKCIPHER_WALK_PHYS |
				    SKCIPHER_WALK_SLOW |
				    SKCIPHER_WALK_COPY |
				    SKCIPHER_WALK_DIFF)))) {
unmap_src:
		skcipher_unmap_src(walk);
	} else if (walk->flags & SKCIPHER_WALK_DIFF) {
		skcipher_unmap_dst(walk);
		goto unmap_src;
	} else if (walk->flags & SKCIPHER_WALK_COPY) {
		skcipher_map_dst(walk);
		memcpy(walk->dst.virt.addr, walk->page, n);
		skcipher_unmap_dst(walk);
	} else if (unlikely(walk->flags & SKCIPHER_WALK_SLOW)) {
		if (err > 0) {
			/*
			 * Didn't process all bytes.  Either the algorithm is
			 * broken, or this was the last step and it turned out
			 * the message wasn't evenly divisible into blocks but
			 * the algorithm requires it.
			 */
			err = -EINVAL;
			nbytes = 0;
		} else
			n = skcipher_done_slow(walk, n);
	}

	if (err > 0)
		err = 0;

	walk->total = nbytes;
	walk->nbytes = 0;

	scatterwalk_advance(&walk->in, n);
	scatterwalk_advance(&walk->out, n);
	scatterwalk_done(&walk->in, 0, nbytes);
	scatterwalk_done(&walk->out, 1, nbytes);

	if (nbytes) {
		crypto_yield(walk->flags & SKCIPHER_WALK_SLEEP ?
			     CRYPTO_TFM_REQ_MAY_SLEEP : 0);
		return skcipher_walk_next(walk);
	}

finish:
	/* Short-circuit for the common/fast path. */
	if (!((unsigned long)walk->buffer | (unsigned long)walk->page))
		goto out;

	if (walk->flags & SKCIPHER_WALK_PHYS)
		goto out;

	if (walk->iv != walk->oiv)
		memcpy(walk->oiv, walk->iv, walk->ivsize);
	if (walk->buffer != walk->page)
		kfree(walk->buffer);
	if (walk->page)
		free_page((unsigned long)walk->page);

out:
	return err;
}
EXPORT_SYMBOL_GPL(skcipher_walk_done);

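/*
 * Flush the write-back buffers queued during a physical (async) walk.
 * On success each queued chunk is copied out to its final destination;
 * on error the copies are skipped and the buffers are simply freed.
 */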
void skcipher_walk_complete(struct skcipher_walk *walk, int err)
{
	struct skcipher_walk_buffer *p, *tmp;

	list_for_each_entry_safe(p, tmp, &walk->buffers, entry) {
		u8 *data;

		if (err)
			goto done;

		data = p->data;
		if (!data) {
			data = PTR_ALIGN(&p->buffer[0], walk->alignmask + 1);
			data = skcipher_get_spot(data, walk->stride);
		}

		scatterwalk_copychunks(data, &p->dst, p->len, 1);

		if (offset_in_page(p->data) + p->len + walk->stride >
		    PAGE_SIZE)
			free_page((unsigned long)p->data);

done:
		list_del(&p->entry);
		kfree(p);
	}

	if (!err && walk->iv != walk->oiv)
		memcpy(walk->oiv, walk->iv, walk->ivsize);
	if (walk->buffer != walk->page)
		kfree(walk->buffer);
	if (walk->page)
		free_page((unsigned long)walk->page);
}
EXPORT_SYMBOL_GPL(skcipher_walk_complete);

static void skcipher_queue_write(struct skcipher_walk *walk,
				 struct skcipher_walk_buffer *p)
{
	p->dst = walk->out;
	list_add_tail(&p->entry, &walk->buffers);
}

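/*
 * Slow path: bounce the next chunk through a temporary buffer that is
 * aligned to the walk's alignmask and guaranteed not to cross a page.
 * The result is written back to the destination when the step completes.
 */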
static int skcipher_next_slow(struct skcipher_walk *walk, unsigned int bsize)
{
	bool phys = walk->flags & SKCIPHER_WALK_PHYS;
	unsigned alignmask = walk->alignmask;
	struct skcipher_walk_buffer *p;
	unsigned a;
	unsigned n;
	u8 *buffer;
	void *v;

	if (!phys) {
		if (!walk->buffer)
			walk->buffer = walk->page;
		buffer = walk->buffer;
		if (buffer)
			goto ok;
	}

	/* Start with the minimum alignment of kmalloc. */
	a = crypto_tfm_ctx_alignment() - 1;
	n = bsize;

	if (phys) {
		/* Calculate the minimum alignment of p->buffer. */
		a &= (sizeof(*p) ^ (sizeof(*p) - 1)) >> 1;
		n += sizeof(*p);
	}

	/* Minimum size to align p->buffer by alignmask. */
	n += alignmask & ~a;

	/* Minimum size to ensure p->buffer does not straddle a page. */
	n += (bsize - 1) & ~(alignmask | a);

	v = kzalloc(n, skcipher_walk_gfp(walk));
	if (!v)
		return skcipher_walk_done(walk, -ENOMEM);

	if (phys) {
		p = v;
		p->len = bsize;
		skcipher_queue_write(walk, p);
		buffer = p->buffer;
	} else {
		walk->buffer = v;
		buffer = v;
	}

ok:
	walk->dst.virt.addr = PTR_ALIGN(buffer, alignmask + 1);
	walk->dst.virt.addr = skcipher_get_spot(walk->dst.virt.addr, bsize);
	walk->src.virt.addr = walk->dst.virt.addr;

	scatterwalk_copychunks(walk->src.virt.addr, &walk->in, bsize, 0);

	walk->nbytes = bsize;
	walk->flags |= SKCIPHER_WALK_SLOW;

	return 0;
}

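/*
 * Copy path: source or destination is not sufficiently aligned, so copy
 * the chunk into the preallocated walk->page and process it in place
 * there.  skcipher_walk_done() copies the result back to the real
 * destination afterwards.
 */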
static int skcipher_next_copy(struct skcipher_walk *walk)
{
	struct skcipher_walk_buffer *p;
	u8 *tmp = walk->page;

	skcipher_map_src(walk);
	memcpy(tmp, walk->src.virt.addr, walk->nbytes);
	skcipher_unmap_src(walk);

	walk->src.virt.addr = tmp;
	walk->dst.virt.addr = tmp;

	if (!(walk->flags & SKCIPHER_WALK_PHYS))
		return 0;

	p = kmalloc(sizeof(*p), skcipher_walk_gfp(walk));
	if (!p)
		return -ENOMEM;

	p->data = walk->page;
	p->len = walk->nbytes;
	skcipher_queue_write(walk, p);

	if (offset_in_page(walk->page) + walk->nbytes + walk->stride >
	    PAGE_SIZE)
		walk->page = NULL;
	else
		walk->page += walk->nbytes;

	return 0;
}

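/*
 * Fast path: both source and destination are suitably aligned, so map
 * them and operate on the scatterlist pages directly.  If they refer to
 * the same page at the same offset, a single mapping is shared for both.
 */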
static int skcipher_next_fast(struct skcipher_walk *walk)
{
	unsigned long diff;

	walk->src.phys.page = scatterwalk_page(&walk->in);
	walk->src.phys.offset = offset_in_page(walk->in.offset);
	walk->dst.phys.page = scatterwalk_page(&walk->out);
	walk->dst.phys.offset = offset_in_page(walk->out.offset);

	if (walk->flags & SKCIPHER_WALK_PHYS)
		return 0;

	diff = walk->src.phys.offset - walk->dst.phys.offset;
	diff |= walk->src.virt.page - walk->dst.virt.page;

	skcipher_map_src(walk);
	walk->dst.virt.addr = walk->src.virt.addr;

	if (diff) {
		walk->flags |= SKCIPHER_WALK_DIFF;
		skcipher_map_dst(walk);
	}

	return 0;
}

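/*
 * Set up the next chunk of the walk: clamp it to what both scatterlists
 * can provide contiguously and choose between the fast, copy, and slow
 * strategies above.
 */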
static int skcipher_walk_next(struct skcipher_walk *walk)
{
	unsigned int bsize;
	unsigned int n;
	int err;

	walk->flags &= ~(SKCIPHER_WALK_SLOW | SKCIPHER_WALK_COPY |
			 SKCIPHER_WALK_DIFF);

	n = walk->total;
	bsize = min(walk->stride, max(n, walk->blocksize));
	n = scatterwalk_clamp(&walk->in, n);
	n = scatterwalk_clamp(&walk->out, n);

	if (unlikely(n < bsize)) {
		if (unlikely(walk->total < walk->blocksize))
			return skcipher_walk_done(walk, -EINVAL);

slow_path:
		err = skcipher_next_slow(walk, bsize);
		goto set_phys_lowmem;
	}

	if (unlikely((walk->in.offset | walk->out.offset) & walk->alignmask)) {
		if (!walk->page) {
			gfp_t gfp = skcipher_walk_gfp(walk);

			walk->page = (void *)__get_free_page(gfp);
			if (!walk->page)
				goto slow_path;
		}

		walk->nbytes = min_t(unsigned, n,
				     PAGE_SIZE - offset_in_page(walk->page));
		walk->flags |= SKCIPHER_WALK_COPY;
		err = skcipher_next_copy(walk);
		goto set_phys_lowmem;
	}

	walk->nbytes = n;

	return skcipher_next_fast(walk);

set_phys_lowmem:
	if (!err && (walk->flags & SKCIPHER_WALK_PHYS)) {
		walk->src.phys.page = virt_to_page(walk->src.virt.addr);
		walk->dst.phys.page = virt_to_page(walk->dst.virt.addr);
		walk->src.phys.offset &= PAGE_SIZE - 1;
		walk->dst.phys.offset &= PAGE_SIZE - 1;
	}
	return err;
}

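/*
 * The IV the caller passed in is not aligned to the algorithm's
 * alignmask, so copy it into a freshly allocated, suitably aligned
 * buffer.  The caller's buffer is restored from walk->oiv when the
 * walk finishes.
 */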
static int skcipher_copy_iv(struct skcipher_walk *walk)
{
	unsigned a = crypto_tfm_ctx_alignment() - 1;
	unsigned alignmask = walk->alignmask;
	unsigned ivsize = walk->ivsize;
	unsigned bs = walk->stride;
	unsigned aligned_bs;
	unsigned size;
	u8 *iv;

	aligned_bs = ALIGN(bs, alignmask + 1);

	/* Minimum size to align buffer by alignmask. */
	size = alignmask & ~a;

	if (walk->flags & SKCIPHER_WALK_PHYS)
		size += ivsize;
	else {
		size += aligned_bs + ivsize;

		/* Minimum size to ensure buffer does not straddle a page. */
		size += (bs - 1) & ~(alignmask | a);
	}

	walk->buffer = kmalloc(size, skcipher_walk_gfp(walk));
	if (!walk->buffer)
		return -ENOMEM;

	iv = PTR_ALIGN(walk->buffer, alignmask + 1);
	iv = skcipher_get_spot(iv, bs) + aligned_bs;

	walk->iv = memcpy(iv, walk->iv, walk->ivsize);
	return 0;
}

static int skcipher_walk_first(struct skcipher_walk *walk)
{
	if (WARN_ON_ONCE(in_hardirq()))
		return -EDEADLK;

	walk->buffer = NULL;
	if (unlikely(((unsigned long)walk->iv & walk->alignmask))) {
		int err = skcipher_copy_iv(walk);
		if (err)
			return err;
	}

	walk->page = NULL;

	return skcipher_walk_next(walk);
}

static int skcipher_walk_skcipher(struct skcipher_walk *walk,
				  struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);

	walk->total = req->cryptlen;
	walk->nbytes = 0;
	walk->iv = req->iv;
	walk->oiv = req->iv;

	if (unlikely(!walk->total))
		return 0;

	scatterwalk_start(&walk->in, req->src);
	scatterwalk_start(&walk->out, req->dst);

	walk->flags &= ~SKCIPHER_WALK_SLEEP;
	walk->flags |= req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
		       SKCIPHER_WALK_SLEEP : 0;

	walk->blocksize = crypto_skcipher_blocksize(tfm);
	walk->stride = crypto_skcipher_walksize(tfm);
	walk->ivsize = crypto_skcipher_ivsize(tfm);
	walk->alignmask = crypto_skcipher_alignmask(tfm);

	return skcipher_walk_first(walk);
}

int skcipher_walk_virt(struct skcipher_walk *walk,
		       struct skcipher_request *req, bool atomic)
{
	int err;

	might_sleep_if(req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP);

	walk->flags &= ~SKCIPHER_WALK_PHYS;

	err = skcipher_walk_skcipher(walk, req);

	walk->flags &= atomic ? ~SKCIPHER_WALK_SLEEP : ~0;

	return err;
}
EXPORT_SYMBOL_GPL(skcipher_walk_virt);

int skcipher_walk_async(struct skcipher_walk *walk,
			struct skcipher_request *req)
{
	walk->flags |= SKCIPHER_WALK_PHYS;

	INIT_LIST_HEAD(&walk->buffers);

	return skcipher_walk_skcipher(walk, req);
}
EXPORT_SYMBOL_GPL(skcipher_walk_async);

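/*
 * Like skcipher_walk_skcipher(), but for AEAD requests: the associated
 * data is skipped in both scatterlists before the walk starts, so only
 * the plaintext/ciphertext region is walked.  The caller sets
 * walk->total beforehand (see the encrypt/decrypt wrappers below).
 */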
static int skcipher_walk_aead_common(struct skcipher_walk *walk,
				     struct aead_request *req, bool atomic)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	int err;

	walk->nbytes = 0;
	walk->iv = req->iv;
	walk->oiv = req->iv;

	if (unlikely(!walk->total))
		return 0;

	walk->flags &= ~SKCIPHER_WALK_PHYS;

	scatterwalk_start(&walk->in, req->src);
	scatterwalk_start(&walk->out, req->dst);

	scatterwalk_copychunks(NULL, &walk->in, req->assoclen, 2);
	scatterwalk_copychunks(NULL, &walk->out, req->assoclen, 2);

	scatterwalk_done(&walk->in, 0, walk->total);
	scatterwalk_done(&walk->out, 0, walk->total);

	if (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP)
		walk->flags |= SKCIPHER_WALK_SLEEP;
	else
		walk->flags &= ~SKCIPHER_WALK_SLEEP;

	walk->blocksize = crypto_aead_blocksize(tfm);
	walk->stride = crypto_aead_chunksize(tfm);
	walk->ivsize = crypto_aead_ivsize(tfm);
	walk->alignmask = crypto_aead_alignmask(tfm);

	err = skcipher_walk_first(walk);

	if (atomic)
		walk->flags &= ~SKCIPHER_WALK_SLEEP;

	return err;
}

int skcipher_walk_aead_encrypt(struct skcipher_walk *walk,
			       struct aead_request *req, bool atomic)
{
	walk->total = req->cryptlen;

	return skcipher_walk_aead_common(walk, req, atomic);
}
EXPORT_SYMBOL_GPL(skcipher_walk_aead_encrypt);

int skcipher_walk_aead_decrypt(struct skcipher_walk *walk,
			       struct aead_request *req, bool atomic)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);

	walk->total = req->cryptlen - crypto_aead_authsize(tfm);

	return skcipher_walk_aead_common(walk, req, atomic);
}
EXPORT_SYMBOL_GPL(skcipher_walk_aead_decrypt);

static void skcipher_set_needkey(struct crypto_skcipher *tfm)
{
	if (crypto_skcipher_max_keysize(tfm) != 0)
		crypto_skcipher_set_flags(tfm, CRYPTO_TFM_NEED_KEY);
}

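/*
 * The caller's key buffer is not aligned to the algorithm's alignmask;
 * bounce it through a temporary aligned allocation, then wipe and free
 * the copy once ->setkey() has consumed it.
 */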
static int skcipher_setkey_unaligned(struct crypto_skcipher *tfm,
				     const u8 *key, unsigned int keylen)
{
	unsigned long alignmask = crypto_skcipher_alignmask(tfm);
	struct skcipher_alg *cipher = crypto_skcipher_alg(tfm);
	u8 *buffer, *alignbuffer;
	unsigned long absize;
	int ret;

	absize = keylen + alignmask;
	buffer = kmalloc(absize, GFP_ATOMIC);
	if (!buffer)
		return -ENOMEM;

	alignbuffer = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1);
	memcpy(alignbuffer, key, keylen);
	ret = cipher->setkey(tfm, alignbuffer, keylen);
	kfree_sensitive(buffer);
	return ret;
}

int crypto_skcipher_setkey(struct crypto_skcipher *tfm, const u8 *key,
			   unsigned int keylen)
{
	struct skcipher_alg *cipher = crypto_skcipher_alg(tfm);
	unsigned long alignmask = crypto_skcipher_alignmask(tfm);
	int err;

	if (keylen < cipher->min_keysize || keylen > cipher->max_keysize)
		return -EINVAL;

	if ((unsigned long)key & alignmask)
		err = skcipher_setkey_unaligned(tfm, key, keylen);
	else
		err = cipher->setkey(tfm, key, keylen);

	if (unlikely(err)) {
		skcipher_set_needkey(tfm);
		return err;
	}

	crypto_skcipher_clear_flags(tfm, CRYPTO_TFM_NEED_KEY);
	return 0;
}
EXPORT_SYMBOL_GPL(crypto_skcipher_setkey);

int crypto_skcipher_encrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
	int ret;

	if (IS_ENABLED(CONFIG_CRYPTO_STATS)) {
		struct crypto_istat_cipher *istat = skcipher_get_stat(alg);

		atomic64_inc(&istat->encrypt_cnt);
		atomic64_add(req->cryptlen, &istat->encrypt_tlen);
	}

	if (crypto_skcipher_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
		ret = -ENOKEY;
	else
		ret = alg->encrypt(req);

	return crypto_skcipher_errstat(alg, ret);
}
EXPORT_SYMBOL_GPL(crypto_skcipher_encrypt);

int crypto_skcipher_decrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
	int ret;

	if (IS_ENABLED(CONFIG_CRYPTO_STATS)) {
		struct crypto_istat_cipher *istat = skcipher_get_stat(alg);

		atomic64_inc(&istat->decrypt_cnt);
		atomic64_add(req->cryptlen, &istat->decrypt_tlen);
	}

	if (crypto_skcipher_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
		ret = -ENOKEY;
	else
		ret = alg->decrypt(req);

	return crypto_skcipher_errstat(alg, ret);
}
EXPORT_SYMBOL_GPL(crypto_skcipher_decrypt);

static void crypto_skcipher_exit_tfm(struct crypto_tfm *tfm)
{
	struct crypto_skcipher *skcipher = __crypto_skcipher_cast(tfm);
	struct skcipher_alg *alg = crypto_skcipher_alg(skcipher);

	alg->exit(skcipher);
}

static int crypto_skcipher_init_tfm(struct crypto_tfm *tfm)
{
	struct crypto_skcipher *skcipher = __crypto_skcipher_cast(tfm);
	struct skcipher_alg *alg = crypto_skcipher_alg(skcipher);

	skcipher_set_needkey(skcipher);

	if (alg->exit)
		skcipher->base.exit = crypto_skcipher_exit_tfm;

	if (alg->init)
		return alg->init(skcipher);

	return 0;
}

static void crypto_skcipher_free_instance(struct crypto_instance *inst)
{
	struct skcipher_instance *skcipher =
		container_of(inst, struct skcipher_instance, s.base);

	skcipher->free(skcipher);
}

static void crypto_skcipher_show(struct seq_file *m, struct crypto_alg *alg)
	__maybe_unused;
static void crypto_skcipher_show(struct seq_file *m, struct crypto_alg *alg)
{
	struct skcipher_alg *skcipher = __crypto_skcipher_alg(alg);

	seq_printf(m, "type         : skcipher\n");
	seq_printf(m, "async        : %s\n",
		   alg->cra_flags & CRYPTO_ALG_ASYNC ? "yes" : "no");
	seq_printf(m, "blocksize    : %u\n", alg->cra_blocksize);
	seq_printf(m, "min keysize  : %u\n", skcipher->min_keysize);
	seq_printf(m, "max keysize  : %u\n", skcipher->max_keysize);
	seq_printf(m, "ivsize       : %u\n", skcipher->ivsize);
	seq_printf(m, "chunksize    : %u\n", skcipher->chunksize);
	seq_printf(m, "walksize     : %u\n", skcipher->walksize);
}

static int __maybe_unused crypto_skcipher_report(
	struct sk_buff *skb, struct crypto_alg *alg)
{
	struct skcipher_alg *skcipher = __crypto_skcipher_alg(alg);
	struct crypto_report_blkcipher rblkcipher;

	memset(&rblkcipher, 0, sizeof(rblkcipher));

	strscpy(rblkcipher.type, "skcipher", sizeof(rblkcipher.type));
	strscpy(rblkcipher.geniv, "<none>", sizeof(rblkcipher.geniv));

	rblkcipher.blocksize = alg->cra_blocksize;
	rblkcipher.min_keysize = skcipher->min_keysize;
	rblkcipher.max_keysize = skcipher->max_keysize;
	rblkcipher.ivsize = skcipher->ivsize;

	return nla_put(skb, CRYPTOCFGA_REPORT_BLKCIPHER,
		       sizeof(rblkcipher), &rblkcipher);
}

static int __maybe_unused crypto_skcipher_report_stat(
	struct sk_buff *skb, struct crypto_alg *alg)
{
	struct skcipher_alg *skcipher = __crypto_skcipher_alg(alg);
	struct crypto_istat_cipher *istat;
	struct crypto_stat_cipher rcipher;

	istat = skcipher_get_stat(skcipher);

	memset(&rcipher, 0, sizeof(rcipher));

	strscpy(rcipher.type, "cipher", sizeof(rcipher.type));

	rcipher.stat_encrypt_cnt = atomic64_read(&istat->encrypt_cnt);
	rcipher.stat_encrypt_tlen = atomic64_read(&istat->encrypt_tlen);
	rcipher.stat_decrypt_cnt = atomic64_read(&istat->decrypt_cnt);
	rcipher.stat_decrypt_tlen = atomic64_read(&istat->decrypt_tlen);
	rcipher.stat_err_cnt = atomic64_read(&istat->err_cnt);

	return nla_put(skb, CRYPTOCFGA_STAT_CIPHER, sizeof(rcipher), &rcipher);
}

static const struct crypto_type crypto_skcipher_type = {
	.extsize = crypto_alg_extsize,
	.init_tfm = crypto_skcipher_init_tfm,
	.free = crypto_skcipher_free_instance,
#ifdef CONFIG_PROC_FS
	.show = crypto_skcipher_show,
#endif
#if IS_ENABLED(CONFIG_CRYPTO_USER)
	.report = crypto_skcipher_report,
#endif
#ifdef CONFIG_CRYPTO_STATS
	.report_stat = crypto_skcipher_report_stat,
#endif
	.maskclear = ~CRYPTO_ALG_TYPE_MASK,
	.maskset = CRYPTO_ALG_TYPE_MASK,
	.type = CRYPTO_ALG_TYPE_SKCIPHER,
	.tfmsize = offsetof(struct crypto_skcipher, base),
};

int crypto_grab_skcipher(struct crypto_skcipher_spawn *spawn,
			 struct crypto_instance *inst,
			 const char *name, u32 type, u32 mask)
{
	spawn->base.frontend = &crypto_skcipher_type;
	return crypto_grab_spawn(&spawn->base, inst, name, type, mask);
}
EXPORT_SYMBOL_GPL(crypto_grab_skcipher);

struct crypto_skcipher *crypto_alloc_skcipher(const char *alg_name,
					      u32 type, u32 mask)
{
	return crypto_alloc_tfm(alg_name, &crypto_skcipher_type, type, mask);
}
EXPORT_SYMBOL_GPL(crypto_alloc_skcipher);

struct crypto_sync_skcipher *crypto_alloc_sync_skcipher(
				const char *alg_name, u32 type, u32 mask)
{
	struct crypto_skcipher *tfm;

	/* Only sync algorithms allowed. */
	mask |= CRYPTO_ALG_ASYNC | CRYPTO_ALG_SKCIPHER_REQSIZE_LARGE;

	tfm = crypto_alloc_tfm(alg_name, &crypto_skcipher_type, type, mask);

	/*
	 * Make sure we do not allocate something that might get used with
	 * an on-stack request: check the request size.
	 */
	if (!IS_ERR(tfm) && WARN_ON(crypto_skcipher_reqsize(tfm) >
				    MAX_SYNC_SKCIPHER_REQSIZE)) {
		crypto_free_skcipher(tfm);
		return ERR_PTR(-EINVAL);
	}

	return (struct crypto_sync_skcipher *)tfm;
}
EXPORT_SYMBOL_GPL(crypto_alloc_sync_skcipher);

int crypto_has_skcipher(const char *alg_name, u32 type, u32 mask)
{
	return crypto_type_has_alg(alg_name, &crypto_skcipher_type, type, mask);
}
EXPORT_SYMBOL_GPL(crypto_has_skcipher);

static int skcipher_prepare_alg(struct skcipher_alg *alg)
{
	struct crypto_istat_cipher *istat = skcipher_get_stat(alg);
	struct crypto_alg *base = &alg->base;

	if (alg->ivsize > PAGE_SIZE / 8 || alg->chunksize > PAGE_SIZE / 8 ||
	    alg->walksize > PAGE_SIZE / 8)
		return -EINVAL;

	if (!alg->chunksize)
		alg->chunksize = base->cra_blocksize;
	if (!alg->walksize)
		alg->walksize = alg->chunksize;

	base->cra_type = &crypto_skcipher_type;
	base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK;
	base->cra_flags |= CRYPTO_ALG_TYPE_SKCIPHER;

	if (IS_ENABLED(CONFIG_CRYPTO_STATS))
		memset(istat, 0, sizeof(*istat));

	return 0;
}

int crypto_register_skcipher(struct skcipher_alg *alg)
{
	struct crypto_alg *base = &alg->base;
	int err;

	err = skcipher_prepare_alg(alg);
	if (err)
		return err;

	return crypto_register_alg(base);
}
EXPORT_SYMBOL_GPL(crypto_register_skcipher);

void crypto_unregister_skcipher(struct skcipher_alg *alg)
{
	crypto_unregister_alg(&alg->base);
}
EXPORT_SYMBOL_GPL(crypto_unregister_skcipher);

int crypto_register_skciphers(struct skcipher_alg *algs, int count)
{
	int i, ret;

	for (i = 0; i < count; i++) {
		ret = crypto_register_skcipher(&algs[i]);
		if (ret)
			goto err;
	}

	return 0;

err:
	for (--i; i >= 0; --i)
		crypto_unregister_skcipher(&algs[i]);

	return ret;
}
EXPORT_SYMBOL_GPL(crypto_register_skciphers);

void crypto_unregister_skciphers(struct skcipher_alg *algs, int count)
{
	int i;

	for (i = count - 1; i >= 0; --i)
		crypto_unregister_skcipher(&algs[i]);
}
EXPORT_SYMBOL_GPL(crypto_unregister_skciphers);

int skcipher_register_instance(struct crypto_template *tmpl,
			       struct skcipher_instance *inst)
{
	int err;

	if (WARN_ON(!inst->free))
		return -EINVAL;

	err = skcipher_prepare_alg(&inst->alg);
	if (err)
		return err;

	return crypto_register_instance(tmpl, skcipher_crypto_instance(inst));
}
EXPORT_SYMBOL_GPL(skcipher_register_instance);

static int skcipher_setkey_simple(struct crypto_skcipher *tfm, const u8 *key,
				  unsigned int keylen)
{
	struct crypto_cipher *cipher = skcipher_cipher_simple(tfm);

	crypto_cipher_clear_flags(cipher, CRYPTO_TFM_REQ_MASK);
	crypto_cipher_set_flags(cipher, crypto_skcipher_get_flags(tfm) &
				CRYPTO_TFM_REQ_MASK);
	return crypto_cipher_setkey(cipher, key, keylen);
}

static int skcipher_init_tfm_simple(struct crypto_skcipher *tfm)
{
	struct skcipher_instance *inst = skcipher_alg_instance(tfm);
	struct crypto_cipher_spawn *spawn = skcipher_instance_ctx(inst);
	struct skcipher_ctx_simple *ctx = crypto_skcipher_ctx(tfm);
	struct crypto_cipher *cipher;

	cipher = crypto_spawn_cipher(spawn);
	if (IS_ERR(cipher))
		return PTR_ERR(cipher);

	ctx->cipher = cipher;
	return 0;
}

static void skcipher_exit_tfm_simple(struct crypto_skcipher *tfm)
{
	struct skcipher_ctx_simple *ctx = crypto_skcipher_ctx(tfm);

	crypto_free_cipher(ctx->cipher);
}

static void skcipher_free_instance_simple(struct skcipher_instance *inst)
{
	crypto_drop_cipher(skcipher_instance_ctx(inst));
	kfree(inst);
}

/**
 * skcipher_alloc_instance_simple - allocate instance of simple block cipher mode
 *
 * Allocate an skcipher_instance for a simple block cipher mode of operation,
 * e.g. cbc or ecb.  The instance context will have just a single crypto_spawn,
 * that for the underlying cipher.  The {min,max}_keysize, ivsize, blocksize,
 * alignmask, and priority are set from the underlying cipher but can be
 * overridden if needed.  The tfm context defaults to skcipher_ctx_simple, and
 * default ->setkey(), ->init(), and ->exit() methods are installed.
 *
 * @tmpl: the template being instantiated
 * @tb: the template parameters
 *
 * Return: a pointer to the new instance, or an ERR_PTR().  The caller still
 *	   needs to register the instance.
 */
struct skcipher_instance *skcipher_alloc_instance_simple(
	struct crypto_template *tmpl, struct rtattr **tb)
{
	u32 mask;
	struct skcipher_instance *inst;
	struct crypto_cipher_spawn *spawn;
	struct crypto_alg *cipher_alg;
	int err;

	err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SKCIPHER, &mask);
	if (err)
		return ERR_PTR(err);

	inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL);
	if (!inst)
		return ERR_PTR(-ENOMEM);
	spawn = skcipher_instance_ctx(inst);

	err = crypto_grab_cipher(spawn, skcipher_crypto_instance(inst),
				 crypto_attr_alg_name(tb[1]), 0, mask);
	if (err)
		goto err_free_inst;
	cipher_alg = crypto_spawn_cipher_alg(spawn);

	err = crypto_inst_setname(skcipher_crypto_instance(inst), tmpl->name,
				  cipher_alg);
	if (err)
		goto err_free_inst;

	inst->free = skcipher_free_instance_simple;

	/* Default algorithm properties, can be overridden */
	inst->alg.base.cra_blocksize = cipher_alg->cra_blocksize;
	inst->alg.base.cra_alignmask = cipher_alg->cra_alignmask;
	inst->alg.base.cra_priority = cipher_alg->cra_priority;
	inst->alg.min_keysize = cipher_alg->cra_cipher.cia_min_keysize;
	inst->alg.max_keysize = cipher_alg->cra_cipher.cia_max_keysize;
	inst->alg.ivsize = cipher_alg->cra_blocksize;

	/* Use skcipher_ctx_simple by default, can be overridden */
	inst->alg.base.cra_ctxsize = sizeof(struct skcipher_ctx_simple);
	inst->alg.setkey = skcipher_setkey_simple;
	inst->alg.init = skcipher_init_tfm_simple;
	inst->alg.exit = skcipher_exit_tfm_simple;

	return inst;

err_free_inst:
	skcipher_free_instance_simple(inst);
	return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(skcipher_alloc_instance_simple);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Symmetric key cipher type");
MODULE_IMPORT_NS(CRYPTO_INTERNAL);