xref: /openbmc/linux/crypto/algif_aead.c (revision d887c52d)
1400c40cfSStephan Mueller /*
2400c40cfSStephan Mueller  * algif_aead: User-space interface for AEAD algorithms
3400c40cfSStephan Mueller  *
4400c40cfSStephan Mueller  * Copyright (C) 2014, Stephan Mueller <smueller@chronox.de>
5400c40cfSStephan Mueller  *
6400c40cfSStephan Mueller  * This file provides the user-space API for AEAD ciphers.
7400c40cfSStephan Mueller  *
8400c40cfSStephan Mueller  * This program is free software; you can redistribute it and/or modify it
9400c40cfSStephan Mueller  * under the terms of the GNU General Public License as published by the Free
10400c40cfSStephan Mueller  * Software Foundation; either version 2 of the License, or (at your option)
11400c40cfSStephan Mueller  * any later version.
12d887c52dSStephan Mueller  *
13d887c52dSStephan Mueller  * The following concept of the memory management is used:
14d887c52dSStephan Mueller  *
15d887c52dSStephan Mueller  * The kernel maintains two SGLs, the TX SGL and the RX SGL. The TX SGL is
16d887c52dSStephan Mueller  * filled by user space with the data submitted via sendpage/sendmsg. Filling
17d887c52dSStephan Mueller  * up the TX SGL does not cause a crypto operation -- the data will only be
18d887c52dSStephan Mueller  * tracked by the kernel. Upon receipt of one recvmsg call, the caller must
19d887c52dSStephan Mueller  * provide a buffer which is tracked with the RX SGL.
20d887c52dSStephan Mueller  *
21d887c52dSStephan Mueller  * During the processing of the recvmsg operation, the cipher request is
22d887c52dSStephan Mueller  * allocated and prepared. As part of the recvmsg operation, the processed
23d887c52dSStephan Mueller  * TX buffers are extracted from the TX SGL into a separate SGL.
24d887c52dSStephan Mueller  *
25d887c52dSStephan Mueller  * After the completion of the crypto operation, the RX SGL and the cipher
26d887c52dSStephan Mueller  * request is released. The extracted TX SGL parts are released together with
27d887c52dSStephan Mueller  * the RX SGL release.
28400c40cfSStephan Mueller  */
29400c40cfSStephan Mueller 
3083094e5eSTadeusz Struk #include <crypto/internal/aead.h>
31400c40cfSStephan Mueller #include <crypto/scatterwalk.h>
32400c40cfSStephan Mueller #include <crypto/if_alg.h>
33400c40cfSStephan Mueller #include <linux/init.h>
34400c40cfSStephan Mueller #include <linux/list.h>
35400c40cfSStephan Mueller #include <linux/kernel.h>
36174cd4b1SIngo Molnar #include <linux/sched/signal.h>
37400c40cfSStephan Mueller #include <linux/mm.h>
38400c40cfSStephan Mueller #include <linux/module.h>
39400c40cfSStephan Mueller #include <linux/net.h>
40400c40cfSStephan Mueller #include <net/sock.h>
41400c40cfSStephan Mueller 
/* One link of the TX SGL chain; holds up to MAX_SGL_ENTS data entries. */
struct aead_tsgl {
	struct list_head list;		/* Link into aead_ctx->tsgl_list */
	unsigned int cur;		/* Last processed SG entry */
	struct scatterlist sg[0];	/* Array of SGs forming the SGL */
};
47400c40cfSStephan Mueller 
/* One RX SGL segment describing a user-space receive buffer. */
struct aead_rsgl {
	struct af_alg_sgl sgl;		/* Pinned pages of the RX buffer */
	struct list_head list;		/* Link into aead_async_req->rsgl_list */
	size_t sg_num_bytes;		/* Bytes of data in that SGL */
};
5383094e5eSTadeusz Struk 
5483094e5eSTadeusz Struk struct aead_async_req {
5583094e5eSTadeusz Struk 	struct kiocb *iocb;
56e6534aebSHerbert Xu 	struct sock *sk;
57d887c52dSStephan Mueller 
58d887c52dSStephan Mueller 	struct aead_rsgl first_rsgl;	/* First RX SG */
59d887c52dSStephan Mueller 	struct list_head rsgl_list;	/* Track RX SGs */
60d887c52dSStephan Mueller 
61d887c52dSStephan Mueller 	struct scatterlist *tsgl;	/* priv. TX SGL of buffers to process */
62d887c52dSStephan Mueller 	unsigned int tsgl_entries;	/* number of entries in priv. TX SGL */
63d887c52dSStephan Mueller 
64d887c52dSStephan Mueller 	unsigned int outlen;		/* Filled output buf length */
65d887c52dSStephan Mueller 
66d887c52dSStephan Mueller 	unsigned int areqlen;		/* Length of this data struct */
67d887c52dSStephan Mueller 	struct aead_request aead_req;	/* req ctx trails this struct */
6883094e5eSTadeusz Struk };
6983094e5eSTadeusz Struk 
/* Transform state shared by all request sockets of one parent socket. */
struct aead_tfm {
	struct crypto_aead *aead;	/* AEAD cipher handle */
	bool has_key;			/* Key has been set via setsockopt */
};
742a2a251fSStephan Mueller 
/* Per-socket context; lifetime of one accept()ed algif socket. */
struct aead_ctx {
	struct list_head tsgl_list;	/* Link to TX SGL */

	void *iv;			/* IV buffer, ivsize bytes */
	size_t aead_assoclen;		/* Length of AAD within the TX data */

	struct af_alg_completion completion;	/* sync work queue */

	size_t used;		/* TX bytes sent to kernel */
	size_t rcvused;		/* total RX bytes to be processed by kernel */

	bool more;		/* More data to be expected? */
	bool merge;		/* Merge new data into existing SG */
	bool enc;		/* Crypto operation: enc, dec */

	unsigned int len;	/* Length of allocated memory for this struct */
};
92400c40cfSStephan Mueller 
/*
 * Usable SG entries per aead_tsgl: sized so that the struct header plus
 * MAX_SGL_ENTS + 1 scatterlist entries (the extra one is reserved for
 * chaining to the next SGL) fit within a 4096-byte allocation.
 */
#define MAX_SGL_ENTS ((4096 - sizeof(struct aead_tsgl)) / \
		      sizeof(struct scatterlist) - 1)
95d887c52dSStephan Mueller 
96400c40cfSStephan Mueller static inline int aead_sndbuf(struct sock *sk)
97400c40cfSStephan Mueller {
98400c40cfSStephan Mueller 	struct alg_sock *ask = alg_sk(sk);
99400c40cfSStephan Mueller 	struct aead_ctx *ctx = ask->private;
100400c40cfSStephan Mueller 
101400c40cfSStephan Mueller 	return max_t(int, max_t(int, sk->sk_sndbuf & PAGE_MASK, PAGE_SIZE) -
102400c40cfSStephan Mueller 			  ctx->used, 0);
103400c40cfSStephan Mueller }
104400c40cfSStephan Mueller 
105400c40cfSStephan Mueller static inline bool aead_writable(struct sock *sk)
106400c40cfSStephan Mueller {
107400c40cfSStephan Mueller 	return PAGE_SIZE <= aead_sndbuf(sk);
108400c40cfSStephan Mueller }
109400c40cfSStephan Mueller 
110d887c52dSStephan Mueller static inline int aead_rcvbuf(struct sock *sk)
111400c40cfSStephan Mueller {
112d887c52dSStephan Mueller 	struct alg_sock *ask = alg_sk(sk);
113d887c52dSStephan Mueller 	struct aead_ctx *ctx = ask->private;
114d887c52dSStephan Mueller 
115d887c52dSStephan Mueller 	return max_t(int, max_t(int, sk->sk_rcvbuf & PAGE_MASK, PAGE_SIZE) -
116d887c52dSStephan Mueller 			  ctx->rcvused, 0);
117d887c52dSStephan Mueller }
118d887c52dSStephan Mueller 
119d887c52dSStephan Mueller static inline bool aead_readable(struct sock *sk)
120d887c52dSStephan Mueller {
121d887c52dSStephan Mueller 	return PAGE_SIZE <= aead_rcvbuf(sk);
122d887c52dSStephan Mueller }
123d887c52dSStephan Mueller 
124d887c52dSStephan Mueller static inline bool aead_sufficient_data(struct sock *sk)
125d887c52dSStephan Mueller {
126d887c52dSStephan Mueller 	struct alg_sock *ask = alg_sk(sk);
127d887c52dSStephan Mueller 	struct sock *psk = ask->parent;
128d887c52dSStephan Mueller 	struct alg_sock *pask = alg_sk(psk);
129d887c52dSStephan Mueller 	struct aead_ctx *ctx = ask->private;
130d887c52dSStephan Mueller 	struct aead_tfm *aeadc = pask->private;
131d887c52dSStephan Mueller 	struct crypto_aead *tfm = aeadc->aead;
132d887c52dSStephan Mueller 	unsigned int as = crypto_aead_authsize(tfm);
133400c40cfSStephan Mueller 
1340c1e16cdSStephan Mueller 	/*
1350c1e16cdSStephan Mueller 	 * The minimum amount of memory needed for an AEAD cipher is
1360c1e16cdSStephan Mueller 	 * the AAD and in case of decryption the tag.
1370c1e16cdSStephan Mueller 	 */
1380c1e16cdSStephan Mueller 	return ctx->used >= ctx->aead_assoclen + (ctx->enc ? 0 : as);
139400c40cfSStephan Mueller }
140400c40cfSStephan Mueller 
/*
 * aead_alloc_tsgl - make sure the tail TX SGL has a free entry
 * @sk: socket whose TX SGL list is extended
 *
 * If the list is empty or the tail aead_tsgl is full (MAX_SGL_ENTS entries
 * used), allocate a new aead_tsgl, chain it to the previous tail via the
 * reserved extra SG slot, and append it to ctx->tsgl_list.
 *
 * Return: 0 on success, -ENOMEM on allocation failure.
 */
static int aead_alloc_tsgl(struct sock *sk)
{
	struct alg_sock *ask = alg_sk(sk);
	struct aead_ctx *ctx = ask->private;
	struct aead_tsgl *sgl;
	struct scatterlist *sg = NULL;

	/*
	 * Compute the tail entry unconditionally; it is only dereferenced
	 * below once the list is known to be non-empty.
	 */
	sgl = list_entry(ctx->tsgl_list.prev, struct aead_tsgl, list);
	if (!list_empty(&ctx->tsgl_list))
		sg = sgl->sg;

	if (!sg || sgl->cur >= MAX_SGL_ENTS) {
		/* +1: reserve one slot for chaining to a subsequent SGL. */
		sgl = sock_kmalloc(sk, sizeof(*sgl) +
				       sizeof(sgl->sg[0]) * (MAX_SGL_ENTS + 1),
				   GFP_KERNEL);
		if (!sgl)
			return -ENOMEM;

		sg_init_table(sgl->sg, MAX_SGL_ENTS + 1);
		sgl->cur = 0;

		/* Link the old tail's spare slot to the new table. */
		if (sg)
			sg_chain(sg, MAX_SGL_ENTS + 1, sgl->sg);

		list_add_tail(&sgl->list, &ctx->tsgl_list);
	}

	return 0;
}
170d887c52dSStephan Mueller 
171d887c52dSStephan Mueller static unsigned int aead_count_tsgl(struct sock *sk, size_t bytes)
172d887c52dSStephan Mueller {
173d887c52dSStephan Mueller 	struct alg_sock *ask = alg_sk(sk);
174d887c52dSStephan Mueller 	struct aead_ctx *ctx = ask->private;
175d887c52dSStephan Mueller 	struct aead_tsgl *sgl, *tmp;
176400c40cfSStephan Mueller 	unsigned int i;
177d887c52dSStephan Mueller 	unsigned int sgl_count = 0;
178d887c52dSStephan Mueller 
179d887c52dSStephan Mueller 	if (!bytes)
180d887c52dSStephan Mueller 		return 0;
181d887c52dSStephan Mueller 
182d887c52dSStephan Mueller 	list_for_each_entry_safe(sgl, tmp, &ctx->tsgl_list, list) {
183d887c52dSStephan Mueller 		struct scatterlist *sg = sgl->sg;
184400c40cfSStephan Mueller 
185400c40cfSStephan Mueller 		for (i = 0; i < sgl->cur; i++) {
186d887c52dSStephan Mueller 			sgl_count++;
187d887c52dSStephan Mueller 			if (sg[i].length >= bytes)
188d887c52dSStephan Mueller 				return sgl_count;
189d887c52dSStephan Mueller 
190d887c52dSStephan Mueller 			bytes -= sg[i].length;
191d887c52dSStephan Mueller 		}
192d887c52dSStephan Mueller 	}
193d887c52dSStephan Mueller 
194d887c52dSStephan Mueller 	return sgl_count;
195d887c52dSStephan Mueller }
196d887c52dSStephan Mueller 
/*
 * aead_pull_tsgl - consume up to @used bytes from the head of the TX SGL
 * @sk: socket whose TX SGL list is consumed
 * @used: number of bytes to remove
 * @dst: optional destination SGL; when non-NULL the consumed pages are
 *	 handed over (page references move to @dst), otherwise the pages
 *	 are released with put_page()
 *
 * Fully drained aead_tsgl links are unlinked and freed. An entry that is
 * only partially consumed keeps its remainder (length/offset adjusted) and
 * the function returns early. When all TX data is gone, the merge flag is
 * reset so the next sendmsg starts a fresh SG entry.
 */
static void aead_pull_tsgl(struct sock *sk, size_t used,
			   struct scatterlist *dst)
{
	struct alg_sock *ask = alg_sk(sk);
	struct aead_ctx *ctx = ask->private;
	struct aead_tsgl *sgl;
	struct scatterlist *sg;
	unsigned int i;

	while (!list_empty(&ctx->tsgl_list)) {
		sgl = list_first_entry(&ctx->tsgl_list, struct aead_tsgl,
				       list);
		sg = sgl->sg;

		for (i = 0; i < sgl->cur; i++) {
			size_t plen = min_t(size_t, used, sg[i].length);
			struct page *page = sg_page(sg + i);

			if (!page)
				continue;

			/*
			 * Assumption: caller created aead_count_tsgl(len)
			 * SG entries in dst.
			 *
			 * NOTE(review): the dst index 'i' restarts at 0 for
			 * each aead_tsgl taken from the list; if a pull ever
			 * spans more than one list entry, earlier dst slots
			 * would be overwritten -- confirm callers never pull
			 * across a tsgl boundary in one call.
			 */
			if (dst)
				sg_set_page(dst + i, page, plen, sg[i].offset);

			sg[i].length -= plen;
			sg[i].offset += plen;

			used -= plen;
			ctx->used -= plen;

			/* Partially consumed entry: keep the remainder. */
			if (sg[i].length)
				return;

			/* Without a dst, ownership ends here. */
			if (!dst)
				put_page(page);
			sg_assign_page(sg + i, NULL);
		}

		list_del(&sgl->list);
		sock_kfree_s(sk, sgl, sizeof(*sgl) + sizeof(sgl->sg[0]) *
						     (MAX_SGL_ENTS + 1));
	}

	if (!ctx->used)
		ctx->merge = 0;
}
247d887c52dSStephan Mueller 
/*
 * aead_free_areq_sgls - release all SGLs attached to one async request
 * @areq: request whose RX list and private TX SGL are torn down
 *
 * Returns the RX byte accounting to the socket, frees every aead_rsgl
 * except the embedded first_rsgl, drops the page references held by the
 * private TX SGL and frees the TX SGL array itself.
 */
static void aead_free_areq_sgls(struct aead_async_req *areq)
{
	struct sock *sk = areq->sk;
	struct alg_sock *ask = alg_sk(sk);
	struct aead_ctx *ctx = ask->private;
	struct aead_rsgl *rsgl, *tmp;
	struct scatterlist *tsgl;
	struct scatterlist *sg;
	unsigned int i;

	list_for_each_entry_safe(rsgl, tmp, &areq->rsgl_list, list) {
		ctx->rcvused -= rsgl->sg_num_bytes;
		af_alg_free_sg(&rsgl->sgl);
		list_del(&rsgl->list);
		/* first_rsgl is embedded in areq, not separately allocated */
		if (rsgl != &areq->first_rsgl)
			sock_kfree_s(sk, rsgl, sizeof(*rsgl));
	}

	tsgl = areq->tsgl;
	for_each_sg(tsgl, sg, areq->tsgl_entries, i) {
		if (!sg_page(sg))
			continue;
		put_page(sg_page(sg));
	}

	if (areq->tsgl && areq->tsgl_entries)
		sock_kfree_s(sk, tsgl, areq->tsgl_entries * sizeof(*tsgl));
}
276d887c52dSStephan Mueller 
/*
 * aead_wait_for_wmem - sleep until TX buffer space becomes available
 * @sk: socket to wait on
 * @flags: msg flags; MSG_DONTWAIT turns the wait into -EAGAIN
 *
 * Return: 0 once aead_writable() holds, -EAGAIN for non-blocking callers,
 * -ERESTARTSYS if interrupted by a signal.
 */
static int aead_wait_for_wmem(struct sock *sk, unsigned int flags)
{
	DEFINE_WAIT_FUNC(wait, woken_wake_function);
	int err = -ERESTARTSYS;
	long timeout;

	if (flags & MSG_DONTWAIT)
		return -EAGAIN;

	sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);

	add_wait_queue(sk_sleep(sk), &wait);
	for (;;) {
		if (signal_pending(current))
			break;
		/* No deadline: wait until space or a signal. */
		timeout = MAX_SCHEDULE_TIMEOUT;
		if (sk_wait_event(sk, &timeout, aead_writable(sk), &wait)) {
			err = 0;
			break;
		}
	}
	remove_wait_queue(sk_sleep(sk), &wait);

	return err;
}
302400c40cfSStephan Mueller 
/*
 * aead_wmem_wakeup - wake sleepers once TX buffer space is available
 * @sk: socket whose waiters are notified
 *
 * No-op unless aead_writable() holds.
 *
 * NOTE(review): the poll flags used here (POLLIN/POLLRDNORM/POLLRDBAND and
 * SOCK_WAKE_WAITD/POLL_IN) are read-side although this reports write
 * space -- confirm this is intentional.
 */
static void aead_wmem_wakeup(struct sock *sk)
{
	struct socket_wq *wq;

	if (!aead_writable(sk))
		return;

	rcu_read_lock();
	wq = rcu_dereference(sk->sk_wq);
	if (skwq_has_sleeper(wq))
		wake_up_interruptible_sync_poll(&wq->wait, POLLIN |
							   POLLRDNORM |
							   POLLRDBAND);
	sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN);
	rcu_read_unlock();
}
319400c40cfSStephan Mueller 
/*
 * aead_wait_for_data - sleep until the sender signals end of TX data
 * @sk: socket to wait on
 * @flags: msg flags; MSG_DONTWAIT turns the wait into -EAGAIN
 *
 * Waits for ctx->more to clear, i.e. for the final sendmsg/sendpage
 * without MSG_MORE.
 *
 * Return: 0 when data is complete, -EAGAIN for non-blocking callers,
 * -ERESTARTSYS if interrupted by a signal.
 */
static int aead_wait_for_data(struct sock *sk, unsigned flags)
{
	DEFINE_WAIT_FUNC(wait, woken_wake_function);
	struct alg_sock *ask = alg_sk(sk);
	struct aead_ctx *ctx = ask->private;
	long timeout;
	int err = -ERESTARTSYS;

	if (flags & MSG_DONTWAIT)
		return -EAGAIN;

	sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);

	add_wait_queue(sk_sleep(sk), &wait);
	for (;;) {
		if (signal_pending(current))
			break;
		/* No deadline: wait until !ctx->more or a signal. */
		timeout = MAX_SCHEDULE_TIMEOUT;
		if (sk_wait_event(sk, &timeout, !ctx->more, &wait)) {
			err = 0;
			break;
		}
	}
	remove_wait_queue(sk_sleep(sk), &wait);

	sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);

	return err;
}
349400c40cfSStephan Mueller 
/*
 * aead_data_wakeup - wake readers once TX data has been queued
 * @sk: socket whose waiters are notified
 *
 * No-op while nothing has been submitted (ctx->used == 0).
 */
static void aead_data_wakeup(struct sock *sk)
{
	struct alg_sock *ask = alg_sk(sk);
	struct aead_ctx *ctx = ask->private;
	struct socket_wq *wq;

	if (!ctx->used)
		return;

	rcu_read_lock();
	wq = rcu_dereference(sk->sk_wq);
	if (skwq_has_sleeper(wq))
		wake_up_interruptible_sync_poll(&wq->wait, POLLOUT |
							   POLLRDNORM |
							   POLLRDBAND);
	sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
	rcu_read_unlock();
}
368400c40cfSStephan Mueller 
369eccd02f3SLinus Torvalds static int aead_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
370400c40cfSStephan Mueller {
371400c40cfSStephan Mueller 	struct sock *sk = sock->sk;
372400c40cfSStephan Mueller 	struct alg_sock *ask = alg_sk(sk);
373d887c52dSStephan Mueller 	struct sock *psk = ask->parent;
374d887c52dSStephan Mueller 	struct alg_sock *pask = alg_sk(psk);
375400c40cfSStephan Mueller 	struct aead_ctx *ctx = ask->private;
376d887c52dSStephan Mueller 	struct aead_tfm *aeadc = pask->private;
377d887c52dSStephan Mueller 	struct crypto_aead *tfm = aeadc->aead;
378d887c52dSStephan Mueller 	unsigned int ivsize = crypto_aead_ivsize(tfm);
379d887c52dSStephan Mueller 	struct aead_tsgl *sgl;
380400c40cfSStephan Mueller 	struct af_alg_control con = {};
381400c40cfSStephan Mueller 	long copied = 0;
382400c40cfSStephan Mueller 	bool enc = 0;
383400c40cfSStephan Mueller 	bool init = 0;
384d887c52dSStephan Mueller 	int err = 0;
385400c40cfSStephan Mueller 
386400c40cfSStephan Mueller 	if (msg->msg_controllen) {
387400c40cfSStephan Mueller 		err = af_alg_cmsg_send(msg, &con);
388400c40cfSStephan Mueller 		if (err)
389400c40cfSStephan Mueller 			return err;
390400c40cfSStephan Mueller 
391400c40cfSStephan Mueller 		init = 1;
392400c40cfSStephan Mueller 		switch (con.op) {
393400c40cfSStephan Mueller 		case ALG_OP_ENCRYPT:
394400c40cfSStephan Mueller 			enc = 1;
395400c40cfSStephan Mueller 			break;
396400c40cfSStephan Mueller 		case ALG_OP_DECRYPT:
397400c40cfSStephan Mueller 			enc = 0;
398400c40cfSStephan Mueller 			break;
399400c40cfSStephan Mueller 		default:
400400c40cfSStephan Mueller 			return -EINVAL;
401400c40cfSStephan Mueller 		}
402400c40cfSStephan Mueller 
403400c40cfSStephan Mueller 		if (con.iv && con.iv->ivlen != ivsize)
404400c40cfSStephan Mueller 			return -EINVAL;
405400c40cfSStephan Mueller 	}
406400c40cfSStephan Mueller 
407400c40cfSStephan Mueller 	lock_sock(sk);
408d887c52dSStephan Mueller 	if (!ctx->more && ctx->used) {
409d887c52dSStephan Mueller 		err = -EINVAL;
410400c40cfSStephan Mueller 		goto unlock;
411d887c52dSStephan Mueller 	}
412400c40cfSStephan Mueller 
413400c40cfSStephan Mueller 	if (init) {
414400c40cfSStephan Mueller 		ctx->enc = enc;
415400c40cfSStephan Mueller 		if (con.iv)
416400c40cfSStephan Mueller 			memcpy(ctx->iv, con.iv->iv, ivsize);
417400c40cfSStephan Mueller 
418400c40cfSStephan Mueller 		ctx->aead_assoclen = con.aead_assoclen;
419400c40cfSStephan Mueller 	}
420400c40cfSStephan Mueller 
421400c40cfSStephan Mueller 	while (size) {
422d887c52dSStephan Mueller 		struct scatterlist *sg;
423652d5b8aSLABBE Corentin 		size_t len = size;
424d887c52dSStephan Mueller 		size_t plen;
425400c40cfSStephan Mueller 
426400c40cfSStephan Mueller 		/* use the existing memory in an allocated page */
427400c40cfSStephan Mueller 		if (ctx->merge) {
428d887c52dSStephan Mueller 			sgl = list_entry(ctx->tsgl_list.prev,
429d887c52dSStephan Mueller 					 struct aead_tsgl, list);
430400c40cfSStephan Mueller 			sg = sgl->sg + sgl->cur - 1;
431400c40cfSStephan Mueller 			len = min_t(unsigned long, len,
432400c40cfSStephan Mueller 				    PAGE_SIZE - sg->offset - sg->length);
433400c40cfSStephan Mueller 			err = memcpy_from_msg(page_address(sg_page(sg)) +
434400c40cfSStephan Mueller 					      sg->offset + sg->length,
435400c40cfSStephan Mueller 					      msg, len);
436400c40cfSStephan Mueller 			if (err)
437400c40cfSStephan Mueller 				goto unlock;
438400c40cfSStephan Mueller 
439400c40cfSStephan Mueller 			sg->length += len;
440400c40cfSStephan Mueller 			ctx->merge = (sg->offset + sg->length) &
441400c40cfSStephan Mueller 				     (PAGE_SIZE - 1);
442400c40cfSStephan Mueller 
443400c40cfSStephan Mueller 			ctx->used += len;
444400c40cfSStephan Mueller 			copied += len;
445400c40cfSStephan Mueller 			size -= len;
446400c40cfSStephan Mueller 			continue;
447400c40cfSStephan Mueller 		}
448400c40cfSStephan Mueller 
449400c40cfSStephan Mueller 		if (!aead_writable(sk)) {
450d887c52dSStephan Mueller 			err = aead_wait_for_wmem(sk, msg->msg_flags);
451d887c52dSStephan Mueller 			if (err)
452400c40cfSStephan Mueller 				goto unlock;
453400c40cfSStephan Mueller 		}
454400c40cfSStephan Mueller 
455400c40cfSStephan Mueller 		/* allocate a new page */
456400c40cfSStephan Mueller 		len = min_t(unsigned long, size, aead_sndbuf(sk));
457400c40cfSStephan Mueller 
458d887c52dSStephan Mueller 		err = aead_alloc_tsgl(sk);
459d887c52dSStephan Mueller 		if (err)
460400c40cfSStephan Mueller 			goto unlock;
461400c40cfSStephan Mueller 
462d887c52dSStephan Mueller 		sgl = list_entry(ctx->tsgl_list.prev, struct aead_tsgl,
463d887c52dSStephan Mueller 				 list);
464d887c52dSStephan Mueller 		sg = sgl->sg;
465d887c52dSStephan Mueller 		if (sgl->cur)
466d887c52dSStephan Mueller 			sg_unmark_end(sg + sgl->cur - 1);
467d887c52dSStephan Mueller 
468d887c52dSStephan Mueller 		do {
469d887c52dSStephan Mueller 			unsigned int i = sgl->cur;
470d887c52dSStephan Mueller 
471652d5b8aSLABBE Corentin 			plen = min_t(size_t, len, PAGE_SIZE);
472400c40cfSStephan Mueller 
473d887c52dSStephan Mueller 			sg_assign_page(sg + i, alloc_page(GFP_KERNEL));
474d887c52dSStephan Mueller 			if (!sg_page(sg + i)) {
475400c40cfSStephan Mueller 				err = -ENOMEM;
476400c40cfSStephan Mueller 				goto unlock;
477400c40cfSStephan Mueller 			}
478400c40cfSStephan Mueller 
479d887c52dSStephan Mueller 			err = memcpy_from_msg(page_address(sg_page(sg + i)),
480d887c52dSStephan Mueller 					      msg, plen);
481d887c52dSStephan Mueller 			if (err) {
482d887c52dSStephan Mueller 				__free_page(sg_page(sg + i));
483d887c52dSStephan Mueller 				sg_assign_page(sg + i, NULL);
484d887c52dSStephan Mueller 				goto unlock;
485d887c52dSStephan Mueller 			}
486d887c52dSStephan Mueller 
487d887c52dSStephan Mueller 			sg[i].length = plen;
488400c40cfSStephan Mueller 			len -= plen;
489400c40cfSStephan Mueller 			ctx->used += plen;
490400c40cfSStephan Mueller 			copied += plen;
491400c40cfSStephan Mueller 			size -= plen;
492d887c52dSStephan Mueller 			sgl->cur++;
493d887c52dSStephan Mueller 		} while (len && sgl->cur < MAX_SGL_ENTS);
494d887c52dSStephan Mueller 
495d887c52dSStephan Mueller 		if (!size)
496d887c52dSStephan Mueller 			sg_mark_end(sg + sgl->cur - 1);
497d887c52dSStephan Mueller 
498400c40cfSStephan Mueller 		ctx->merge = plen & (PAGE_SIZE - 1);
499400c40cfSStephan Mueller 	}
500400c40cfSStephan Mueller 
501400c40cfSStephan Mueller 	err = 0;
502400c40cfSStephan Mueller 
503400c40cfSStephan Mueller 	ctx->more = msg->msg_flags & MSG_MORE;
504400c40cfSStephan Mueller 
505400c40cfSStephan Mueller unlock:
506400c40cfSStephan Mueller 	aead_data_wakeup(sk);
507400c40cfSStephan Mueller 	release_sock(sk);
508400c40cfSStephan Mueller 
509400c40cfSStephan Mueller 	return err ?: copied;
510400c40cfSStephan Mueller }
511400c40cfSStephan Mueller 
/*
 * aead_sendpage - splice one page of TX data without copying
 * @sock: user socket
 * @page: page supplied by the caller; a reference is taken
 * @offset: start of the data within @page
 * @size: number of bytes of @page to queue
 * @flags: MSG_MORE / MSG_SENDPAGE_NOTLAST control the "more data" state
 *
 * The page itself is referenced and placed into the TX SGL; no data copy
 * and no cipher operation happen here.
 *
 * Return: @size on success, or a negative error code.
 */
static ssize_t aead_sendpage(struct socket *sock, struct page *page,
			     int offset, size_t size, int flags)
{
	struct sock *sk = sock->sk;
	struct alg_sock *ask = alg_sk(sk);
	struct aead_ctx *ctx = ask->private;
	struct aead_tsgl *sgl;
	int err = -EINVAL;

	if (flags & MSG_SENDPAGE_NOTLAST)
		flags |= MSG_MORE;

	lock_sock(sk);
	/* Reject new data while a completed operation is still unread. */
	if (!ctx->more && ctx->used)
		goto unlock;

	if (!size)
		goto done;

	if (!aead_writable(sk)) {
		err = aead_wait_for_wmem(sk, flags);
		if (err)
			goto unlock;
	}

	err = aead_alloc_tsgl(sk);
	if (err)
		goto unlock;

	/* A spliced page can never be merged into. */
	ctx->merge = 0;
	sgl = list_entry(ctx->tsgl_list.prev, struct aead_tsgl, list);

	if (sgl->cur)
		sg_unmark_end(sgl->sg + sgl->cur - 1);

	sg_mark_end(sgl->sg + sgl->cur);

	get_page(page);
	sg_set_page(sgl->sg + sgl->cur, page, size, offset);
	sgl->cur++;
	ctx->used += size;

	err = 0;

done:
	ctx->more = flags & MSG_MORE;
unlock:
	aead_data_wakeup(sk);
	release_sock(sk);

	return err ?: size;
}
564400c40cfSStephan Mueller 
56583094e5eSTadeusz Struk static void aead_async_cb(struct crypto_async_request *_req, int err)
56683094e5eSTadeusz Struk {
567d887c52dSStephan Mueller 	struct aead_async_req *areq = _req->data;
568e6534aebSHerbert Xu 	struct sock *sk = areq->sk;
56983094e5eSTadeusz Struk 	struct kiocb *iocb = areq->iocb;
570d887c52dSStephan Mueller 	unsigned int resultlen;
57183094e5eSTadeusz Struk 
572d887c52dSStephan Mueller 	lock_sock(sk);
57383094e5eSTadeusz Struk 
574d887c52dSStephan Mueller 	/* Buffer size written by crypto operation. */
575d887c52dSStephan Mueller 	resultlen = areq->outlen;
57683094e5eSTadeusz Struk 
577d887c52dSStephan Mueller 	aead_free_areq_sgls(areq);
578d887c52dSStephan Mueller 	sock_kfree_s(sk, areq, areq->areqlen);
57983094e5eSTadeusz Struk 	__sock_put(sk);
58083094e5eSTadeusz Struk 
581d887c52dSStephan Mueller 	iocb->ki_complete(iocb, err ? err : resultlen, 0);
58283094e5eSTadeusz Struk 
58383094e5eSTadeusz Struk 	release_sock(sk);
58483094e5eSTadeusz Struk }
58583094e5eSTadeusz Struk 
586d887c52dSStephan Mueller static int _aead_recvmsg(struct socket *sock, struct msghdr *msg,
587d887c52dSStephan Mueller 			 size_t ignored, int flags)
588400c40cfSStephan Mueller {
589400c40cfSStephan Mueller 	struct sock *sk = sock->sk;
590400c40cfSStephan Mueller 	struct alg_sock *ask = alg_sk(sk);
591d887c52dSStephan Mueller 	struct sock *psk = ask->parent;
592d887c52dSStephan Mueller 	struct alg_sock *pask = alg_sk(psk);
593400c40cfSStephan Mueller 	struct aead_ctx *ctx = ask->private;
594d887c52dSStephan Mueller 	struct aead_tfm *aeadc = pask->private;
595d887c52dSStephan Mueller 	struct crypto_aead *tfm = aeadc->aead;
596d887c52dSStephan Mueller 	unsigned int as = crypto_aead_authsize(tfm);
597d887c52dSStephan Mueller 	unsigned int areqlen =
598d887c52dSStephan Mueller 		sizeof(struct aead_async_req) + crypto_aead_reqsize(tfm);
599d887c52dSStephan Mueller 	struct aead_async_req *areq;
600d887c52dSStephan Mueller 	struct aead_rsgl *last_rsgl = NULL;
601d887c52dSStephan Mueller 	int err = 0;
602d887c52dSStephan Mueller 	size_t used = 0;		/* [in]  TX bufs to be en/decrypted */
603d887c52dSStephan Mueller 	size_t outlen = 0;		/* [out] RX bufs produced by kernel */
604d887c52dSStephan Mueller 	size_t usedpages = 0;		/* [in]  RX bufs to be used from user */
605d887c52dSStephan Mueller 	size_t processed = 0;		/* [in]  TX bufs to be consumed */
606400c40cfSStephan Mueller 
607400c40cfSStephan Mueller 	/*
608d887c52dSStephan Mueller 	 * Data length provided by caller via sendmsg/sendpage that has not
609d887c52dSStephan Mueller 	 * yet been processed.
610400c40cfSStephan Mueller 	 */
611400c40cfSStephan Mueller 	used = ctx->used;
612400c40cfSStephan Mueller 
613400c40cfSStephan Mueller 	/*
614400c40cfSStephan Mueller 	 * Make sure sufficient data is present -- note, the same check
615400c40cfSStephan Mueller 	 * is also present in sendmsg/sendpage. The checks in sendpage/sendmsg
616400c40cfSStephan Mueller 	 * shall provide an information to the data sender that something is
617400c40cfSStephan Mueller 	 * wrong, but they are irrelevant to maintain the kernel integrity.
618400c40cfSStephan Mueller 	 * We need this check here too in case user space decides to not honor
619400c40cfSStephan Mueller 	 * the error message in sendmsg/sendpage and still call recvmsg. This
620400c40cfSStephan Mueller 	 * check here protects the kernel integrity.
621400c40cfSStephan Mueller 	 */
622d887c52dSStephan Mueller 	if (!aead_sufficient_data(sk))
623d887c52dSStephan Mueller 		return -EINVAL;
624400c40cfSStephan Mueller 
6250c1e16cdSStephan Mueller 	/*
6260c1e16cdSStephan Mueller 	 * Calculate the minimum output buffer size holding the result of the
6270c1e16cdSStephan Mueller 	 * cipher operation. When encrypting data, the receiving buffer is
6280c1e16cdSStephan Mueller 	 * larger by the tag length compared to the input buffer as the
6290c1e16cdSStephan Mueller 	 * encryption operation generates the tag. For decryption, the input
6300c1e16cdSStephan Mueller 	 * buffer provides the tag which is consumed resulting in only the
6310c1e16cdSStephan Mueller 	 * plaintext without a buffer for the tag returned to the caller.
6320c1e16cdSStephan Mueller 	 */
6330c1e16cdSStephan Mueller 	if (ctx->enc)
6340c1e16cdSStephan Mueller 		outlen = used + as;
6350c1e16cdSStephan Mueller 	else
6360c1e16cdSStephan Mueller 		outlen = used - as;
63719fa7752SHerbert Xu 
638400c40cfSStephan Mueller 	/*
639400c40cfSStephan Mueller 	 * The cipher operation input data is reduced by the associated data
640400c40cfSStephan Mueller 	 * length as this data is processed separately later on.
641400c40cfSStephan Mueller 	 */
6420c1e16cdSStephan Mueller 	used -= ctx->aead_assoclen;
643400c40cfSStephan Mueller 
644d887c52dSStephan Mueller 	/* Allocate cipher request for current operation. */
645d887c52dSStephan Mueller 	areq = sock_kmalloc(sk, areqlen, GFP_KERNEL);
646d887c52dSStephan Mueller 	if (unlikely(!areq))
647d887c52dSStephan Mueller 		return -ENOMEM;
648d887c52dSStephan Mueller 	areq->areqlen = areqlen;
649d887c52dSStephan Mueller 	areq->sk = sk;
650d887c52dSStephan Mueller 	INIT_LIST_HEAD(&areq->rsgl_list);
651d887c52dSStephan Mueller 	areq->tsgl = NULL;
652d887c52dSStephan Mueller 	areq->tsgl_entries = 0;
653400c40cfSStephan Mueller 
654d887c52dSStephan Mueller 	/* convert iovecs of output buffers into RX SGL */
655d887c52dSStephan Mueller 	while (outlen > usedpages && msg_data_left(msg)) {
656d887c52dSStephan Mueller 		struct aead_rsgl *rsgl;
657d887c52dSStephan Mueller 		size_t seglen;
658d887c52dSStephan Mueller 
659d887c52dSStephan Mueller 		/* limit the amount of readable buffers */
660d887c52dSStephan Mueller 		if (!aead_readable(sk))
661d887c52dSStephan Mueller 			break;
662d887c52dSStephan Mueller 
663d887c52dSStephan Mueller 		if (!ctx->used) {
664d887c52dSStephan Mueller 			err = aead_wait_for_data(sk, flags);
665d887c52dSStephan Mueller 			if (err)
666d887c52dSStephan Mueller 				goto free;
667d887c52dSStephan Mueller 		}
668d887c52dSStephan Mueller 
669d887c52dSStephan Mueller 		seglen = min_t(size_t, (outlen - usedpages),
670d887c52dSStephan Mueller 			       msg_data_left(msg));
671d887c52dSStephan Mueller 
672d887c52dSStephan Mueller 		if (list_empty(&areq->rsgl_list)) {
673d887c52dSStephan Mueller 			rsgl = &areq->first_rsgl;
67483094e5eSTadeusz Struk 		} else {
67583094e5eSTadeusz Struk 			rsgl = sock_kmalloc(sk, sizeof(*rsgl), GFP_KERNEL);
67683094e5eSTadeusz Struk 			if (unlikely(!rsgl)) {
67783094e5eSTadeusz Struk 				err = -ENOMEM;
678d887c52dSStephan Mueller 				goto free;
67983094e5eSTadeusz Struk 			}
68083094e5eSTadeusz Struk 		}
681d887c52dSStephan Mueller 
68283094e5eSTadeusz Struk 		rsgl->sgl.npages = 0;
683d887c52dSStephan Mueller 		list_add_tail(&rsgl->list, &areq->rsgl_list);
68483094e5eSTadeusz Struk 
685400c40cfSStephan Mueller 		/* make one iovec available as scatterlist */
68683094e5eSTadeusz Struk 		err = af_alg_make_sg(&rsgl->sgl, &msg->msg_iter, seglen);
687400c40cfSStephan Mueller 		if (err < 0)
688d887c52dSStephan Mueller 			goto free;
689d887c52dSStephan Mueller 
6907b2a18e0STadeusz Struk 		/* chain the new scatterlist with previous one */
69183094e5eSTadeusz Struk 		if (last_rsgl)
69283094e5eSTadeusz Struk 			af_alg_link_sg(&last_rsgl->sgl, &rsgl->sgl);
69383094e5eSTadeusz Struk 
69483094e5eSTadeusz Struk 		last_rsgl = rsgl;
695d887c52dSStephan Mueller 		usedpages += err;
696d887c52dSStephan Mueller 		ctx->rcvused += err;
697d887c52dSStephan Mueller 		rsgl->sg_num_bytes = err;
698400c40cfSStephan Mueller 		iov_iter_advance(&msg->msg_iter, err);
699400c40cfSStephan Mueller 	}
700400c40cfSStephan Mueller 
701d887c52dSStephan Mueller 	/*
702d887c52dSStephan Mueller 	 * Ensure output buffer is sufficiently large. If the caller provides
703d887c52dSStephan Mueller 	 * less buffer space, only use the relative required input size. This
704d887c52dSStephan Mueller 	 * allows AIO operation where the caller sent all data to be processed
705d887c52dSStephan Mueller 	 * and the AIO operation performs the operation on the different chunks
706d887c52dSStephan Mueller 	 * of the input data.
707d887c52dSStephan Mueller 	 */
7080c1e16cdSStephan Mueller 	if (usedpages < outlen) {
709d887c52dSStephan Mueller 		size_t less = outlen - usedpages;
710d887c52dSStephan Mueller 
711d887c52dSStephan Mueller 		if (used < less) {
7120c1e16cdSStephan Mueller 			err = -EINVAL;
713d887c52dSStephan Mueller 			goto free;
714d887c52dSStephan Mueller 		}
715d887c52dSStephan Mueller 		used -= less;
716d887c52dSStephan Mueller 		outlen -= less;
7170c1e16cdSStephan Mueller 	}
718400c40cfSStephan Mueller 
719d887c52dSStephan Mueller 	/*
720d887c52dSStephan Mueller 	 * Create a per request TX SGL for this request which tracks the
721d887c52dSStephan Mueller 	 * SG entries from the global TX SGL.
722d887c52dSStephan Mueller 	 */
723d887c52dSStephan Mueller 	processed = used + ctx->aead_assoclen;
724d887c52dSStephan Mueller 	areq->tsgl_entries = aead_count_tsgl(sk, processed);
725d887c52dSStephan Mueller 	if (!areq->tsgl_entries)
726d887c52dSStephan Mueller 		areq->tsgl_entries = 1;
727d887c52dSStephan Mueller 	areq->tsgl = sock_kmalloc(sk, sizeof(*areq->tsgl) * areq->tsgl_entries,
728d887c52dSStephan Mueller 				  GFP_KERNEL);
729d887c52dSStephan Mueller 	if (!areq->tsgl) {
730d887c52dSStephan Mueller 		err = -ENOMEM;
731d887c52dSStephan Mueller 		goto free;
732d887c52dSStephan Mueller 	}
733d887c52dSStephan Mueller 	sg_init_table(areq->tsgl, areq->tsgl_entries);
734d887c52dSStephan Mueller 	aead_pull_tsgl(sk, processed, areq->tsgl);
735400c40cfSStephan Mueller 
736d887c52dSStephan Mueller 	/* Initialize the crypto operation */
737d887c52dSStephan Mueller 	aead_request_set_crypt(&areq->aead_req, areq->tsgl,
738d887c52dSStephan Mueller 			       areq->first_rsgl.sgl.sg, used, ctx->iv);
739d887c52dSStephan Mueller 	aead_request_set_ad(&areq->aead_req, ctx->aead_assoclen);
740d887c52dSStephan Mueller 	aead_request_set_tfm(&areq->aead_req, tfm);
741d887c52dSStephan Mueller 
742d887c52dSStephan Mueller 	if (msg->msg_iocb && !is_sync_kiocb(msg->msg_iocb)) {
743d887c52dSStephan Mueller 		/* AIO operation */
744d887c52dSStephan Mueller 		areq->iocb = msg->msg_iocb;
745d887c52dSStephan Mueller 		aead_request_set_callback(&areq->aead_req,
746d887c52dSStephan Mueller 					  CRYPTO_TFM_REQ_MAY_BACKLOG,
747d887c52dSStephan Mueller 					  aead_async_cb, areq);
748d887c52dSStephan Mueller 		err = ctx->enc ? crypto_aead_encrypt(&areq->aead_req) :
749d887c52dSStephan Mueller 				 crypto_aead_decrypt(&areq->aead_req);
750d887c52dSStephan Mueller 	} else {
751d887c52dSStephan Mueller 		/* Synchronous operation */
752d887c52dSStephan Mueller 		aead_request_set_callback(&areq->aead_req,
753d887c52dSStephan Mueller 					  CRYPTO_TFM_REQ_MAY_BACKLOG,
754d887c52dSStephan Mueller 					  af_alg_complete, &ctx->completion);
755400c40cfSStephan Mueller 		err = af_alg_wait_for_completion(ctx->enc ?
756d887c52dSStephan Mueller 					 crypto_aead_encrypt(&areq->aead_req) :
757d887c52dSStephan Mueller 					 crypto_aead_decrypt(&areq->aead_req),
758400c40cfSStephan Mueller 					 &ctx->completion);
759400c40cfSStephan Mueller 	}
760400c40cfSStephan Mueller 
761d887c52dSStephan Mueller 	/* AIO operation in progress */
762d887c52dSStephan Mueller 	if (err == -EINPROGRESS) {
763d887c52dSStephan Mueller 		sock_hold(sk);
764400c40cfSStephan Mueller 
765d887c52dSStephan Mueller 		/* Remember output size that will be generated. */
766d887c52dSStephan Mueller 		areq->outlen = outlen;
767d887c52dSStephan Mueller 
768d887c52dSStephan Mueller 		return -EIOCBQUEUED;
76983094e5eSTadeusz Struk 	}
770d887c52dSStephan Mueller 
771d887c52dSStephan Mueller free:
772d887c52dSStephan Mueller 	aead_free_areq_sgls(areq);
773d887c52dSStephan Mueller 	if (areq)
774d887c52dSStephan Mueller 		sock_kfree_s(sk, areq, areqlen);
775400c40cfSStephan Mueller 
776400c40cfSStephan Mueller 	return err ? err : outlen;
777400c40cfSStephan Mueller }
778400c40cfSStephan Mueller 
779d887c52dSStephan Mueller static int aead_recvmsg(struct socket *sock, struct msghdr *msg,
780d887c52dSStephan Mueller 			size_t ignored, int flags)
78183094e5eSTadeusz Struk {
782d887c52dSStephan Mueller 	struct sock *sk = sock->sk;
783d887c52dSStephan Mueller 	int ret = 0;
784d887c52dSStephan Mueller 
785d887c52dSStephan Mueller 	lock_sock(sk);
786d887c52dSStephan Mueller 	while (msg_data_left(msg)) {
787d887c52dSStephan Mueller 		int err = _aead_recvmsg(sock, msg, ignored, flags);
788d887c52dSStephan Mueller 
789d887c52dSStephan Mueller 		/*
790d887c52dSStephan Mueller 		 * This error covers -EIOCBQUEUED which implies that we can
791d887c52dSStephan Mueller 		 * only handle one AIO request. If the caller wants to have
792d887c52dSStephan Mueller 		 * multiple AIO requests in parallel, he must make multiple
793d887c52dSStephan Mueller 		 * separate AIO calls.
794d887c52dSStephan Mueller 		 */
795d887c52dSStephan Mueller 		if (err <= 0) {
796d887c52dSStephan Mueller 			if (err == -EIOCBQUEUED || err == -EBADMSG)
797d887c52dSStephan Mueller 				ret = err;
798d887c52dSStephan Mueller 			goto out;
799d887c52dSStephan Mueller 		}
800d887c52dSStephan Mueller 
801d887c52dSStephan Mueller 		ret += err;
802d887c52dSStephan Mueller 	}
803d887c52dSStephan Mueller 
804d887c52dSStephan Mueller out:
805d887c52dSStephan Mueller 	aead_wmem_wakeup(sk);
806d887c52dSStephan Mueller 	release_sock(sk);
807d887c52dSStephan Mueller 	return ret;
80883094e5eSTadeusz Struk }
80983094e5eSTadeusz Struk 
810400c40cfSStephan Mueller static unsigned int aead_poll(struct file *file, struct socket *sock,
811400c40cfSStephan Mueller 			      poll_table *wait)
812400c40cfSStephan Mueller {
813400c40cfSStephan Mueller 	struct sock *sk = sock->sk;
814400c40cfSStephan Mueller 	struct alg_sock *ask = alg_sk(sk);
815400c40cfSStephan Mueller 	struct aead_ctx *ctx = ask->private;
816400c40cfSStephan Mueller 	unsigned int mask;
817400c40cfSStephan Mueller 
818400c40cfSStephan Mueller 	sock_poll_wait(file, sk_sleep(sk), wait);
819400c40cfSStephan Mueller 	mask = 0;
820400c40cfSStephan Mueller 
821400c40cfSStephan Mueller 	if (!ctx->more)
822400c40cfSStephan Mueller 		mask |= POLLIN | POLLRDNORM;
823400c40cfSStephan Mueller 
824400c40cfSStephan Mueller 	if (aead_writable(sk))
825400c40cfSStephan Mueller 		mask |= POLLOUT | POLLWRNORM | POLLWRBAND;
826400c40cfSStephan Mueller 
827400c40cfSStephan Mueller 	return mask;
828400c40cfSStephan Mueller }
829400c40cfSStephan Mueller 
/*
 * Socket operations installed on a data socket once a key is present:
 * only the data path (sendmsg/sendpage/recvmsg/poll) is functional,
 * every connection-setup style operation is rejected via sock_no_*().
 */
static struct proto_ops algif_aead_ops = {
	.family		=	PF_ALG,

	/* Operations that make no sense for an algif data socket. */
	.connect	=	sock_no_connect,
	.socketpair	=	sock_no_socketpair,
	.getname	=	sock_no_getname,
	.ioctl		=	sock_no_ioctl,
	.listen		=	sock_no_listen,
	.shutdown	=	sock_no_shutdown,
	.getsockopt	=	sock_no_getsockopt,
	.mmap		=	sock_no_mmap,
	.bind		=	sock_no_bind,
	.accept		=	sock_no_accept,
	.setsockopt	=	sock_no_setsockopt,

	/* The actual AEAD data path. */
	.release	=	af_alg_release,
	.sendmsg	=	aead_sendmsg,
	.sendpage	=	aead_sendpage,
	.recvmsg	=	aead_recvmsg,
	.poll		=	aead_poll,
};
851400c40cfSStephan Mueller 
/*
 * Verify that the parent tfm socket has a key set before allowing data
 * operations on this child socket. On the first successful check the
 * child takes a counted reference on the parent (ask->refcnt), so
 * subsequent calls short-circuit.
 */
static int aead_check_key(struct socket *sock)
{
	int err = 0;
	struct sock *psk;
	struct alg_sock *pask;
	struct aead_tfm *tfm;
	struct sock *sk = sock->sk;
	struct alg_sock *ask = alg_sk(sk);

	lock_sock(sk);
	/* Already validated earlier -- nothing to do. */
	if (ask->refcnt)
		goto unlock_child;

	psk = ask->parent;
	pask = alg_sk(ask->parent);
	tfm = pask->private;

	err = -ENOKEY;
	/* Nested annotation: the child socket lock is already held. */
	lock_sock_nested(psk, SINGLE_DEPTH_NESTING);
	if (!tfm->has_key)
		goto unlock;

	/* First counted child reference pins the parent socket. */
	if (!pask->refcnt++)
		sock_hold(psk);

	/*
	 * Mark this child as validated and drop the reference it held so
	 * far; the counted pask->refcnt reference taken above replaces it.
	 */
	ask->refcnt = 1;
	sock_put(psk);

	err = 0;

unlock:
	release_sock(psk);
unlock_child:
	release_sock(sk);

	return err;
}
8892a2a251fSStephan Mueller 
8902a2a251fSStephan Mueller static int aead_sendmsg_nokey(struct socket *sock, struct msghdr *msg,
8912a2a251fSStephan Mueller 				  size_t size)
8922a2a251fSStephan Mueller {
8932a2a251fSStephan Mueller 	int err;
8942a2a251fSStephan Mueller 
8952a2a251fSStephan Mueller 	err = aead_check_key(sock);
8962a2a251fSStephan Mueller 	if (err)
8972a2a251fSStephan Mueller 		return err;
8982a2a251fSStephan Mueller 
8992a2a251fSStephan Mueller 	return aead_sendmsg(sock, msg, size);
9002a2a251fSStephan Mueller }
9012a2a251fSStephan Mueller 
9022a2a251fSStephan Mueller static ssize_t aead_sendpage_nokey(struct socket *sock, struct page *page,
9032a2a251fSStephan Mueller 				       int offset, size_t size, int flags)
9042a2a251fSStephan Mueller {
9052a2a251fSStephan Mueller 	int err;
9062a2a251fSStephan Mueller 
9072a2a251fSStephan Mueller 	err = aead_check_key(sock);
9082a2a251fSStephan Mueller 	if (err)
9092a2a251fSStephan Mueller 		return err;
9102a2a251fSStephan Mueller 
9112a2a251fSStephan Mueller 	return aead_sendpage(sock, page, offset, size, flags);
9122a2a251fSStephan Mueller }
9132a2a251fSStephan Mueller 
9142a2a251fSStephan Mueller static int aead_recvmsg_nokey(struct socket *sock, struct msghdr *msg,
9152a2a251fSStephan Mueller 				  size_t ignored, int flags)
9162a2a251fSStephan Mueller {
9172a2a251fSStephan Mueller 	int err;
9182a2a251fSStephan Mueller 
9192a2a251fSStephan Mueller 	err = aead_check_key(sock);
9202a2a251fSStephan Mueller 	if (err)
9212a2a251fSStephan Mueller 		return err;
9222a2a251fSStephan Mueller 
9232a2a251fSStephan Mueller 	return aead_recvmsg(sock, msg, ignored, flags);
9242a2a251fSStephan Mueller }
9252a2a251fSStephan Mueller 
/*
 * Socket operations installed while no key has been set: the data-path
 * entry points go through the *_nokey wrappers, which invoke
 * aead_check_key() before delegating to the real implementation.
 */
static struct proto_ops algif_aead_ops_nokey = {
	.family		=	PF_ALG,

	/* Operations that make no sense for an algif data socket. */
	.connect	=	sock_no_connect,
	.socketpair	=	sock_no_socketpair,
	.getname	=	sock_no_getname,
	.ioctl		=	sock_no_ioctl,
	.listen		=	sock_no_listen,
	.shutdown	=	sock_no_shutdown,
	.getsockopt	=	sock_no_getsockopt,
	.mmap		=	sock_no_mmap,
	.bind		=	sock_no_bind,
	.accept		=	sock_no_accept,
	.setsockopt	=	sock_no_setsockopt,

	/* Key-checking wrappers around the AEAD data path. */
	.release	=	af_alg_release,
	.sendmsg	=	aead_sendmsg_nokey,
	.sendpage	=	aead_sendpage_nokey,
	.recvmsg	=	aead_recvmsg_nokey,
	.poll		=	aead_poll,
};
9472a2a251fSStephan Mueller 
948400c40cfSStephan Mueller static void *aead_bind(const char *name, u32 type, u32 mask)
949400c40cfSStephan Mueller {
9502a2a251fSStephan Mueller 	struct aead_tfm *tfm;
9512a2a251fSStephan Mueller 	struct crypto_aead *aead;
9522a2a251fSStephan Mueller 
9532a2a251fSStephan Mueller 	tfm = kzalloc(sizeof(*tfm), GFP_KERNEL);
9542a2a251fSStephan Mueller 	if (!tfm)
9552a2a251fSStephan Mueller 		return ERR_PTR(-ENOMEM);
9562a2a251fSStephan Mueller 
9572a2a251fSStephan Mueller 	aead = crypto_alloc_aead(name, type, mask);
9582a2a251fSStephan Mueller 	if (IS_ERR(aead)) {
9592a2a251fSStephan Mueller 		kfree(tfm);
9602a2a251fSStephan Mueller 		return ERR_CAST(aead);
9612a2a251fSStephan Mueller 	}
9622a2a251fSStephan Mueller 
9632a2a251fSStephan Mueller 	tfm->aead = aead;
9642a2a251fSStephan Mueller 
9652a2a251fSStephan Mueller 	return tfm;
966400c40cfSStephan Mueller }
967400c40cfSStephan Mueller 
968400c40cfSStephan Mueller static void aead_release(void *private)
969400c40cfSStephan Mueller {
9702a2a251fSStephan Mueller 	struct aead_tfm *tfm = private;
9712a2a251fSStephan Mueller 
9722a2a251fSStephan Mueller 	crypto_free_aead(tfm->aead);
9732a2a251fSStephan Mueller 	kfree(tfm);
974400c40cfSStephan Mueller }
975400c40cfSStephan Mueller 
976400c40cfSStephan Mueller static int aead_setauthsize(void *private, unsigned int authsize)
977400c40cfSStephan Mueller {
9782a2a251fSStephan Mueller 	struct aead_tfm *tfm = private;
9792a2a251fSStephan Mueller 
9802a2a251fSStephan Mueller 	return crypto_aead_setauthsize(tfm->aead, authsize);
981400c40cfSStephan Mueller }
982400c40cfSStephan Mueller 
983400c40cfSStephan Mueller static int aead_setkey(void *private, const u8 *key, unsigned int keylen)
984400c40cfSStephan Mueller {
9852a2a251fSStephan Mueller 	struct aead_tfm *tfm = private;
9862a2a251fSStephan Mueller 	int err;
9872a2a251fSStephan Mueller 
9882a2a251fSStephan Mueller 	err = crypto_aead_setkey(tfm->aead, key, keylen);
9892a2a251fSStephan Mueller 	tfm->has_key = !err;
9902a2a251fSStephan Mueller 
9912a2a251fSStephan Mueller 	return err;
992400c40cfSStephan Mueller }
993400c40cfSStephan Mueller 
/*
 * Destructor for a data socket: release all queued TX buffers, free the
 * IV and context, then drop the reference on the parent tfm socket.
 */
static void aead_sock_destruct(struct sock *sk)
{
	struct alg_sock *ask = alg_sk(sk);
	struct aead_ctx *ctx = ask->private;
	struct sock *psk = ask->parent;
	struct alg_sock *pask = alg_sk(psk);
	struct aead_tfm *aeadc = pask->private;
	struct crypto_aead *tfm = aeadc->aead;
	/* IV size comes from the parent's transform, matching the alloc. */
	unsigned int ivlen = crypto_aead_ivsize(tfm);

	/* Drop every TX SGL byte still tracked for this socket. */
	aead_pull_tsgl(sk, ctx->used, NULL);
	/* The IV may contain sensitive material -- zeroize on free. */
	sock_kzfree_s(sk, ctx->iv, ivlen);
	sock_kfree_s(sk, ctx, ctx->len);
	/* Must come last: the parent keeps the tfm alive until here. */
	af_alg_release_parent(sk);
}
1009400c40cfSStephan Mueller 
10102a2a251fSStephan Mueller static int aead_accept_parent_nokey(void *private, struct sock *sk)
1011400c40cfSStephan Mueller {
1012400c40cfSStephan Mueller 	struct aead_ctx *ctx;
1013400c40cfSStephan Mueller 	struct alg_sock *ask = alg_sk(sk);
10142a2a251fSStephan Mueller 	struct aead_tfm *tfm = private;
10152a2a251fSStephan Mueller 	struct crypto_aead *aead = tfm->aead;
1016d887c52dSStephan Mueller 	unsigned int len = sizeof(*ctx);
10172a2a251fSStephan Mueller 	unsigned int ivlen = crypto_aead_ivsize(aead);
1018400c40cfSStephan Mueller 
1019400c40cfSStephan Mueller 	ctx = sock_kmalloc(sk, len, GFP_KERNEL);
1020400c40cfSStephan Mueller 	if (!ctx)
1021400c40cfSStephan Mueller 		return -ENOMEM;
1022400c40cfSStephan Mueller 	memset(ctx, 0, len);
1023400c40cfSStephan Mueller 
1024400c40cfSStephan Mueller 	ctx->iv = sock_kmalloc(sk, ivlen, GFP_KERNEL);
1025400c40cfSStephan Mueller 	if (!ctx->iv) {
1026400c40cfSStephan Mueller 		sock_kfree_s(sk, ctx, len);
1027400c40cfSStephan Mueller 		return -ENOMEM;
1028400c40cfSStephan Mueller 	}
1029400c40cfSStephan Mueller 	memset(ctx->iv, 0, ivlen);
1030400c40cfSStephan Mueller 
1031d887c52dSStephan Mueller 	INIT_LIST_HEAD(&ctx->tsgl_list);
1032400c40cfSStephan Mueller 	ctx->len = len;
1033400c40cfSStephan Mueller 	ctx->used = 0;
1034d887c52dSStephan Mueller 	ctx->rcvused = 0;
1035400c40cfSStephan Mueller 	ctx->more = 0;
1036400c40cfSStephan Mueller 	ctx->merge = 0;
1037400c40cfSStephan Mueller 	ctx->enc = 0;
1038400c40cfSStephan Mueller 	ctx->aead_assoclen = 0;
1039400c40cfSStephan Mueller 	af_alg_init_completion(&ctx->completion);
1040400c40cfSStephan Mueller 
1041400c40cfSStephan Mueller 	ask->private = ctx;
1042400c40cfSStephan Mueller 
1043400c40cfSStephan Mueller 	sk->sk_destruct = aead_sock_destruct;
1044400c40cfSStephan Mueller 
1045400c40cfSStephan Mueller 	return 0;
1046400c40cfSStephan Mueller }
1047400c40cfSStephan Mueller 
10482a2a251fSStephan Mueller static int aead_accept_parent(void *private, struct sock *sk)
10492a2a251fSStephan Mueller {
10502a2a251fSStephan Mueller 	struct aead_tfm *tfm = private;
10512a2a251fSStephan Mueller 
10522a2a251fSStephan Mueller 	if (!tfm->has_key)
10532a2a251fSStephan Mueller 		return -ENOKEY;
10542a2a251fSStephan Mueller 
10552a2a251fSStephan Mueller 	return aead_accept_parent_nokey(private, sk);
10562a2a251fSStephan Mueller }
10572a2a251fSStephan Mueller 
/*
 * Type descriptor registered with the AF_ALG core: wires the "aead"
 * socket type to the bind/setkey/accept callbacks and the two proto_ops
 * tables (keyed and no-key variants) defined above.
 */
static const struct af_alg_type algif_type_aead = {
	.bind		=	aead_bind,
	.release	=	aead_release,
	.setkey		=	aead_setkey,
	.setauthsize	=	aead_setauthsize,
	.accept		=	aead_accept_parent,
	.accept_nokey	=	aead_accept_parent_nokey,
	.ops		=	&algif_aead_ops,
	.ops_nokey	=	&algif_aead_ops_nokey,
	.name		=	"aead",
	.owner		=	THIS_MODULE
};
1070400c40cfSStephan Mueller 
/* Module init: register the "aead" type with the AF_ALG core. */
static int __init algif_aead_init(void)
{
	return af_alg_register_type(&algif_type_aead);
}
1075400c40cfSStephan Mueller 
/* Module exit: unregister the type; failure here indicates a bug. */
static void __exit algif_aead_exit(void)
{
	int err = af_alg_unregister_type(&algif_type_aead);
	BUG_ON(err);
}
1081400c40cfSStephan Mueller 
1082400c40cfSStephan Mueller module_init(algif_aead_init);
1083400c40cfSStephan Mueller module_exit(algif_aead_exit);
1084400c40cfSStephan Mueller MODULE_LICENSE("GPL");
1085400c40cfSStephan Mueller MODULE_AUTHOR("Stephan Mueller <smueller@chronox.de>");
1086400c40cfSStephan Mueller MODULE_DESCRIPTION("AEAD kernel crypto API user space interface");
1087