/*
 * algif_skcipher: User-space interface for skcipher algorithms
 *
 * This file provides the user-space API for symmetric key ciphers.
 *
 * Copyright (c) 2010 Herbert Xu <herbert@gondor.apana.org.au>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 */

#include <crypto/scatterwalk.h>
#include <crypto/skcipher.h>
#include <crypto/if_alg.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/kernel.h>
#include <linux/sched/signal.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/net.h>
#include <net/sock.h>

struct skcipher_sg_list {
	struct list_head list;

	int cur;

	struct scatterlist sg[0];
};

struct skcipher_tfm {
	struct crypto_skcipher *skcipher;
	bool has_key;
};

struct skcipher_ctx {
	struct list_head tsgl;
	struct af_alg_sgl rsgl;

	void *iv;

	struct af_alg_completion completion;

	atomic_t inflight;
	size_t used;

	unsigned int len;
	bool more;
	bool merge;
	bool enc;

	struct skcipher_request req;
};

struct skcipher_async_rsgl {
	struct af_alg_sgl sgl;
	struct list_head list;
};

struct skcipher_async_req {
	struct kiocb *iocb;
	struct skcipher_async_rsgl first_sgl;
	struct list_head list;
	struct scatterlist *tsg;
	atomic_t *inflight;
	struct skcipher_request req;
};

#define MAX_SGL_ENTS ((4096 - sizeof(struct skcipher_sg_list)) / \
		      sizeof(struct scatterlist) - 1)

static void skcipher_free_async_sgls(struct skcipher_async_req *sreq)
{
	struct skcipher_async_rsgl *rsgl, *tmp;
	struct scatterlist *sgl;
	struct scatterlist *sg;
	int i, n;

	list_for_each_entry_safe(rsgl, tmp, &sreq->list, list) {
		af_alg_free_sg(&rsgl->sgl);
		if (rsgl != &sreq->first_sgl)
			kfree(rsgl);
	}
	sgl = sreq->tsg;
	n = sg_nents(sgl);
	for_each_sg(sgl, sg, n, i)
		put_page(sg_page(sg));

	kfree(sreq->tsg);
}

static void skcipher_async_cb(struct crypto_async_request *req, int err)
{
	struct skcipher_async_req *sreq = req->data;
	struct kiocb *iocb = sreq->iocb;

	atomic_dec(sreq->inflight);
	skcipher_free_async_sgls(sreq);
	kzfree(sreq);
	iocb->ki_complete(iocb, err, err);
}

static inline int skcipher_sndbuf(struct sock *sk)
{
	struct alg_sock *ask = alg_sk(sk);
	struct skcipher_ctx *ctx = ask->private;

	return max_t(int, max_t(int, sk->sk_sndbuf & PAGE_MASK, PAGE_SIZE) -
			  ctx->used, 0);
}

static inline bool skcipher_writable(struct sock *sk)
{
	return PAGE_SIZE <= skcipher_sndbuf(sk);
}

static int skcipher_alloc_sgl(struct sock *sk)
{
	struct alg_sock *ask = alg_sk(sk);
	struct skcipher_ctx *ctx = ask->private;
	struct skcipher_sg_list *sgl;
	struct scatterlist *sg = NULL;

	sgl = list_entry(ctx->tsgl.prev, struct skcipher_sg_list, list);
	if (!list_empty(&ctx->tsgl))
		sg = sgl->sg;

	if (!sg || sgl->cur >= MAX_SGL_ENTS) {
		sgl = sock_kmalloc(sk, sizeof(*sgl) +
				       sizeof(sgl->sg[0]) * (MAX_SGL_ENTS + 1),
				   GFP_KERNEL);
		if (!sgl)
			return -ENOMEM;

		sg_init_table(sgl->sg, MAX_SGL_ENTS + 1);
		sgl->cur = 0;

		if (sg)
			sg_chain(sg, MAX_SGL_ENTS + 1, sgl->sg);

		list_add_tail(&sgl->list, &ctx->tsgl);
	}

	return 0;
}

static void skcipher_pull_sgl(struct sock *sk, size_t used, int put)
{
	struct alg_sock *ask = alg_sk(sk);
	struct skcipher_ctx *ctx = ask->private;
	struct skcipher_sg_list *sgl;
	struct scatterlist *sg;
	int i;

	while (!list_empty(&ctx->tsgl)) {
		sgl = list_first_entry(&ctx->tsgl, struct skcipher_sg_list,
				       list);
		sg = sgl->sg;

		for (i = 0; i < sgl->cur; i++) {
			size_t plen = min_t(size_t, used, sg[i].length);

			if (!sg_page(sg + i))
				continue;

			sg[i].length -= plen;
			sg[i].offset += plen;

			used -= plen;
			ctx->used -= plen;

			if (sg[i].length)
				return;
			if (put)
				put_page(sg_page(sg + i));
			sg_assign_page(sg + i, NULL);
		}

		list_del(&sgl->list);
		sock_kfree_s(sk, sgl,
			     sizeof(*sgl) + sizeof(sgl->sg[0]) *
					    (MAX_SGL_ENTS + 1));
	}

	if (!ctx->used)
		ctx->merge = 0;
}

static void skcipher_free_sgl(struct sock *sk)
{
	struct alg_sock *ask = alg_sk(sk);
	struct skcipher_ctx *ctx = ask->private;

	skcipher_pull_sgl(sk, ctx->used, 1);
}

static int skcipher_wait_for_wmem(struct sock *sk, unsigned flags)
{
	DEFINE_WAIT_FUNC(wait, woken_wake_function);
	int err = -ERESTARTSYS;
	long timeout;

	if (flags & MSG_DONTWAIT)
		return -EAGAIN;

	sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);

	add_wait_queue(sk_sleep(sk), &wait);
	for (;;) {
		if (signal_pending(current))
			break;
		timeout = MAX_SCHEDULE_TIMEOUT;
		if (sk_wait_event(sk, &timeout, skcipher_writable(sk), &wait)) {
			err = 0;
			break;
		}
	}
	remove_wait_queue(sk_sleep(sk), &wait);

	return err;
}

static void skcipher_wmem_wakeup(struct sock *sk)
{
	struct socket_wq *wq;

	if (!skcipher_writable(sk))
		return;

	rcu_read_lock();
	wq = rcu_dereference(sk->sk_wq);
	if (skwq_has_sleeper(wq))
		wake_up_interruptible_sync_poll(&wq->wait, POLLIN |
							   POLLRDNORM |
							   POLLRDBAND);
	sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN);
	rcu_read_unlock();
}

static int skcipher_wait_for_data(struct sock *sk, unsigned flags)
{
	DEFINE_WAIT_FUNC(wait, woken_wake_function);
	struct alg_sock *ask = alg_sk(sk);
	struct skcipher_ctx *ctx = ask->private;
	long timeout;
	int err = -ERESTARTSYS;

	if (flags & MSG_DONTWAIT) {
		return -EAGAIN;
	}

	sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);

	add_wait_queue(sk_sleep(sk), &wait);
	for (;;) {
		if (signal_pending(current))
			break;
		timeout = MAX_SCHEDULE_TIMEOUT;
		if (sk_wait_event(sk, &timeout, ctx->used, &wait)) {
			err = 0;
			break;
		}
	}
	remove_wait_queue(sk_sleep(sk), &wait);

	sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);

	return err;
}

static void skcipher_data_wakeup(struct sock *sk)
{
	struct alg_sock *ask = alg_sk(sk);
	struct skcipher_ctx *ctx = ask->private;
	struct socket_wq *wq;

	if (!ctx->used)
		return;

	rcu_read_lock();
	wq = rcu_dereference(sk->sk_wq);
	if (skwq_has_sleeper(wq))
		wake_up_interruptible_sync_poll(&wq->wait, POLLOUT |
							   POLLRDNORM |
							   POLLRDBAND);
	sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
	rcu_read_unlock();
}

static int skcipher_sendmsg(struct socket *sock, struct msghdr *msg,
			    size_t size)
{
	struct sock *sk = sock->sk;
	struct alg_sock *ask = alg_sk(sk);
	struct sock *psk = ask->parent;
	struct alg_sock *pask = alg_sk(psk);
	struct skcipher_ctx *ctx = ask->private;
	struct skcipher_tfm *skc = pask->private;
	struct crypto_skcipher *tfm = skc->skcipher;
	unsigned ivsize = crypto_skcipher_ivsize(tfm);
	struct skcipher_sg_list *sgl;
	struct af_alg_control con = {};
	long copied = 0;
	bool enc = 0;
	bool init = 0;
	int err;
	int i;

	if (msg->msg_controllen) {
		err = af_alg_cmsg_send(msg, &con);
		if (err)
			return err;

		init = 1;
		switch (con.op) {
		case ALG_OP_ENCRYPT:
			enc = 1;
			break;
		case ALG_OP_DECRYPT:
			enc = 0;
			break;
		default:
			return -EINVAL;
		}

		if (con.iv && con.iv->ivlen != ivsize)
			return -EINVAL;
	}

	err = -EINVAL;

	lock_sock(sk);
	if (!ctx->more && ctx->used)
		goto unlock;

	if (init) {
		ctx->enc = enc;
		if (con.iv)
			memcpy(ctx->iv, con.iv->iv, ivsize);
	}

	while (size) {
		struct scatterlist *sg;
		unsigned long len = size;
		size_t plen;

		if (ctx->merge) {
			sgl = list_entry(ctx->tsgl.prev,
					 struct skcipher_sg_list, list);
			sg = sgl->sg + sgl->cur - 1;
			len = min_t(unsigned long, len,
				    PAGE_SIZE - sg->offset - sg->length);

			err = memcpy_from_msg(page_address(sg_page(sg)) +
					      sg->offset + sg->length,
					      msg, len);
			if (err)
				goto unlock;

			sg->length += len;
			ctx->merge = (sg->offset + sg->length) &
				     (PAGE_SIZE - 1);

			ctx->used += len;
			copied += len;
			size -= len;
			continue;
		}

		if (!skcipher_writable(sk)) {
			err = skcipher_wait_for_wmem(sk, msg->msg_flags);
			if (err)
				goto unlock;
		}

		len = min_t(unsigned long, len, skcipher_sndbuf(sk));

		err = skcipher_alloc_sgl(sk);
		if (err)
			goto unlock;

		sgl = list_entry(ctx->tsgl.prev, struct skcipher_sg_list, list);
		sg = sgl->sg;
		if (sgl->cur)
			sg_unmark_end(sg + sgl->cur - 1);
		do {
			i = sgl->cur;
			plen = min_t(size_t, len, PAGE_SIZE);

			sg_assign_page(sg + i, alloc_page(GFP_KERNEL));
			err = -ENOMEM;
			if (!sg_page(sg + i))
				goto unlock;

			err = memcpy_from_msg(page_address(sg_page(sg + i)),
					      msg, plen);
			if (err) {
				__free_page(sg_page(sg + i));
				sg_assign_page(sg + i, NULL);
				goto unlock;
			}

			sg[i].length = plen;
			len -= plen;
			ctx->used += plen;
			copied += plen;
			size -= plen;
			sgl->cur++;
		} while (len && sgl->cur < MAX_SGL_ENTS);

		if (!size)
			sg_mark_end(sg + sgl->cur - 1);

		ctx->merge = plen & (PAGE_SIZE - 1);
	}

	err = 0;

	ctx->more = msg->msg_flags & MSG_MORE;

unlock:
	skcipher_data_wakeup(sk);
	release_sock(sk);

	return copied ?: err;
}

static ssize_t skcipher_sendpage(struct socket *sock, struct page *page,
				 int offset, size_t size, int flags)
{
	struct sock *sk = sock->sk;
	struct alg_sock *ask = alg_sk(sk);
	struct skcipher_ctx *ctx = ask->private;
	struct skcipher_sg_list *sgl;
	int err = -EINVAL;

	if (flags & MSG_SENDPAGE_NOTLAST)
		flags |= MSG_MORE;

	lock_sock(sk);
	if (!ctx->more && ctx->used)
		goto unlock;

	if (!size)
		goto done;

	if (!skcipher_writable(sk)) {
		err = skcipher_wait_for_wmem(sk, flags);
		if (err)
			goto unlock;
	}

	err = skcipher_alloc_sgl(sk);
	if (err)
		goto unlock;

	ctx->merge = 0;
	sgl = list_entry(ctx->tsgl.prev, struct skcipher_sg_list, list);

	if (sgl->cur)
		sg_unmark_end(sgl->sg + sgl->cur - 1);

	sg_mark_end(sgl->sg + sgl->cur);
	get_page(page);
	sg_set_page(sgl->sg + sgl->cur, page, size, offset);
	sgl->cur++;
	ctx->used += size;

done:
	ctx->more = flags & MSG_MORE;

unlock:
	skcipher_data_wakeup(sk);
	release_sock(sk);

	return err ?: size;
}

static int skcipher_all_sg_nents(struct skcipher_ctx *ctx)
{
	struct skcipher_sg_list *sgl;
	struct scatterlist *sg;
	int nents = 0;

	list_for_each_entry(sgl, &ctx->tsgl, list) {
		sg = sgl->sg;

		while (!sg->length)
			sg++;

		nents += sg_nents(sg);
	}
	return nents;
}

static int skcipher_recvmsg_async(struct socket *sock, struct msghdr *msg,
				  int flags)
{
	struct sock *sk = sock->sk;
	struct alg_sock *ask = alg_sk(sk);
	struct sock *psk = ask->parent;
	struct alg_sock *pask = alg_sk(psk);
	struct skcipher_ctx *ctx = ask->private;
	struct skcipher_tfm *skc = pask->private;
	struct crypto_skcipher *tfm = skc->skcipher;
	struct skcipher_sg_list *sgl;
	struct scatterlist *sg;
	struct skcipher_async_req *sreq;
	struct skcipher_request *req;
	struct skcipher_async_rsgl *last_rsgl = NULL;
	unsigned int txbufs = 0, len = 0, tx_nents;
	unsigned int reqsize = crypto_skcipher_reqsize(tfm);
	unsigned int ivsize = crypto_skcipher_ivsize(tfm);
	int err = -ENOMEM;
	bool mark = false;
	char *iv;

	sreq = kzalloc(sizeof(*sreq) + reqsize + ivsize, GFP_KERNEL);
	if (unlikely(!sreq))
		goto out;

	req = &sreq->req;
	iv = (char *)(req + 1) + reqsize;
	sreq->iocb = msg->msg_iocb;
	INIT_LIST_HEAD(&sreq->list);
	sreq->inflight = &ctx->inflight;

	lock_sock(sk);
	tx_nents = skcipher_all_sg_nents(ctx);
	sreq->tsg = kcalloc(tx_nents, sizeof(*sg), GFP_KERNEL);
	if (unlikely(!sreq->tsg))
		goto unlock;
	sg_init_table(sreq->tsg, tx_nents);
	memcpy(iv, ctx->iv, ivsize);
	skcipher_request_set_tfm(req, tfm);
	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP,
				      skcipher_async_cb, sreq);

	while (iov_iter_count(&msg->msg_iter)) {
		struct skcipher_async_rsgl *rsgl;
		int used;

		if (!ctx->used) {
			err = skcipher_wait_for_data(sk, flags);
			if (err)
				goto free;
		}
		sgl = list_first_entry(&ctx->tsgl,
				       struct skcipher_sg_list, list);
		sg = sgl->sg;

		while (!sg->length)
			sg++;

		used = min_t(unsigned long, ctx->used,
			     iov_iter_count(&msg->msg_iter));
		used = min_t(unsigned long, used, sg->length);

		if (txbufs == tx_nents) {
			struct scatterlist *tmp;
			int x;
			/* Ran out of tx slots in async request
			 * need to expand */
			tmp = kcalloc(tx_nents * 2, sizeof(*tmp),
				      GFP_KERNEL);
			if (!tmp) {
				err = -ENOMEM;
				goto free;
			}

			sg_init_table(tmp, tx_nents * 2);
			for (x = 0; x < tx_nents; x++)
				sg_set_page(&tmp[x], sg_page(&sreq->tsg[x]),
					    sreq->tsg[x].length,
					    sreq->tsg[x].offset);
			kfree(sreq->tsg);
			sreq->tsg = tmp;
			tx_nents *= 2;
			mark = true;
		}
		/* Need to take over the tx sgl from ctx
		 * to the asynch req - these sgls will be freed later */
		sg_set_page(sreq->tsg + txbufs++, sg_page(sg), sg->length,
			    sg->offset);

		if (list_empty(&sreq->list)) {
			rsgl = &sreq->first_sgl;
			list_add_tail(&rsgl->list, &sreq->list);
		} else {
			rsgl = kmalloc(sizeof(*rsgl), GFP_KERNEL);
			if (!rsgl) {
				err = -ENOMEM;
				goto free;
			}
			list_add_tail(&rsgl->list, &sreq->list);
		}

		used = af_alg_make_sg(&rsgl->sgl, &msg->msg_iter, used);
		err = used;
		if (used < 0)
			goto free;
		if (last_rsgl)
			af_alg_link_sg(&last_rsgl->sgl, &rsgl->sgl);

		last_rsgl = rsgl;
		len += used;
		skcipher_pull_sgl(sk, used, 0);
		iov_iter_advance(&msg->msg_iter, used);
	}

	if (mark)
		sg_mark_end(sreq->tsg + txbufs - 1);

	skcipher_request_set_crypt(req, sreq->tsg, sreq->first_sgl.sgl.sg,
				   len, iv);
	err = ctx->enc ? crypto_skcipher_encrypt(req) :
			 crypto_skcipher_decrypt(req);
	if (err == -EINPROGRESS) {
		atomic_inc(&ctx->inflight);
		err = -EIOCBQUEUED;
		sreq = NULL;
		goto unlock;
	}
free:
	skcipher_free_async_sgls(sreq);
unlock:
	skcipher_wmem_wakeup(sk);
	release_sock(sk);
	kzfree(sreq);
out:
	return err;
}

static int skcipher_recvmsg_sync(struct socket *sock, struct msghdr *msg,
				 int flags)
{
	struct sock *sk = sock->sk;
	struct alg_sock *ask = alg_sk(sk);
	struct sock *psk = ask->parent;
	struct alg_sock *pask = alg_sk(psk);
	struct skcipher_ctx *ctx = ask->private;
	struct skcipher_tfm *skc = pask->private;
	struct crypto_skcipher *tfm = skc->skcipher;
	unsigned bs = crypto_skcipher_blocksize(tfm);
	struct skcipher_sg_list *sgl;
	struct scatterlist *sg;
	int err = -EAGAIN;
	int used;
	long copied = 0;

	lock_sock(sk);
	while (msg_data_left(msg)) {
		if (!ctx->used) {
			err = skcipher_wait_for_data(sk, flags);
			if (err)
				goto unlock;
		}

		used = min_t(unsigned long, ctx->used, msg_data_left(msg));

		used = af_alg_make_sg(&ctx->rsgl, &msg->msg_iter, used);
		err = used;
		if (err < 0)
			goto unlock;

		if (ctx->more || used < ctx->used)
			used -= used % bs;

		err = -EINVAL;
		if (!used)
			goto free;

		sgl = list_first_entry(&ctx->tsgl,
				       struct skcipher_sg_list, list);
		sg = sgl->sg;

		while (!sg->length)
			sg++;

		skcipher_request_set_crypt(&ctx->req, sg, ctx->rsgl.sg, used,
					   ctx->iv);

		err = af_alg_wait_for_completion(
				ctx->enc ?
					crypto_skcipher_encrypt(&ctx->req) :
					crypto_skcipher_decrypt(&ctx->req),
				&ctx->completion);

free:
		af_alg_free_sg(&ctx->rsgl);

		if (err)
			goto unlock;

		copied += used;
		skcipher_pull_sgl(sk, used, 1);
		iov_iter_advance(&msg->msg_iter, used);
	}

	err = 0;

unlock:
	skcipher_wmem_wakeup(sk);
	release_sock(sk);

	return copied ?: err;
}

static int skcipher_recvmsg(struct socket *sock, struct msghdr *msg,
			    size_t ignored, int flags)
{
	return (msg->msg_iocb && !is_sync_kiocb(msg->msg_iocb)) ?
		skcipher_recvmsg_async(sock, msg, flags) :
		skcipher_recvmsg_sync(sock, msg, flags);
}

static unsigned int skcipher_poll(struct file *file, struct socket *sock,
				  poll_table *wait)
{
	struct sock *sk = sock->sk;
	struct alg_sock *ask = alg_sk(sk);
	struct skcipher_ctx *ctx = ask->private;
	unsigned int mask;

	sock_poll_wait(file, sk_sleep(sk), wait);
	mask = 0;

	if (ctx->used)
		mask |= POLLIN | POLLRDNORM;

	if (skcipher_writable(sk))
		mask |= POLLOUT | POLLWRNORM | POLLWRBAND;

	return mask;
}

static struct proto_ops algif_skcipher_ops = {
	.family		=	PF_ALG,

	.connect	=	sock_no_connect,
	.socketpair	=	sock_no_socketpair,
	.getname	=	sock_no_getname,
	.ioctl		=	sock_no_ioctl,
	.listen		=	sock_no_listen,
	.shutdown	=	sock_no_shutdown,
	.getsockopt	=	sock_no_getsockopt,
	.mmap		=	sock_no_mmap,
	.bind		=	sock_no_bind,
	.accept		=	sock_no_accept,
	.setsockopt	=	sock_no_setsockopt,

	.release	=	af_alg_release,
	.sendmsg	=	skcipher_sendmsg,
	.sendpage	=	skcipher_sendpage,
	.recvmsg	=	skcipher_recvmsg,
	.poll		=	skcipher_poll,
};

static int skcipher_check_key(struct socket *sock)
{
	int err = 0;
	struct sock *psk;
	struct alg_sock *pask;
	struct skcipher_tfm *tfm;
	struct sock *sk = sock->sk;
	struct alg_sock *ask = alg_sk(sk);

	lock_sock(sk);
	if (ask->refcnt)
		goto unlock_child;

	psk = ask->parent;
	pask = alg_sk(ask->parent);
	tfm = pask->private;

	err = -ENOKEY;
	lock_sock_nested(psk, SINGLE_DEPTH_NESTING);
	if (!tfm->has_key)
		goto unlock;

	if (!pask->refcnt++)
		sock_hold(psk);

	ask->refcnt = 1;
	sock_put(psk);

	err = 0;

unlock:
	release_sock(psk);
unlock_child:
	release_sock(sk);

	return err;
}

static int skcipher_sendmsg_nokey(struct socket *sock, struct msghdr *msg,
				  size_t size)
{
	int err;

	err = skcipher_check_key(sock);
	if (err)
		return err;

	return skcipher_sendmsg(sock, msg, size);
}

static ssize_t skcipher_sendpage_nokey(struct socket *sock, struct page *page,
				       int offset, size_t size, int flags)
{
	int err;

	err = skcipher_check_key(sock);
	if (err)
		return err;

	return skcipher_sendpage(sock, page, offset, size, flags);
}

static int skcipher_recvmsg_nokey(struct socket *sock, struct msghdr *msg,
				  size_t ignored, int flags)
{
	int err;

	err = skcipher_check_key(sock);
	if (err)
		return err;

	return skcipher_recvmsg(sock, msg, ignored, flags);
}

static struct proto_ops algif_skcipher_ops_nokey = {
	.family		=	PF_ALG,

	.connect	=	sock_no_connect,
	.socketpair	=	sock_no_socketpair,
	.getname	=	sock_no_getname,
	.ioctl		=	sock_no_ioctl,
	.listen		=	sock_no_listen,
	.shutdown	=	sock_no_shutdown,
	.getsockopt	=	sock_no_getsockopt,
	.mmap		=	sock_no_mmap,
	.bind		=	sock_no_bind,
	.accept		=	sock_no_accept,
	.setsockopt	=	sock_no_setsockopt,

	.release	=	af_alg_release,
	.sendmsg	=	skcipher_sendmsg_nokey,
	.sendpage	=	skcipher_sendpage_nokey,
	.recvmsg	=	skcipher_recvmsg_nokey,
	.poll		=	skcipher_poll,
};

static void *skcipher_bind(const char *name, u32 type, u32 mask)
{
	struct skcipher_tfm *tfm;
	struct crypto_skcipher *skcipher;

	tfm = kzalloc(sizeof(*tfm), GFP_KERNEL);
	if (!tfm)
		return ERR_PTR(-ENOMEM);

	skcipher = crypto_alloc_skcipher(name, type, mask);
	if (IS_ERR(skcipher)) {
		kfree(tfm);
		return ERR_CAST(skcipher);
	}

	tfm->skcipher = skcipher;

	return tfm;
}

static void skcipher_release(void *private)
{
	struct skcipher_tfm *tfm = private;

	crypto_free_skcipher(tfm->skcipher);
	kfree(tfm);
}

static int skcipher_setkey(void *private, const u8 *key, unsigned int keylen)
{
	struct skcipher_tfm *tfm = private;
	int err;

	err = crypto_skcipher_setkey(tfm->skcipher, key, keylen);
	tfm->has_key = !err;

	return err;
}

static void skcipher_wait(struct sock *sk)
{
	struct alg_sock *ask = alg_sk(sk);
	struct skcipher_ctx *ctx = ask->private;
	int ctr = 0;

	while (atomic_read(&ctx->inflight) && ctr++ < 100)
		msleep(100);
}

static void skcipher_sock_destruct(struct sock *sk)
{
	struct alg_sock *ask = alg_sk(sk);
	struct skcipher_ctx *ctx = ask->private;
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(&ctx->req);

	if (atomic_read(&ctx->inflight))
		skcipher_wait(sk);

	skcipher_free_sgl(sk);
	sock_kzfree_s(sk, ctx->iv, crypto_skcipher_ivsize(tfm));
	sock_kfree_s(sk, ctx, ctx->len);
	af_alg_release_parent(sk);
}

static int skcipher_accept_parent_nokey(void *private, struct sock *sk)
{
	struct skcipher_ctx *ctx;
	struct alg_sock *ask = alg_sk(sk);
	struct skcipher_tfm *tfm = private;
	struct crypto_skcipher *skcipher = tfm->skcipher;
	unsigned int len = sizeof(*ctx) + crypto_skcipher_reqsize(skcipher);

	ctx = sock_kmalloc(sk, len, GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	ctx->iv = sock_kmalloc(sk, crypto_skcipher_ivsize(skcipher),
			       GFP_KERNEL);
	if (!ctx->iv) {
		sock_kfree_s(sk, ctx, len);
		return -ENOMEM;
	}

	memset(ctx->iv, 0, crypto_skcipher_ivsize(skcipher));

	INIT_LIST_HEAD(&ctx->tsgl);
	ctx->len = len;
	ctx->used = 0;
	ctx->more = 0;
	ctx->merge = 0;
	ctx->enc = 0;
	atomic_set(&ctx->inflight, 0);
	af_alg_init_completion(&ctx->completion);

	ask->private = ctx;

	skcipher_request_set_tfm(&ctx->req, skcipher);
	skcipher_request_set_callback(&ctx->req, CRYPTO_TFM_REQ_MAY_SLEEP |
						 CRYPTO_TFM_REQ_MAY_BACKLOG,
				      af_alg_complete, &ctx->completion);

	sk->sk_destruct = skcipher_sock_destruct;

	return 0;
}

static int skcipher_accept_parent(void *private, struct sock *sk)
{
	struct skcipher_tfm *tfm = private;

	if (!tfm->has_key && crypto_skcipher_has_setkey(tfm->skcipher))
		return -ENOKEY;

	return skcipher_accept_parent_nokey(private, sk);
}

static const struct af_alg_type algif_type_skcipher = {
	.bind		=	skcipher_bind,
	.release	=	skcipher_release,
	.setkey		=	skcipher_setkey,
	.accept		=	skcipher_accept_parent,
	.accept_nokey	=	skcipher_accept_parent_nokey,
	.ops		=	&algif_skcipher_ops,
	.ops_nokey	=	&algif_skcipher_ops_nokey,
	.name		=	"skcipher",
	.owner		=	THIS_MODULE
};

static int __init algif_skcipher_init(void)
{
	return af_alg_register_type(&algif_type_skcipher);
}

static void __exit algif_skcipher_exit(void)
{
	int err = af_alg_unregister_type(&algif_type_skcipher);
	BUG_ON(err);
}

module_init(algif_skcipher_init);
module_exit(algif_skcipher_exit);
MODULE_LICENSE("GPL");
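
/*
 * Usage note (editorial addition, not part of the kernel build): the sketch
 * below is a minimal user-space example of driving this interface through an
 * AF_ALG socket, following the pattern documented for the crypto user-space
 * API.  The algorithm name "cbc(aes)", the all-zero 16-byte key and IV, and
 * the 16-byte buffer are illustrative assumptions only; error checking is
 * omitted for brevity.
 *
 *	#include <linux/if_alg.h>
 *	#include <sys/socket.h>
 *	#include <string.h>
 *	#include <unistd.h>
 *
 *	int skcipher_example(void)
 *	{
 *		struct sockaddr_alg sa = {
 *			.salg_family = AF_ALG,
 *			.salg_type   = "skcipher",
 *			.salg_name   = "cbc(aes)",
 *		};
 *		char key[16] = { 0 }, iv[16] = { 0 };
 *		char buf[16] = "single block msg";
 *		char cbuf[CMSG_SPACE(4) + CMSG_SPACE(20)] = { 0 };
 *		struct iovec io = { .iov_base = buf, .iov_len = sizeof(buf) };
 *		struct msghdr msg = { 0 };
 *		struct cmsghdr *cmsg;
 *		struct af_alg_iv *alg_iv;
 *		int tfmfd, opfd;
 *
 *		// tfm socket: selects the algorithm and carries the key
 *		tfmfd = socket(AF_ALG, SOCK_SEQPACKET, 0);
 *		bind(tfmfd, (struct sockaddr *)&sa, sizeof(sa));
 *		setsockopt(tfmfd, SOL_ALG, ALG_SET_KEY, key, sizeof(key));
 *
 *		// op socket: one accept() per request context
 *		opfd = accept(tfmfd, NULL, 0);
 *
 *		msg.msg_control = cbuf;
 *		msg.msg_controllen = sizeof(cbuf);
 *		msg.msg_iov = &io;
 *		msg.msg_iovlen = 1;
 *
 *		// operation (encrypt/decrypt) via ALG_SET_OP cmsg
 *		cmsg = CMSG_FIRSTHDR(&msg);
 *		cmsg->cmsg_level = SOL_ALG;
 *		cmsg->cmsg_type = ALG_SET_OP;
 *		cmsg->cmsg_len = CMSG_LEN(4);
 *		*(__u32 *)CMSG_DATA(cmsg) = ALG_OP_ENCRYPT;
 *
 *		// IV via ALG_SET_IV cmsg (4-byte ivlen + iv bytes)
 *		cmsg = CMSG_NXTHDR(&msg, cmsg);
 *		cmsg->cmsg_level = SOL_ALG;
 *		cmsg->cmsg_type = ALG_SET_IV;
 *		cmsg->cmsg_len = CMSG_LEN(20);
 *		alg_iv = (void *)CMSG_DATA(cmsg);
 *		alg_iv->ivlen = sizeof(iv);
 *		memcpy(alg_iv->iv, iv, sizeof(iv));
 *
 *		sendmsg(opfd, &msg, 0);		// plaintext in
 *		read(opfd, buf, sizeof(buf));	// ciphertext out
 *
 *		close(opfd);
 *		close(tfmfd);
 *		return 0;
 *	}
 *
 * The split between the two descriptors mirrors the bind()/accept() flow
 * implemented above: skcipher_bind()/skcipher_setkey() back the tfm socket,
 * while each accepted socket gets its own skcipher_ctx and request state.
 */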