18ff59090SHerbert Xu /* 28ff59090SHerbert Xu * algif_skcipher: User-space interface for skcipher algorithms 38ff59090SHerbert Xu * 48ff59090SHerbert Xu * This file provides the user-space API for symmetric key ciphers. 58ff59090SHerbert Xu * 68ff59090SHerbert Xu * Copyright (c) 2010 Herbert Xu <herbert@gondor.apana.org.au> 78ff59090SHerbert Xu * 88ff59090SHerbert Xu * This program is free software; you can redistribute it and/or modify it 98ff59090SHerbert Xu * under the terms of the GNU General Public License as published by the Free 108ff59090SHerbert Xu * Software Foundation; either version 2 of the License, or (at your option) 118ff59090SHerbert Xu * any later version. 128ff59090SHerbert Xu * 13e870456dSStephan Mueller * The following concept of the memory management is used: 14e870456dSStephan Mueller * 15e870456dSStephan Mueller * The kernel maintains two SGLs, the TX SGL and the RX SGL. The TX SGL is 16e870456dSStephan Mueller * filled by user space with the data submitted via sendpage/sendmsg. Filling 17e870456dSStephan Mueller * up the TX SGL does not cause a crypto operation -- the data will only be 18e870456dSStephan Mueller * tracked by the kernel. Upon receipt of one recvmsg call, the caller must 19e870456dSStephan Mueller * provide a buffer which is tracked with the RX SGL. 20e870456dSStephan Mueller * 21e870456dSStephan Mueller * During the processing of the recvmsg operation, the cipher request is 22e870456dSStephan Mueller * allocated and prepared. As part of the recvmsg operation, the processed 23e870456dSStephan Mueller * TX buffers are extracted from the TX SGL into a separate SGL. 24e870456dSStephan Mueller * 25e870456dSStephan Mueller * After the completion of the crypto operation, the RX SGL and the cipher 26e870456dSStephan Mueller * request is released. The extracted TX SGL parts are released together with 27e870456dSStephan Mueller * the RX SGL release. 
288ff59090SHerbert Xu */ 298ff59090SHerbert Xu 308ff59090SHerbert Xu #include <crypto/scatterwalk.h> 318ff59090SHerbert Xu #include <crypto/skcipher.h> 328ff59090SHerbert Xu #include <crypto/if_alg.h> 338ff59090SHerbert Xu #include <linux/init.h> 348ff59090SHerbert Xu #include <linux/list.h> 358ff59090SHerbert Xu #include <linux/kernel.h> 36174cd4b1SIngo Molnar #include <linux/sched/signal.h> 378ff59090SHerbert Xu #include <linux/mm.h> 388ff59090SHerbert Xu #include <linux/module.h> 398ff59090SHerbert Xu #include <linux/net.h> 408ff59090SHerbert Xu #include <net/sock.h> 418ff59090SHerbert Xu 42e870456dSStephan Mueller struct skcipher_tsgl { 438ff59090SHerbert Xu struct list_head list; 448ff59090SHerbert Xu int cur; 458ff59090SHerbert Xu struct scatterlist sg[0]; 468ff59090SHerbert Xu }; 478ff59090SHerbert Xu 48e870456dSStephan Mueller struct skcipher_rsgl { 49e870456dSStephan Mueller struct af_alg_sgl sgl; 50e870456dSStephan Mueller struct list_head list; 51e870456dSStephan Mueller size_t sg_num_bytes; 52e870456dSStephan Mueller }; 53e870456dSStephan Mueller 54e870456dSStephan Mueller struct skcipher_async_req { 55e870456dSStephan Mueller struct kiocb *iocb; 56e870456dSStephan Mueller struct sock *sk; 57e870456dSStephan Mueller 58e870456dSStephan Mueller struct skcipher_rsgl first_sgl; 59e870456dSStephan Mueller struct list_head rsgl_list; 60e870456dSStephan Mueller 61e870456dSStephan Mueller struct scatterlist *tsgl; 62e870456dSStephan Mueller unsigned int tsgl_entries; 63e870456dSStephan Mueller 64e870456dSStephan Mueller unsigned int areqlen; 65e870456dSStephan Mueller struct skcipher_request req; 66e870456dSStephan Mueller }; 67e870456dSStephan Mueller 68dd504589SHerbert Xu struct skcipher_tfm { 69dd504589SHerbert Xu struct crypto_skcipher *skcipher; 70dd504589SHerbert Xu bool has_key; 71dd504589SHerbert Xu }; 72dd504589SHerbert Xu 738ff59090SHerbert Xu struct skcipher_ctx { 74e870456dSStephan Mueller struct list_head tsgl_list; 758ff59090SHerbert Xu 
768ff59090SHerbert Xu void *iv; 778ff59090SHerbert Xu 788ff59090SHerbert Xu struct af_alg_completion completion; 798ff59090SHerbert Xu 80652d5b8aSLABBE Corentin size_t used; 81e870456dSStephan Mueller size_t rcvused; 828ff59090SHerbert Xu 838ff59090SHerbert Xu bool more; 848ff59090SHerbert Xu bool merge; 858ff59090SHerbert Xu bool enc; 868ff59090SHerbert Xu 87e870456dSStephan Mueller unsigned int len; 888ff59090SHerbert Xu }; 898ff59090SHerbert Xu 90e870456dSStephan Mueller #define MAX_SGL_ENTS ((4096 - sizeof(struct skcipher_tsgl)) / \ 918ff59090SHerbert Xu sizeof(struct scatterlist) - 1) 928ff59090SHerbert Xu 930f6bb83cSHerbert Xu static inline int skcipher_sndbuf(struct sock *sk) 948ff59090SHerbert Xu { 958ff59090SHerbert Xu struct alg_sock *ask = alg_sk(sk); 968ff59090SHerbert Xu struct skcipher_ctx *ctx = ask->private; 978ff59090SHerbert Xu 980f6bb83cSHerbert Xu return max_t(int, max_t(int, sk->sk_sndbuf & PAGE_MASK, PAGE_SIZE) - 990f6bb83cSHerbert Xu ctx->used, 0); 1000f6bb83cSHerbert Xu } 1010f6bb83cSHerbert Xu 1020f6bb83cSHerbert Xu static inline bool skcipher_writable(struct sock *sk) 1030f6bb83cSHerbert Xu { 1040f6bb83cSHerbert Xu return PAGE_SIZE <= skcipher_sndbuf(sk); 1058ff59090SHerbert Xu } 1068ff59090SHerbert Xu 107e870456dSStephan Mueller static inline int skcipher_rcvbuf(struct sock *sk) 1088ff59090SHerbert Xu { 1098ff59090SHerbert Xu struct alg_sock *ask = alg_sk(sk); 1108ff59090SHerbert Xu struct skcipher_ctx *ctx = ask->private; 111e870456dSStephan Mueller 112e870456dSStephan Mueller return max_t(int, max_t(int, sk->sk_rcvbuf & PAGE_MASK, PAGE_SIZE) - 113e870456dSStephan Mueller ctx->rcvused, 0); 114e870456dSStephan Mueller } 115e870456dSStephan Mueller 116e870456dSStephan Mueller static inline bool skcipher_readable(struct sock *sk) 117e870456dSStephan Mueller { 118e870456dSStephan Mueller return PAGE_SIZE <= skcipher_rcvbuf(sk); 119e870456dSStephan Mueller } 120e870456dSStephan Mueller 121e870456dSStephan Mueller static int 
skcipher_alloc_tsgl(struct sock *sk) 122e870456dSStephan Mueller { 123e870456dSStephan Mueller struct alg_sock *ask = alg_sk(sk); 124e870456dSStephan Mueller struct skcipher_ctx *ctx = ask->private; 125e870456dSStephan Mueller struct skcipher_tsgl *sgl; 1268ff59090SHerbert Xu struct scatterlist *sg = NULL; 1278ff59090SHerbert Xu 128e870456dSStephan Mueller sgl = list_entry(ctx->tsgl_list.prev, struct skcipher_tsgl, list); 129e870456dSStephan Mueller if (!list_empty(&ctx->tsgl_list)) 1308ff59090SHerbert Xu sg = sgl->sg; 1318ff59090SHerbert Xu 1328ff59090SHerbert Xu if (!sg || sgl->cur >= MAX_SGL_ENTS) { 1338ff59090SHerbert Xu sgl = sock_kmalloc(sk, sizeof(*sgl) + 1348ff59090SHerbert Xu sizeof(sgl->sg[0]) * (MAX_SGL_ENTS + 1), 1358ff59090SHerbert Xu GFP_KERNEL); 1368ff59090SHerbert Xu if (!sgl) 1378ff59090SHerbert Xu return -ENOMEM; 1388ff59090SHerbert Xu 1398ff59090SHerbert Xu sg_init_table(sgl->sg, MAX_SGL_ENTS + 1); 1408ff59090SHerbert Xu sgl->cur = 0; 1418ff59090SHerbert Xu 1428ff59090SHerbert Xu if (sg) 143c56f6d12SDan Williams sg_chain(sg, MAX_SGL_ENTS + 1, sgl->sg); 1448ff59090SHerbert Xu 145e870456dSStephan Mueller list_add_tail(&sgl->list, &ctx->tsgl_list); 1468ff59090SHerbert Xu } 1478ff59090SHerbert Xu 1488ff59090SHerbert Xu return 0; 1498ff59090SHerbert Xu } 1508ff59090SHerbert Xu 151e870456dSStephan Mueller static unsigned int skcipher_count_tsgl(struct sock *sk, size_t bytes) 1528ff59090SHerbert Xu { 1538ff59090SHerbert Xu struct alg_sock *ask = alg_sk(sk); 1548ff59090SHerbert Xu struct skcipher_ctx *ctx = ask->private; 155e870456dSStephan Mueller struct skcipher_tsgl *sgl, *tmp; 156e870456dSStephan Mueller unsigned int i; 157e870456dSStephan Mueller unsigned int sgl_count = 0; 1588ff59090SHerbert Xu 159e870456dSStephan Mueller if (!bytes) 160e870456dSStephan Mueller return 0; 161e870456dSStephan Mueller 162e870456dSStephan Mueller list_for_each_entry_safe(sgl, tmp, &ctx->tsgl_list, list) { 163e870456dSStephan Mueller struct scatterlist *sg = sgl->sg; 
164e870456dSStephan Mueller 165e870456dSStephan Mueller for (i = 0; i < sgl->cur; i++) { 166e870456dSStephan Mueller sgl_count++; 167e870456dSStephan Mueller if (sg[i].length >= bytes) 168e870456dSStephan Mueller return sgl_count; 169e870456dSStephan Mueller 170e870456dSStephan Mueller bytes -= sg[i].length; 171e870456dSStephan Mueller } 172e870456dSStephan Mueller } 173e870456dSStephan Mueller 174e870456dSStephan Mueller return sgl_count; 175e870456dSStephan Mueller } 176e870456dSStephan Mueller 177e870456dSStephan Mueller static void skcipher_pull_tsgl(struct sock *sk, size_t used, 178e870456dSStephan Mueller struct scatterlist *dst) 179e870456dSStephan Mueller { 180e870456dSStephan Mueller struct alg_sock *ask = alg_sk(sk); 181e870456dSStephan Mueller struct skcipher_ctx *ctx = ask->private; 182e870456dSStephan Mueller struct skcipher_tsgl *sgl; 183e870456dSStephan Mueller struct scatterlist *sg; 184e870456dSStephan Mueller unsigned int i; 185e870456dSStephan Mueller 186e870456dSStephan Mueller while (!list_empty(&ctx->tsgl_list)) { 187e870456dSStephan Mueller sgl = list_first_entry(&ctx->tsgl_list, struct skcipher_tsgl, 1888ff59090SHerbert Xu list); 1898ff59090SHerbert Xu sg = sgl->sg; 1908ff59090SHerbert Xu 1918ff59090SHerbert Xu for (i = 0; i < sgl->cur; i++) { 192652d5b8aSLABBE Corentin size_t plen = min_t(size_t, used, sg[i].length); 193e870456dSStephan Mueller struct page *page = sg_page(sg + i); 1948ff59090SHerbert Xu 195e870456dSStephan Mueller if (!page) 1968ff59090SHerbert Xu continue; 1978ff59090SHerbert Xu 198e870456dSStephan Mueller /* 199e870456dSStephan Mueller * Assumption: caller created skcipher_count_tsgl(len) 200e870456dSStephan Mueller * SG entries in dst. 
201e870456dSStephan Mueller */ 202e870456dSStephan Mueller if (dst) 203e870456dSStephan Mueller sg_set_page(dst + i, page, plen, sg[i].offset); 204e870456dSStephan Mueller 2058ff59090SHerbert Xu sg[i].length -= plen; 2068ff59090SHerbert Xu sg[i].offset += plen; 2078ff59090SHerbert Xu 2088ff59090SHerbert Xu used -= plen; 2098ff59090SHerbert Xu ctx->used -= plen; 2108ff59090SHerbert Xu 2118ff59090SHerbert Xu if (sg[i].length) 2128ff59090SHerbert Xu return; 213e870456dSStephan Mueller 214e870456dSStephan Mueller if (!dst) 215e870456dSStephan Mueller put_page(page); 2168ff59090SHerbert Xu sg_assign_page(sg + i, NULL); 2178ff59090SHerbert Xu } 2188ff59090SHerbert Xu 2198ff59090SHerbert Xu list_del(&sgl->list); 220e870456dSStephan Mueller sock_kfree_s(sk, sgl, sizeof(*sgl) + sizeof(sgl->sg[0]) * 2218ff59090SHerbert Xu (MAX_SGL_ENTS + 1)); 2228ff59090SHerbert Xu } 2238ff59090SHerbert Xu 2248ff59090SHerbert Xu if (!ctx->used) 2258ff59090SHerbert Xu ctx->merge = 0; 2268ff59090SHerbert Xu } 2278ff59090SHerbert Xu 228e870456dSStephan Mueller static void skcipher_free_areq_sgls(struct skcipher_async_req *areq) 2298ff59090SHerbert Xu { 230e870456dSStephan Mueller struct sock *sk = areq->sk; 2318ff59090SHerbert Xu struct alg_sock *ask = alg_sk(sk); 2328ff59090SHerbert Xu struct skcipher_ctx *ctx = ask->private; 233e870456dSStephan Mueller struct skcipher_rsgl *rsgl, *tmp; 234e870456dSStephan Mueller struct scatterlist *tsgl; 235e870456dSStephan Mueller struct scatterlist *sg; 236e870456dSStephan Mueller unsigned int i; 2378ff59090SHerbert Xu 238e870456dSStephan Mueller list_for_each_entry_safe(rsgl, tmp, &areq->rsgl_list, list) { 239e870456dSStephan Mueller ctx->rcvused -= rsgl->sg_num_bytes; 240e870456dSStephan Mueller af_alg_free_sg(&rsgl->sgl); 241e870456dSStephan Mueller list_del(&rsgl->list); 242e870456dSStephan Mueller if (rsgl != &areq->first_sgl) 243e870456dSStephan Mueller sock_kfree_s(sk, rsgl, sizeof(*rsgl)); 244e870456dSStephan Mueller } 245e870456dSStephan Mueller 
246e870456dSStephan Mueller tsgl = areq->tsgl; 247e870456dSStephan Mueller for_each_sg(tsgl, sg, areq->tsgl_entries, i) { 248e870456dSStephan Mueller if (!sg_page(sg)) 249e870456dSStephan Mueller continue; 250e870456dSStephan Mueller put_page(sg_page(sg)); 251e870456dSStephan Mueller } 252e870456dSStephan Mueller 253e870456dSStephan Mueller if (areq->tsgl && areq->tsgl_entries) 254e870456dSStephan Mueller sock_kfree_s(sk, tsgl, areq->tsgl_entries * sizeof(*tsgl)); 2558ff59090SHerbert Xu } 2568ff59090SHerbert Xu 2578ff59090SHerbert Xu static int skcipher_wait_for_wmem(struct sock *sk, unsigned flags) 2588ff59090SHerbert Xu { 259d9dc8b0fSWANG Cong DEFINE_WAIT_FUNC(wait, woken_wake_function); 2608ff59090SHerbert Xu int err = -ERESTARTSYS; 261d9dc8b0fSWANG Cong long timeout; 2628ff59090SHerbert Xu 2638ff59090SHerbert Xu if (flags & MSG_DONTWAIT) 2648ff59090SHerbert Xu return -EAGAIN; 2658ff59090SHerbert Xu 2669cd3e072SEric Dumazet sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk); 2678ff59090SHerbert Xu 268d9dc8b0fSWANG Cong add_wait_queue(sk_sleep(sk), &wait); 2698ff59090SHerbert Xu for (;;) { 2708ff59090SHerbert Xu if (signal_pending(current)) 2718ff59090SHerbert Xu break; 2728ff59090SHerbert Xu timeout = MAX_SCHEDULE_TIMEOUT; 273d9dc8b0fSWANG Cong if (sk_wait_event(sk, &timeout, skcipher_writable(sk), &wait)) { 2748ff59090SHerbert Xu err = 0; 2758ff59090SHerbert Xu break; 2768ff59090SHerbert Xu } 2778ff59090SHerbert Xu } 278d9dc8b0fSWANG Cong remove_wait_queue(sk_sleep(sk), &wait); 2798ff59090SHerbert Xu 2808ff59090SHerbert Xu return err; 2818ff59090SHerbert Xu } 2828ff59090SHerbert Xu 2838ff59090SHerbert Xu static void skcipher_wmem_wakeup(struct sock *sk) 2848ff59090SHerbert Xu { 2858ff59090SHerbert Xu struct socket_wq *wq; 2868ff59090SHerbert Xu 2878ff59090SHerbert Xu if (!skcipher_writable(sk)) 2888ff59090SHerbert Xu return; 2898ff59090SHerbert Xu 2908ff59090SHerbert Xu rcu_read_lock(); 2918ff59090SHerbert Xu wq = rcu_dereference(sk->sk_wq); 2921ce0bf50SHerbert Xu if 
(skwq_has_sleeper(wq)) 2938ff59090SHerbert Xu wake_up_interruptible_sync_poll(&wq->wait, POLLIN | 2948ff59090SHerbert Xu POLLRDNORM | 2958ff59090SHerbert Xu POLLRDBAND); 2968ff59090SHerbert Xu sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN); 2978ff59090SHerbert Xu rcu_read_unlock(); 2988ff59090SHerbert Xu } 2998ff59090SHerbert Xu 3008ff59090SHerbert Xu static int skcipher_wait_for_data(struct sock *sk, unsigned flags) 3018ff59090SHerbert Xu { 302d9dc8b0fSWANG Cong DEFINE_WAIT_FUNC(wait, woken_wake_function); 3038ff59090SHerbert Xu struct alg_sock *ask = alg_sk(sk); 3048ff59090SHerbert Xu struct skcipher_ctx *ctx = ask->private; 3058ff59090SHerbert Xu long timeout; 3068ff59090SHerbert Xu int err = -ERESTARTSYS; 3078ff59090SHerbert Xu 3088ff59090SHerbert Xu if (flags & MSG_DONTWAIT) { 3098ff59090SHerbert Xu return -EAGAIN; 3108ff59090SHerbert Xu } 3118ff59090SHerbert Xu 3129cd3e072SEric Dumazet sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk); 3138ff59090SHerbert Xu 314d9dc8b0fSWANG Cong add_wait_queue(sk_sleep(sk), &wait); 3158ff59090SHerbert Xu for (;;) { 3168ff59090SHerbert Xu if (signal_pending(current)) 3178ff59090SHerbert Xu break; 3188ff59090SHerbert Xu timeout = MAX_SCHEDULE_TIMEOUT; 319d9dc8b0fSWANG Cong if (sk_wait_event(sk, &timeout, ctx->used, &wait)) { 3208ff59090SHerbert Xu err = 0; 3218ff59090SHerbert Xu break; 3228ff59090SHerbert Xu } 3238ff59090SHerbert Xu } 324d9dc8b0fSWANG Cong remove_wait_queue(sk_sleep(sk), &wait); 3258ff59090SHerbert Xu 3269cd3e072SEric Dumazet sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk); 3278ff59090SHerbert Xu 3288ff59090SHerbert Xu return err; 3298ff59090SHerbert Xu } 3308ff59090SHerbert Xu 3318ff59090SHerbert Xu static void skcipher_data_wakeup(struct sock *sk) 3328ff59090SHerbert Xu { 3338ff59090SHerbert Xu struct alg_sock *ask = alg_sk(sk); 3348ff59090SHerbert Xu struct skcipher_ctx *ctx = ask->private; 3358ff59090SHerbert Xu struct socket_wq *wq; 3368ff59090SHerbert Xu 3378ff59090SHerbert Xu if (!ctx->used) 3388ff59090SHerbert Xu return; 
3398ff59090SHerbert Xu 3408ff59090SHerbert Xu rcu_read_lock(); 3418ff59090SHerbert Xu wq = rcu_dereference(sk->sk_wq); 3421ce0bf50SHerbert Xu if (skwq_has_sleeper(wq)) 3438ff59090SHerbert Xu wake_up_interruptible_sync_poll(&wq->wait, POLLOUT | 3448ff59090SHerbert Xu POLLRDNORM | 3458ff59090SHerbert Xu POLLRDBAND); 3468ff59090SHerbert Xu sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT); 3478ff59090SHerbert Xu rcu_read_unlock(); 3488ff59090SHerbert Xu } 3498ff59090SHerbert Xu 3501b784140SYing Xue static int skcipher_sendmsg(struct socket *sock, struct msghdr *msg, 3511b784140SYing Xue size_t size) 3528ff59090SHerbert Xu { 3538ff59090SHerbert Xu struct sock *sk = sock->sk; 3548ff59090SHerbert Xu struct alg_sock *ask = alg_sk(sk); 3556454c2b8SHerbert Xu struct sock *psk = ask->parent; 3566454c2b8SHerbert Xu struct alg_sock *pask = alg_sk(psk); 3578ff59090SHerbert Xu struct skcipher_ctx *ctx = ask->private; 3586454c2b8SHerbert Xu struct skcipher_tfm *skc = pask->private; 3596454c2b8SHerbert Xu struct crypto_skcipher *tfm = skc->skcipher; 3600d96e4baSHerbert Xu unsigned ivsize = crypto_skcipher_ivsize(tfm); 361e870456dSStephan Mueller struct skcipher_tsgl *sgl; 3628ff59090SHerbert Xu struct af_alg_control con = {}; 3638ff59090SHerbert Xu long copied = 0; 3648ff59090SHerbert Xu bool enc = 0; 365f26b7b80SStephan Mueller bool init = 0; 3668ff59090SHerbert Xu int err; 3678ff59090SHerbert Xu int i; 3688ff59090SHerbert Xu 3698ff59090SHerbert Xu if (msg->msg_controllen) { 3708ff59090SHerbert Xu err = af_alg_cmsg_send(msg, &con); 3718ff59090SHerbert Xu if (err) 3728ff59090SHerbert Xu return err; 3738ff59090SHerbert Xu 374f26b7b80SStephan Mueller init = 1; 3758ff59090SHerbert Xu switch (con.op) { 3768ff59090SHerbert Xu case ALG_OP_ENCRYPT: 3778ff59090SHerbert Xu enc = 1; 3788ff59090SHerbert Xu break; 3798ff59090SHerbert Xu case ALG_OP_DECRYPT: 3808ff59090SHerbert Xu enc = 0; 3818ff59090SHerbert Xu break; 3828ff59090SHerbert Xu default: 3838ff59090SHerbert Xu return -EINVAL; 
3848ff59090SHerbert Xu } 3858ff59090SHerbert Xu 3868ff59090SHerbert Xu if (con.iv && con.iv->ivlen != ivsize) 3878ff59090SHerbert Xu return -EINVAL; 3888ff59090SHerbert Xu } 3898ff59090SHerbert Xu 3908ff59090SHerbert Xu err = -EINVAL; 3918ff59090SHerbert Xu 3928ff59090SHerbert Xu lock_sock(sk); 3938ff59090SHerbert Xu if (!ctx->more && ctx->used) 3948ff59090SHerbert Xu goto unlock; 3958ff59090SHerbert Xu 396f26b7b80SStephan Mueller if (init) { 3978ff59090SHerbert Xu ctx->enc = enc; 3988ff59090SHerbert Xu if (con.iv) 3998ff59090SHerbert Xu memcpy(ctx->iv, con.iv->iv, ivsize); 4008ff59090SHerbert Xu } 4018ff59090SHerbert Xu 4028ff59090SHerbert Xu while (size) { 4038ff59090SHerbert Xu struct scatterlist *sg; 4048ff59090SHerbert Xu unsigned long len = size; 405652d5b8aSLABBE Corentin size_t plen; 4068ff59090SHerbert Xu 4078ff59090SHerbert Xu if (ctx->merge) { 408e870456dSStephan Mueller sgl = list_entry(ctx->tsgl_list.prev, 409e870456dSStephan Mueller struct skcipher_tsgl, list); 4108ff59090SHerbert Xu sg = sgl->sg + sgl->cur - 1; 4118ff59090SHerbert Xu len = min_t(unsigned long, len, 4128ff59090SHerbert Xu PAGE_SIZE - sg->offset - sg->length); 4138ff59090SHerbert Xu 4146ce8e9ceSAl Viro err = memcpy_from_msg(page_address(sg_page(sg)) + 4158ff59090SHerbert Xu sg->offset + sg->length, 4166ce8e9ceSAl Viro msg, len); 4178ff59090SHerbert Xu if (err) 4188ff59090SHerbert Xu goto unlock; 4198ff59090SHerbert Xu 4208ff59090SHerbert Xu sg->length += len; 4218ff59090SHerbert Xu ctx->merge = (sg->offset + sg->length) & 4228ff59090SHerbert Xu (PAGE_SIZE - 1); 4238ff59090SHerbert Xu 4248ff59090SHerbert Xu ctx->used += len; 4258ff59090SHerbert Xu copied += len; 4268ff59090SHerbert Xu size -= len; 4278ff59090SHerbert Xu continue; 4288ff59090SHerbert Xu } 4298ff59090SHerbert Xu 4300f6bb83cSHerbert Xu if (!skcipher_writable(sk)) { 4318ff59090SHerbert Xu err = skcipher_wait_for_wmem(sk, msg->msg_flags); 4328ff59090SHerbert Xu if (err) 4338ff59090SHerbert Xu goto unlock; 4348ff59090SHerbert 
Xu } 4358ff59090SHerbert Xu 4360f6bb83cSHerbert Xu len = min_t(unsigned long, len, skcipher_sndbuf(sk)); 4378ff59090SHerbert Xu 438e870456dSStephan Mueller err = skcipher_alloc_tsgl(sk); 4398ff59090SHerbert Xu if (err) 4408ff59090SHerbert Xu goto unlock; 4418ff59090SHerbert Xu 442e870456dSStephan Mueller sgl = list_entry(ctx->tsgl_list.prev, struct skcipher_tsgl, 443e870456dSStephan Mueller list); 4448ff59090SHerbert Xu sg = sgl->sg; 445202736d9SHerbert Xu if (sgl->cur) 446202736d9SHerbert Xu sg_unmark_end(sg + sgl->cur - 1); 4478ff59090SHerbert Xu do { 4488ff59090SHerbert Xu i = sgl->cur; 449652d5b8aSLABBE Corentin plen = min_t(size_t, len, PAGE_SIZE); 4508ff59090SHerbert Xu 4518ff59090SHerbert Xu sg_assign_page(sg + i, alloc_page(GFP_KERNEL)); 4528ff59090SHerbert Xu err = -ENOMEM; 4538ff59090SHerbert Xu if (!sg_page(sg + i)) 4548ff59090SHerbert Xu goto unlock; 4558ff59090SHerbert Xu 4566ce8e9ceSAl Viro err = memcpy_from_msg(page_address(sg_page(sg + i)), 4576ce8e9ceSAl Viro msg, plen); 4588ff59090SHerbert Xu if (err) { 4598ff59090SHerbert Xu __free_page(sg_page(sg + i)); 4608ff59090SHerbert Xu sg_assign_page(sg + i, NULL); 4618ff59090SHerbert Xu goto unlock; 4628ff59090SHerbert Xu } 4638ff59090SHerbert Xu 4648ff59090SHerbert Xu sg[i].length = plen; 4658ff59090SHerbert Xu len -= plen; 4668ff59090SHerbert Xu ctx->used += plen; 4678ff59090SHerbert Xu copied += plen; 4688ff59090SHerbert Xu size -= plen; 4698ff59090SHerbert Xu sgl->cur++; 4708ff59090SHerbert Xu } while (len && sgl->cur < MAX_SGL_ENTS); 4718ff59090SHerbert Xu 4720f477b65STadeusz Struk if (!size) 4730f477b65STadeusz Struk sg_mark_end(sg + sgl->cur - 1); 4740f477b65STadeusz Struk 4758ff59090SHerbert Xu ctx->merge = plen & (PAGE_SIZE - 1); 4768ff59090SHerbert Xu } 4778ff59090SHerbert Xu 4788ff59090SHerbert Xu err = 0; 4798ff59090SHerbert Xu 4808ff59090SHerbert Xu ctx->more = msg->msg_flags & MSG_MORE; 4818ff59090SHerbert Xu 4828ff59090SHerbert Xu unlock: 4838ff59090SHerbert Xu skcipher_data_wakeup(sk); 
4848ff59090SHerbert Xu release_sock(sk); 4858ff59090SHerbert Xu 4868ff59090SHerbert Xu return copied ?: err; 4878ff59090SHerbert Xu } 4888ff59090SHerbert Xu 4898ff59090SHerbert Xu static ssize_t skcipher_sendpage(struct socket *sock, struct page *page, 4908ff59090SHerbert Xu int offset, size_t size, int flags) 4918ff59090SHerbert Xu { 4928ff59090SHerbert Xu struct sock *sk = sock->sk; 4938ff59090SHerbert Xu struct alg_sock *ask = alg_sk(sk); 4948ff59090SHerbert Xu struct skcipher_ctx *ctx = ask->private; 495e870456dSStephan Mueller struct skcipher_tsgl *sgl; 4968ff59090SHerbert Xu int err = -EINVAL; 4978ff59090SHerbert Xu 498d3f7d56aSShawn Landden if (flags & MSG_SENDPAGE_NOTLAST) 499d3f7d56aSShawn Landden flags |= MSG_MORE; 500d3f7d56aSShawn Landden 5018ff59090SHerbert Xu lock_sock(sk); 5028ff59090SHerbert Xu if (!ctx->more && ctx->used) 5038ff59090SHerbert Xu goto unlock; 5048ff59090SHerbert Xu 5058ff59090SHerbert Xu if (!size) 5068ff59090SHerbert Xu goto done; 5078ff59090SHerbert Xu 5080f6bb83cSHerbert Xu if (!skcipher_writable(sk)) { 5098ff59090SHerbert Xu err = skcipher_wait_for_wmem(sk, flags); 5108ff59090SHerbert Xu if (err) 5118ff59090SHerbert Xu goto unlock; 5128ff59090SHerbert Xu } 5138ff59090SHerbert Xu 514e870456dSStephan Mueller err = skcipher_alloc_tsgl(sk); 5158ff59090SHerbert Xu if (err) 5168ff59090SHerbert Xu goto unlock; 5178ff59090SHerbert Xu 5188ff59090SHerbert Xu ctx->merge = 0; 519e870456dSStephan Mueller sgl = list_entry(ctx->tsgl_list.prev, struct skcipher_tsgl, list); 5208ff59090SHerbert Xu 5210f477b65STadeusz Struk if (sgl->cur) 5220f477b65STadeusz Struk sg_unmark_end(sgl->sg + sgl->cur - 1); 5230f477b65STadeusz Struk 5240f477b65STadeusz Struk sg_mark_end(sgl->sg + sgl->cur); 5258ff59090SHerbert Xu get_page(page); 5268ff59090SHerbert Xu sg_set_page(sgl->sg + sgl->cur, page, size, offset); 5278ff59090SHerbert Xu sgl->cur++; 5288ff59090SHerbert Xu ctx->used += size; 5298ff59090SHerbert Xu 5308ff59090SHerbert Xu done: 5318ff59090SHerbert Xu 
ctx->more = flags & MSG_MORE; 5328ff59090SHerbert Xu 5338ff59090SHerbert Xu unlock: 5348ff59090SHerbert Xu skcipher_data_wakeup(sk); 5358ff59090SHerbert Xu release_sock(sk); 5368ff59090SHerbert Xu 5378ff59090SHerbert Xu return err ?: size; 5388ff59090SHerbert Xu } 5398ff59090SHerbert Xu 540e870456dSStephan Mueller static void skcipher_async_cb(struct crypto_async_request *req, int err) 541a596999bSTadeusz Struk { 542e870456dSStephan Mueller struct skcipher_async_req *areq = req->data; 543e870456dSStephan Mueller struct sock *sk = areq->sk; 544e870456dSStephan Mueller struct kiocb *iocb = areq->iocb; 545e870456dSStephan Mueller unsigned int resultlen; 546a596999bSTadeusz Struk 547e870456dSStephan Mueller lock_sock(sk); 548a596999bSTadeusz Struk 549e870456dSStephan Mueller /* Buffer size written by crypto operation. */ 550e870456dSStephan Mueller resultlen = areq->req.cryptlen; 551a596999bSTadeusz Struk 552e870456dSStephan Mueller skcipher_free_areq_sgls(areq); 553e870456dSStephan Mueller sock_kfree_s(sk, areq, areq->areqlen); 554e870456dSStephan Mueller __sock_put(sk); 555e870456dSStephan Mueller 556e870456dSStephan Mueller iocb->ki_complete(iocb, err ? 
err : resultlen, 0); 557e870456dSStephan Mueller 558e870456dSStephan Mueller release_sock(sk); 559a596999bSTadeusz Struk } 560a596999bSTadeusz Struk 561e870456dSStephan Mueller static int _skcipher_recvmsg(struct socket *sock, struct msghdr *msg, 562e870456dSStephan Mueller size_t ignored, int flags) 563a596999bSTadeusz Struk { 564a596999bSTadeusz Struk struct sock *sk = sock->sk; 565a596999bSTadeusz Struk struct alg_sock *ask = alg_sk(sk); 566ec69bbfbSHerbert Xu struct sock *psk = ask->parent; 567ec69bbfbSHerbert Xu struct alg_sock *pask = alg_sk(psk); 568a596999bSTadeusz Struk struct skcipher_ctx *ctx = ask->private; 569ec69bbfbSHerbert Xu struct skcipher_tfm *skc = pask->private; 570ec69bbfbSHerbert Xu struct crypto_skcipher *tfm = skc->skcipher; 571e870456dSStephan Mueller unsigned int bs = crypto_skcipher_blocksize(tfm); 572e870456dSStephan Mueller unsigned int areqlen = sizeof(struct skcipher_async_req) + 573e870456dSStephan Mueller crypto_skcipher_reqsize(tfm); 574e870456dSStephan Mueller struct skcipher_async_req *areq; 575e870456dSStephan Mueller struct skcipher_rsgl *last_rsgl = NULL; 576e870456dSStephan Mueller int err = 0; 577e870456dSStephan Mueller size_t len = 0; 578ec69bbfbSHerbert Xu 579e870456dSStephan Mueller /* Allocate cipher request for current operation. 
*/ 580e870456dSStephan Mueller areq = sock_kmalloc(sk, areqlen, GFP_KERNEL); 581e870456dSStephan Mueller if (unlikely(!areq)) 582e870456dSStephan Mueller return -ENOMEM; 583e870456dSStephan Mueller areq->areqlen = areqlen; 584e870456dSStephan Mueller areq->sk = sk; 585e870456dSStephan Mueller INIT_LIST_HEAD(&areq->rsgl_list); 586e870456dSStephan Mueller areq->tsgl = NULL; 587e870456dSStephan Mueller areq->tsgl_entries = 0; 588ec69bbfbSHerbert Xu 589e870456dSStephan Mueller /* convert iovecs of output buffers into RX SGL */ 590e870456dSStephan Mueller while (msg_data_left(msg)) { 591e870456dSStephan Mueller struct skcipher_rsgl *rsgl; 592e870456dSStephan Mueller size_t seglen; 593a596999bSTadeusz Struk 594e870456dSStephan Mueller /* limit the amount of readable buffers */ 595e870456dSStephan Mueller if (!skcipher_readable(sk)) 596e870456dSStephan Mueller break; 597a596999bSTadeusz Struk 598a596999bSTadeusz Struk if (!ctx->used) { 599a596999bSTadeusz Struk err = skcipher_wait_for_data(sk, flags); 600a596999bSTadeusz Struk if (err) 601a596999bSTadeusz Struk goto free; 602a596999bSTadeusz Struk } 603a596999bSTadeusz Struk 604e870456dSStephan Mueller seglen = min_t(size_t, ctx->used, msg_data_left(msg)); 605a596999bSTadeusz Struk 606e870456dSStephan Mueller if (list_empty(&areq->rsgl_list)) { 607e870456dSStephan Mueller rsgl = &areq->first_sgl; 608a596999bSTadeusz Struk } else { 609e870456dSStephan Mueller rsgl = sock_kmalloc(sk, sizeof(*rsgl), GFP_KERNEL); 610a596999bSTadeusz Struk if (!rsgl) { 611a596999bSTadeusz Struk err = -ENOMEM; 612a596999bSTadeusz Struk goto free; 613a596999bSTadeusz Struk } 614a596999bSTadeusz Struk } 615a596999bSTadeusz Struk 616e870456dSStephan Mueller rsgl->sgl.npages = 0; 617e870456dSStephan Mueller list_add_tail(&rsgl->list, &areq->rsgl_list); 618e870456dSStephan Mueller 619e870456dSStephan Mueller /* make one iovec available as scatterlist */ 620e870456dSStephan Mueller err = af_alg_make_sg(&rsgl->sgl, &msg->msg_iter, seglen); 
621e870456dSStephan Mueller if (err < 0) 622a596999bSTadeusz Struk goto free; 623e870456dSStephan Mueller 624e870456dSStephan Mueller /* chain the new scatterlist with previous one */ 625a596999bSTadeusz Struk if (last_rsgl) 626a596999bSTadeusz Struk af_alg_link_sg(&last_rsgl->sgl, &rsgl->sgl); 627a596999bSTadeusz Struk 628a596999bSTadeusz Struk last_rsgl = rsgl; 629e870456dSStephan Mueller len += err; 630e870456dSStephan Mueller ctx->rcvused += err; 631e870456dSStephan Mueller rsgl->sg_num_bytes = err; 632e870456dSStephan Mueller iov_iter_advance(&msg->msg_iter, err); 633a596999bSTadeusz Struk } 634a596999bSTadeusz Struk 635e870456dSStephan Mueller /* Process only as much RX buffers for which we have TX data */ 636e870456dSStephan Mueller if (len > ctx->used) 637e870456dSStephan Mueller len = ctx->used; 638033f46b3Stadeusz.struk@intel.com 639e870456dSStephan Mueller /* 640e870456dSStephan Mueller * If more buffers are to be expected to be processed, process only 641e870456dSStephan Mueller * full block size buffers. 642e870456dSStephan Mueller */ 643e870456dSStephan Mueller if (ctx->more || len < ctx->used) 644e870456dSStephan Mueller len -= len % bs; 645a596999bSTadeusz Struk 646e870456dSStephan Mueller /* 647e870456dSStephan Mueller * Create a per request TX SGL for this request which tracks the 648e870456dSStephan Mueller * SG entries from the global TX SGL. 
	 */
	areq->tsgl_entries = skcipher_count_tsgl(sk, len);
	if (!areq->tsgl_entries)
		areq->tsgl_entries = 1;
	areq->tsgl = sock_kmalloc(sk, sizeof(*areq->tsgl) * areq->tsgl_entries,
				  GFP_KERNEL);
	if (!areq->tsgl) {
		err = -ENOMEM;
		goto free;
	}
	sg_init_table(areq->tsgl, areq->tsgl_entries);
	skcipher_pull_tsgl(sk, len, areq->tsgl);

	/* Initialize the crypto operation */
	skcipher_request_set_tfm(&areq->req, tfm);
	skcipher_request_set_crypt(&areq->req, areq->tsgl,
				   areq->first_sgl.sgl.sg, len, ctx->iv);

	if (msg->msg_iocb && !is_sync_kiocb(msg->msg_iocb)) {
		/* AIO operation: completion is reported via skcipher_async_cb */
		areq->iocb = msg->msg_iocb;
		skcipher_request_set_callback(&areq->req,
					      CRYPTO_TFM_REQ_MAY_SLEEP,
					      skcipher_async_cb, areq);
		err = ctx->enc ? crypto_skcipher_encrypt(&areq->req) :
				 crypto_skcipher_decrypt(&areq->req);
	} else {
		/* Synchronous operation: block until the cipher completes */
		skcipher_request_set_callback(&areq->req,
					      CRYPTO_TFM_REQ_MAY_SLEEP |
					      CRYPTO_TFM_REQ_MAY_BACKLOG,
					      af_alg_complete,
					      &ctx->completion);
		err = af_alg_wait_for_completion(ctx->enc ?
					crypto_skcipher_encrypt(&areq->req) :
					crypto_skcipher_decrypt(&areq->req),
						 &ctx->completion);
	}

	/*
	 * AIO operation in progress: pin the socket for the lifetime of
	 * the request and report -EIOCBQUEUED to the caller.
	 *
	 * NOTE(review): sock_hold() runs only after the request was
	 * submitted above; if the async callback can fire before this
	 * point it would observe the socket without the extra reference
	 * -- confirm the ordering against skcipher_async_cb.
	 */
	if (err == -EINPROGRESS) {
		sock_hold(sk);
		return -EIOCBQUEUED;
	}

free:
	/* Error/synchronous path: release RX SGLs and the request itself. */
	skcipher_free_areq_sgls(areq);
	if (areq)
		sock_kfree_s(sk, areq, areqlen);

	/* On success report the number of bytes processed. */
	return err ? err : len;
}

/*
 * skcipher_recvmsg - recvmsg entry point for an algif_skcipher socket
 *
 * Repeatedly invokes _skcipher_recvmsg() until the caller-supplied
 * buffer in @msg is exhausted or an error occurs.  The socket lock is
 * held across the whole loop.
 *
 * Return: total number of bytes processed, or a negative error code if
 * nothing was processed yet; -EIOCBQUEUED always wins so that only one
 * AIO request is handled per call (see comment below).
 */
static int skcipher_recvmsg(struct socket *sock, struct msghdr *msg,
			    size_t ignored, int flags)
{
	struct sock *sk = sock->sk;
	int ret = 0;

	lock_sock(sk);
	while (msg_data_left(msg)) {
		int err = _skcipher_recvmsg(sock, msg, ignored, flags);

		/*
		 * This error covers -EIOCBQUEUED which implies that we can
		 * only handle one AIO request. If the caller wants to have
		 * multiple AIO requests in parallel, he must make multiple
		 * separate AIO calls.
		 *
		 * Also return the error if no data has been processed so far.
		 */
		if (err <= 0) {
			if (err == -EIOCBQUEUED || !ret)
				ret = err;
			goto out;
		}

		ret += err;
	}

out:
	/* Wake writers: recvmsg consumed TX data, freeing sendmsg space. */
	skcipher_wmem_wakeup(sk);
	release_sock(sk);
	return ret;
}

/*
 * skcipher_poll - poll callback for an algif_skcipher socket
 *
 * Readable when TX data is queued (ctx->used != 0), writable when
 * skcipher_writable() reports free sendmsg buffer space.
 */
static unsigned int skcipher_poll(struct file *file, struct socket *sock,
				  poll_table *wait)
{
	struct sock *sk = sock->sk;
	struct alg_sock *ask = alg_sk(sk);
	struct skcipher_ctx *ctx = ask->private;
	unsigned int mask;

	sock_poll_wait(file, sk_sleep(sk), wait);
	mask = 0;

	if (ctx->used)
		mask |= POLLIN | POLLRDNORM;

	if (skcipher_writable(sk))
		mask |= POLLOUT | POLLWRNORM | POLLWRBAND;

	return mask;
}

/* proto_ops installed once the transform has a usable key. */
static struct proto_ops algif_skcipher_ops = {
	.family		=	PF_ALG,

	.connect	=	sock_no_connect,
	.socketpair	=	sock_no_socketpair,
	.getname	=	sock_no_getname,
	.ioctl		=	sock_no_ioctl,
	.listen		=	sock_no_listen,
	.shutdown	=	sock_no_shutdown,
	.getsockopt	=	sock_no_getsockopt,
	.mmap		=	sock_no_mmap,
	.bind		=	sock_no_bind,
	.accept		=	sock_no_accept,
	.setsockopt	=	sock_no_setsockopt,

	.release	=	af_alg_release,
	.sendmsg	=	skcipher_sendmsg,
	.sendpage	=	skcipher_sendpage,
	.recvmsg	=	skcipher_recvmsg,
	.poll		=	skcipher_poll,
};

/*
 * skcipher_check_key - verify that the parent tfm has a key before
 * allowing I/O on a socket accepted through the nokey ops.
 *
 * On the first successful check the child takes its own reference on
 * the parent socket (ask->refcnt = 1) so subsequent calls short-circuit.
 *
 * Return: 0 if a key is present, -ENOKEY otherwise.
 */
static int skcipher_check_key(struct socket *sock)
{
	int err = 0;
	struct sock *psk;
	struct alg_sock *pask;
	struct skcipher_tfm *tfm;
	struct sock *sk = sock->sk;
	struct alg_sock *ask = alg_sk(sk);

	lock_sock(sk);
	/* Key presence was already established for this socket. */
	if (ask->refcnt)
		goto unlock_child;

	psk = ask->parent;
	pask = alg_sk(ask->parent);
	tfm = pask->private;

	err = -ENOKEY;
	/* Child lock is already held; annotate the nesting for lockdep. */
	lock_sock_nested(psk, SINGLE_DEPTH_NESTING);
	if (!tfm->has_key)
		goto unlock;

	/* First accounted child of this parent: pin the parent socket. */
	if (!pask->refcnt++)
		sock_hold(psk);

	ask->refcnt = 1;
	/*
	 * NOTE(review): this put appears to drop a reference the child
	 * held on the parent from accept time, now replaced by the
	 * refcnt-accounted one above -- confirm against af_alg_accept /
	 * af_alg_release_parent.
	 */
	sock_put(psk);

	err = 0;

unlock:
	release_sock(psk);
unlock_child:
	release_sock(sk);

	return err;
}

/* sendmsg wrapper for nokey sockets: require a key, then delegate. */
static int skcipher_sendmsg_nokey(struct socket *sock, struct msghdr *msg,
				  size_t size)
{
	int err;

	err = skcipher_check_key(sock);
	if (err)
		return err;

	return skcipher_sendmsg(sock, msg, size);
}

/* sendpage wrapper for nokey sockets: require a key, then delegate. */
static ssize_t skcipher_sendpage_nokey(struct socket *sock, struct page *page,
				       int offset, size_t size, int flags)
{
	int err;

	err = skcipher_check_key(sock);
	if (err)
		return err;

	return skcipher_sendpage(sock, page, offset, size, flags);
}

/* recvmsg wrapper for nokey sockets: require a key, then delegate. */
static int skcipher_recvmsg_nokey(struct socket *sock, struct msghdr *msg,
				  size_t ignored, int flags)
{
	int err;

	err = skcipher_check_key(sock);
	if (err)
		return err;

	return skcipher_recvmsg(sock, msg, ignored, flags);
}

/* proto_ops installed while the transform may still lack a key. */
static struct proto_ops algif_skcipher_ops_nokey = {
	.family		=	PF_ALG,

	.connect	=	sock_no_connect,
	.socketpair	=	sock_no_socketpair,
	.getname	=	sock_no_getname,
	.ioctl		=	sock_no_ioctl,
	.listen		=	sock_no_listen,
	.shutdown	=	sock_no_shutdown,
	.getsockopt	=	sock_no_getsockopt,
	.mmap		=	sock_no_mmap,
	.bind		=	sock_no_bind,
	.accept		=	sock_no_accept,
	.setsockopt	=	sock_no_setsockopt,

	.release	=	af_alg_release,
	.sendmsg	=	skcipher_sendmsg_nokey,
	.sendpage	=	skcipher_sendpage_nokey,
	.recvmsg	=	skcipher_recvmsg_nokey,
	.poll		=	skcipher_poll,
};

/*
 * skcipher_bind - af_alg bind() callback
 *
 * Allocates the skcipher_tfm wrapper and the skcipher transform named
 * by user space.  Returns the wrapper, or an ERR_PTR on failure.
 */
static void *skcipher_bind(const char *name, u32 type, u32 mask)
{
	struct skcipher_tfm *tfm;
	struct crypto_skcipher *skcipher;

	tfm = kzalloc(sizeof(*tfm), GFP_KERNEL);
	if (!tfm)
		return ERR_PTR(-ENOMEM);

	skcipher = crypto_alloc_skcipher(name, type, mask);
	if (IS_ERR(skcipher)) {
		/* Transform allocation failed: undo the wrapper alloc. */
		kfree(tfm);
		return ERR_CAST(skcipher);
	}

	tfm->skcipher = skcipher;

	return tfm;
}

/* skcipher_release - af_alg release() callback: undo skcipher_bind(). */
static void skcipher_release(void *private)
{
	struct skcipher_tfm *tfm = private;

	crypto_free_skcipher(tfm->skcipher);
	kfree(tfm);
}

/*
 * skcipher_setkey - af_alg setkey() callback
 *
 * Sets the cipher key and records whether a valid key is now present;
 * has_key gates the nokey-ops checks above.
 */
static int skcipher_setkey(void *private, const u8 *key, unsigned int keylen)
{
	struct skcipher_tfm *tfm = private;
	int err;

	err = crypto_skcipher_setkey(tfm->skcipher, key, keylen);
	tfm->has_key = !err;

	return err;
}

/*
 * skcipher_sock_destruct - sk_destruct callback for the child socket
 *
 * Releases all still-queued TX data, frees the IV (sock_kzfree_s wipes
 * the buffer before freeing) and the context, then drops the reference
 * on the parent socket.
 */
static void skcipher_sock_destruct(struct sock *sk)
{
	struct alg_sock *ask = alg_sk(sk);
	struct skcipher_ctx *ctx = ask->private;
	struct sock *psk = ask->parent;
	struct alg_sock *pask = alg_sk(psk);
	struct skcipher_tfm *skc = pask->private;
	struct crypto_skcipher *tfm = skc->skcipher;

	skcipher_pull_tsgl(sk, ctx->used, NULL);
	sock_kzfree_s(sk, ctx->iv, crypto_skcipher_ivsize(tfm));
	sock_kfree_s(sk, ctx, ctx->len);
	af_alg_release_parent(sk);
}

/*
 * skcipher_accept_parent_nokey - af_alg accept_nokey() callback
 *
 * Allocates and initializes the per-socket crypto context: a zeroed IV
 * sized for the bound transform and an empty TX SGL list.
 */
static int skcipher_accept_parent_nokey(void *private, struct sock *sk)
{
	struct skcipher_ctx *ctx;
	struct alg_sock *ask = alg_sk(sk);
	struct skcipher_tfm *tfm = private;
	struct crypto_skcipher *skcipher = tfm->skcipher;
	unsigned int len = sizeof(*ctx);

	ctx = sock_kmalloc(sk, len, GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	ctx->iv = sock_kmalloc(sk, crypto_skcipher_ivsize(skcipher),
			       GFP_KERNEL);
	if (!ctx->iv) {
		sock_kfree_s(sk, ctx, len);
		return -ENOMEM;
	}

	memset(ctx->iv, 0, crypto_skcipher_ivsize(skcipher));

	INIT_LIST_HEAD(&ctx->tsgl_list);
	ctx->len = len;
	/* Start with an empty, non-encrypting context. */
	ctx->used = 0;
	ctx->rcvused = 0;
	ctx->more = 0;
	ctx->merge = 0;
	ctx->enc = 0;
	af_alg_init_completion(&ctx->completion);

	ask->private = ctx;

	sk->sk_destruct = skcipher_sock_destruct;

	return 0;
}

/*
 * skcipher_accept_parent - af_alg accept() callback for keyed sockets
 *
 * Refuses the accept with -ENOKEY when the cipher requires a key that
 * has not been set yet; otherwise falls through to the common nokey
 * setup path.
 */
static int skcipher_accept_parent(void *private, struct sock *sk)
{
	struct skcipher_tfm *tfm = private;

	if (!tfm->has_key && crypto_skcipher_has_setkey(tfm->skcipher))
		return -ENOKEY;

	return skcipher_accept_parent_nokey(private, sk);
}

/* af_alg type callbacks registered for "skcipher" sockets. */
static const struct af_alg_type algif_type_skcipher = {
	.bind		=	skcipher_bind,
	.release	=	skcipher_release,
	.setkey		=	skcipher_setkey,
	.accept		=	skcipher_accept_parent,
	.accept_nokey	=	skcipher_accept_parent_nokey,
	.ops		=	&algif_skcipher_ops,
	.ops_nokey	=	&algif_skcipher_ops_nokey,
	.name		=	"skcipher",
	.owner		=	THIS_MODULE
};

static int __init algif_skcipher_init(void)
{
	return af_alg_register_type(&algif_type_skcipher);
}

static void __exit algif_skcipher_exit(void)
{
	int err = af_alg_unregister_type(&algif_type_skcipher);

	/* Unregistering a type we registered at init must not fail. */
	BUG_ON(err);
}

module_init(algif_skcipher_init);
module_exit(algif_skcipher_exit);
MODULE_LICENSE("GPL");