/*
 * af_alg: User-space algorithm interface
 *
 * This file provides the user-space API for algorithms.
 *
 * Copyright (c) 2010 Herbert Xu <herbert@gondor.apana.org.au>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 */

#include <linux/atomic.h>
#include <crypto/if_alg.h>
#include <linux/crypto.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/net.h>
#include <linux/rwsem.h>
#include <linux/sched/signal.h>
#include <linux/security.h>

/* Registry node linking one registered af_alg_type into alg_types. */
struct alg_type_list {
	const struct af_alg_type *type;
	struct list_head list;
};

/* Accounting for the socket memory charged to AF_ALG sockets. */
static atomic_long_t alg_memory_allocated;

static struct proto alg_proto = {
	.name			= "ALG",
	.owner			= THIS_MODULE,
	.memory_allocated	= &alg_memory_allocated,
	.obj_size		= sizeof(struct alg_sock),
};

/* List of registered algorithm types; reads and writes are serialized
 * by alg_types_sem (readers take it shared, (un)registration exclusive).
 */
static LIST_HEAD(alg_types);
static DECLARE_RWSEM(alg_types_sem);
4403c8efc1SHerbert Xu static const struct af_alg_type *alg_get_type(const char *name) 4503c8efc1SHerbert Xu { 4603c8efc1SHerbert Xu const struct af_alg_type *type = ERR_PTR(-ENOENT); 4703c8efc1SHerbert Xu struct alg_type_list *node; 4803c8efc1SHerbert Xu 4903c8efc1SHerbert Xu down_read(&alg_types_sem); 5003c8efc1SHerbert Xu list_for_each_entry(node, &alg_types, list) { 5103c8efc1SHerbert Xu if (strcmp(node->type->name, name)) 5203c8efc1SHerbert Xu continue; 5303c8efc1SHerbert Xu 5403c8efc1SHerbert Xu if (try_module_get(node->type->owner)) 5503c8efc1SHerbert Xu type = node->type; 5603c8efc1SHerbert Xu break; 5703c8efc1SHerbert Xu } 5803c8efc1SHerbert Xu up_read(&alg_types_sem); 5903c8efc1SHerbert Xu 6003c8efc1SHerbert Xu return type; 6103c8efc1SHerbert Xu } 6203c8efc1SHerbert Xu 6303c8efc1SHerbert Xu int af_alg_register_type(const struct af_alg_type *type) 6403c8efc1SHerbert Xu { 6503c8efc1SHerbert Xu struct alg_type_list *node; 6603c8efc1SHerbert Xu int err = -EEXIST; 6703c8efc1SHerbert Xu 6803c8efc1SHerbert Xu down_write(&alg_types_sem); 6903c8efc1SHerbert Xu list_for_each_entry(node, &alg_types, list) { 7003c8efc1SHerbert Xu if (!strcmp(node->type->name, type->name)) 7103c8efc1SHerbert Xu goto unlock; 7203c8efc1SHerbert Xu } 7303c8efc1SHerbert Xu 7403c8efc1SHerbert Xu node = kmalloc(sizeof(*node), GFP_KERNEL); 7503c8efc1SHerbert Xu err = -ENOMEM; 7603c8efc1SHerbert Xu if (!node) 7703c8efc1SHerbert Xu goto unlock; 7803c8efc1SHerbert Xu 7903c8efc1SHerbert Xu type->ops->owner = THIS_MODULE; 8037766586SHerbert Xu if (type->ops_nokey) 8137766586SHerbert Xu type->ops_nokey->owner = THIS_MODULE; 8203c8efc1SHerbert Xu node->type = type; 8303c8efc1SHerbert Xu list_add(&node->list, &alg_types); 8403c8efc1SHerbert Xu err = 0; 8503c8efc1SHerbert Xu 8603c8efc1SHerbert Xu unlock: 8703c8efc1SHerbert Xu up_write(&alg_types_sem); 8803c8efc1SHerbert Xu 8903c8efc1SHerbert Xu return err; 9003c8efc1SHerbert Xu } 9103c8efc1SHerbert Xu EXPORT_SYMBOL_GPL(af_alg_register_type); 
9203c8efc1SHerbert Xu 9303c8efc1SHerbert Xu int af_alg_unregister_type(const struct af_alg_type *type) 9403c8efc1SHerbert Xu { 9503c8efc1SHerbert Xu struct alg_type_list *node; 9603c8efc1SHerbert Xu int err = -ENOENT; 9703c8efc1SHerbert Xu 9803c8efc1SHerbert Xu down_write(&alg_types_sem); 9903c8efc1SHerbert Xu list_for_each_entry(node, &alg_types, list) { 10003c8efc1SHerbert Xu if (strcmp(node->type->name, type->name)) 10103c8efc1SHerbert Xu continue; 10203c8efc1SHerbert Xu 10303c8efc1SHerbert Xu list_del(&node->list); 10403c8efc1SHerbert Xu kfree(node); 10503c8efc1SHerbert Xu err = 0; 10603c8efc1SHerbert Xu break; 10703c8efc1SHerbert Xu } 10803c8efc1SHerbert Xu up_write(&alg_types_sem); 10903c8efc1SHerbert Xu 11003c8efc1SHerbert Xu return err; 11103c8efc1SHerbert Xu } 11203c8efc1SHerbert Xu EXPORT_SYMBOL_GPL(af_alg_unregister_type); 11303c8efc1SHerbert Xu 11403c8efc1SHerbert Xu static void alg_do_release(const struct af_alg_type *type, void *private) 11503c8efc1SHerbert Xu { 11603c8efc1SHerbert Xu if (!type) 11703c8efc1SHerbert Xu return; 11803c8efc1SHerbert Xu 11903c8efc1SHerbert Xu type->release(private); 12003c8efc1SHerbert Xu module_put(type->owner); 12103c8efc1SHerbert Xu } 12203c8efc1SHerbert Xu 12303c8efc1SHerbert Xu int af_alg_release(struct socket *sock) 12403c8efc1SHerbert Xu { 12503c8efc1SHerbert Xu if (sock->sk) 12603c8efc1SHerbert Xu sock_put(sock->sk); 12703c8efc1SHerbert Xu return 0; 12803c8efc1SHerbert Xu } 12903c8efc1SHerbert Xu EXPORT_SYMBOL_GPL(af_alg_release); 13003c8efc1SHerbert Xu 131c840ac6aSHerbert Xu void af_alg_release_parent(struct sock *sk) 132c840ac6aSHerbert Xu { 133c840ac6aSHerbert Xu struct alg_sock *ask = alg_sk(sk); 134a6a48c56SHerbert Xu unsigned int nokey = ask->nokey_refcnt; 135a6a48c56SHerbert Xu bool last = nokey && !ask->refcnt; 136c840ac6aSHerbert Xu 137c840ac6aSHerbert Xu sk = ask->parent; 138c840ac6aSHerbert Xu ask = alg_sk(sk); 139c840ac6aSHerbert Xu 140c840ac6aSHerbert Xu lock_sock(sk); 141a6a48c56SHerbert Xu 
ask->nokey_refcnt -= nokey; 142a6a48c56SHerbert Xu if (!last) 143c840ac6aSHerbert Xu last = !--ask->refcnt; 144c840ac6aSHerbert Xu release_sock(sk); 145c840ac6aSHerbert Xu 146c840ac6aSHerbert Xu if (last) 147c840ac6aSHerbert Xu sock_put(sk); 148c840ac6aSHerbert Xu } 149c840ac6aSHerbert Xu EXPORT_SYMBOL_GPL(af_alg_release_parent); 150c840ac6aSHerbert Xu 15103c8efc1SHerbert Xu static int alg_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) 15203c8efc1SHerbert Xu { 15315539de5SHerbert Xu const u32 forbidden = CRYPTO_ALG_INTERNAL; 15403c8efc1SHerbert Xu struct sock *sk = sock->sk; 15503c8efc1SHerbert Xu struct alg_sock *ask = alg_sk(sk); 15603c8efc1SHerbert Xu struct sockaddr_alg *sa = (void *)uaddr; 15703c8efc1SHerbert Xu const struct af_alg_type *type; 15803c8efc1SHerbert Xu void *private; 159c840ac6aSHerbert Xu int err; 16003c8efc1SHerbert Xu 16103c8efc1SHerbert Xu if (sock->state == SS_CONNECTED) 16203c8efc1SHerbert Xu return -EINVAL; 16303c8efc1SHerbert Xu 1643f69cc60SHerbert Xu if (addr_len < sizeof(*sa)) 16503c8efc1SHerbert Xu return -EINVAL; 16603c8efc1SHerbert Xu 16703c8efc1SHerbert Xu sa->salg_type[sizeof(sa->salg_type) - 1] = 0; 1683f69cc60SHerbert Xu sa->salg_name[sizeof(sa->salg_name) + addr_len - sizeof(*sa) - 1] = 0; 16903c8efc1SHerbert Xu 17003c8efc1SHerbert Xu type = alg_get_type(sa->salg_type); 17103c8efc1SHerbert Xu if (IS_ERR(type) && PTR_ERR(type) == -ENOENT) { 17203c8efc1SHerbert Xu request_module("algif-%s", sa->salg_type); 17303c8efc1SHerbert Xu type = alg_get_type(sa->salg_type); 17403c8efc1SHerbert Xu } 17503c8efc1SHerbert Xu 17603c8efc1SHerbert Xu if (IS_ERR(type)) 17703c8efc1SHerbert Xu return PTR_ERR(type); 17803c8efc1SHerbert Xu 17915539de5SHerbert Xu private = type->bind(sa->salg_name, 18015539de5SHerbert Xu sa->salg_feat & ~forbidden, 18115539de5SHerbert Xu sa->salg_mask & ~forbidden); 18203c8efc1SHerbert Xu if (IS_ERR(private)) { 18303c8efc1SHerbert Xu module_put(type->owner); 18403c8efc1SHerbert Xu return 
PTR_ERR(private); 18503c8efc1SHerbert Xu } 18603c8efc1SHerbert Xu 187c840ac6aSHerbert Xu err = -EBUSY; 18803c8efc1SHerbert Xu lock_sock(sk); 189a6a48c56SHerbert Xu if (ask->refcnt | ask->nokey_refcnt) 190c840ac6aSHerbert Xu goto unlock; 19103c8efc1SHerbert Xu 19203c8efc1SHerbert Xu swap(ask->type, type); 19303c8efc1SHerbert Xu swap(ask->private, private); 19403c8efc1SHerbert Xu 195c840ac6aSHerbert Xu err = 0; 196c840ac6aSHerbert Xu 197c840ac6aSHerbert Xu unlock: 19803c8efc1SHerbert Xu release_sock(sk); 19903c8efc1SHerbert Xu 20003c8efc1SHerbert Xu alg_do_release(type, private); 20103c8efc1SHerbert Xu 202c840ac6aSHerbert Xu return err; 20303c8efc1SHerbert Xu } 20403c8efc1SHerbert Xu 20503c8efc1SHerbert Xu static int alg_setkey(struct sock *sk, char __user *ukey, 20603c8efc1SHerbert Xu unsigned int keylen) 20703c8efc1SHerbert Xu { 20803c8efc1SHerbert Xu struct alg_sock *ask = alg_sk(sk); 20903c8efc1SHerbert Xu const struct af_alg_type *type = ask->type; 21003c8efc1SHerbert Xu u8 *key; 21103c8efc1SHerbert Xu int err; 21203c8efc1SHerbert Xu 21303c8efc1SHerbert Xu key = sock_kmalloc(sk, keylen, GFP_KERNEL); 21403c8efc1SHerbert Xu if (!key) 21503c8efc1SHerbert Xu return -ENOMEM; 21603c8efc1SHerbert Xu 21703c8efc1SHerbert Xu err = -EFAULT; 21803c8efc1SHerbert Xu if (copy_from_user(key, ukey, keylen)) 21903c8efc1SHerbert Xu goto out; 22003c8efc1SHerbert Xu 22103c8efc1SHerbert Xu err = type->setkey(ask->private, key, keylen); 22203c8efc1SHerbert Xu 22303c8efc1SHerbert Xu out: 224ad202c8cSStephan Mueller sock_kzfree_s(sk, key, keylen); 22503c8efc1SHerbert Xu 22603c8efc1SHerbert Xu return err; 22703c8efc1SHerbert Xu } 22803c8efc1SHerbert Xu 22903c8efc1SHerbert Xu static int alg_setsockopt(struct socket *sock, int level, int optname, 23003c8efc1SHerbert Xu char __user *optval, unsigned int optlen) 23103c8efc1SHerbert Xu { 23203c8efc1SHerbert Xu struct sock *sk = sock->sk; 23303c8efc1SHerbert Xu struct alg_sock *ask = alg_sk(sk); 23403c8efc1SHerbert Xu const struct af_alg_type 
*type; 235c840ac6aSHerbert Xu int err = -EBUSY; 23603c8efc1SHerbert Xu 23703c8efc1SHerbert Xu lock_sock(sk); 238c840ac6aSHerbert Xu if (ask->refcnt) 239c840ac6aSHerbert Xu goto unlock; 240c840ac6aSHerbert Xu 24103c8efc1SHerbert Xu type = ask->type; 24203c8efc1SHerbert Xu 243c840ac6aSHerbert Xu err = -ENOPROTOOPT; 24403c8efc1SHerbert Xu if (level != SOL_ALG || !type) 24503c8efc1SHerbert Xu goto unlock; 24603c8efc1SHerbert Xu 24703c8efc1SHerbert Xu switch (optname) { 24803c8efc1SHerbert Xu case ALG_SET_KEY: 24903c8efc1SHerbert Xu if (sock->state == SS_CONNECTED) 25003c8efc1SHerbert Xu goto unlock; 25103c8efc1SHerbert Xu if (!type->setkey) 25203c8efc1SHerbert Xu goto unlock; 25303c8efc1SHerbert Xu 25403c8efc1SHerbert Xu err = alg_setkey(sk, optval, optlen); 25525fb8638SStephan Mueller break; 25625fb8638SStephan Mueller case ALG_SET_AEAD_AUTHSIZE: 25725fb8638SStephan Mueller if (sock->state == SS_CONNECTED) 25825fb8638SStephan Mueller goto unlock; 25925fb8638SStephan Mueller if (!type->setauthsize) 26025fb8638SStephan Mueller goto unlock; 26125fb8638SStephan Mueller err = type->setauthsize(ask->private, optlen); 26203c8efc1SHerbert Xu } 26303c8efc1SHerbert Xu 26403c8efc1SHerbert Xu unlock: 26503c8efc1SHerbert Xu release_sock(sk); 26603c8efc1SHerbert Xu 26703c8efc1SHerbert Xu return err; 26803c8efc1SHerbert Xu } 26903c8efc1SHerbert Xu 270cdfbabfbSDavid Howells int af_alg_accept(struct sock *sk, struct socket *newsock, bool kern) 27103c8efc1SHerbert Xu { 27203c8efc1SHerbert Xu struct alg_sock *ask = alg_sk(sk); 27303c8efc1SHerbert Xu const struct af_alg_type *type; 27403c8efc1SHerbert Xu struct sock *sk2; 2756a935170SHerbert Xu unsigned int nokey; 27603c8efc1SHerbert Xu int err; 27703c8efc1SHerbert Xu 27803c8efc1SHerbert Xu lock_sock(sk); 27903c8efc1SHerbert Xu type = ask->type; 28003c8efc1SHerbert Xu 28103c8efc1SHerbert Xu err = -EINVAL; 28203c8efc1SHerbert Xu if (!type) 28303c8efc1SHerbert Xu goto unlock; 28403c8efc1SHerbert Xu 285cdfbabfbSDavid Howells sk2 = 
sk_alloc(sock_net(sk), PF_ALG, GFP_KERNEL, &alg_proto, kern); 28603c8efc1SHerbert Xu err = -ENOMEM; 28703c8efc1SHerbert Xu if (!sk2) 28803c8efc1SHerbert Xu goto unlock; 28903c8efc1SHerbert Xu 29003c8efc1SHerbert Xu sock_init_data(newsock, sk2); 2912acce6aaSHerbert Xu security_sock_graft(sk2, newsock); 2924c63f83cSMilan Broz security_sk_clone(sk, sk2); 29303c8efc1SHerbert Xu 29403c8efc1SHerbert Xu err = type->accept(ask->private, sk2); 29537766586SHerbert Xu 29637766586SHerbert Xu nokey = err == -ENOKEY; 29737766586SHerbert Xu if (nokey && type->accept_nokey) 29837766586SHerbert Xu err = type->accept_nokey(ask->private, sk2); 29937766586SHerbert Xu 300a383292cSHerbert Xu if (err) 30103c8efc1SHerbert Xu goto unlock; 30203c8efc1SHerbert Xu 30303c8efc1SHerbert Xu sk2->sk_family = PF_ALG; 30403c8efc1SHerbert Xu 30537766586SHerbert Xu if (nokey || !ask->refcnt++) 30603c8efc1SHerbert Xu sock_hold(sk); 307a6a48c56SHerbert Xu ask->nokey_refcnt += nokey; 30803c8efc1SHerbert Xu alg_sk(sk2)->parent = sk; 30903c8efc1SHerbert Xu alg_sk(sk2)->type = type; 3106a935170SHerbert Xu alg_sk(sk2)->nokey_refcnt = nokey; 31103c8efc1SHerbert Xu 31203c8efc1SHerbert Xu newsock->ops = type->ops; 31303c8efc1SHerbert Xu newsock->state = SS_CONNECTED; 31403c8efc1SHerbert Xu 31537766586SHerbert Xu if (nokey) 31637766586SHerbert Xu newsock->ops = type->ops_nokey; 31737766586SHerbert Xu 31803c8efc1SHerbert Xu err = 0; 31903c8efc1SHerbert Xu 32003c8efc1SHerbert Xu unlock: 32103c8efc1SHerbert Xu release_sock(sk); 32203c8efc1SHerbert Xu 32303c8efc1SHerbert Xu return err; 32403c8efc1SHerbert Xu } 32503c8efc1SHerbert Xu EXPORT_SYMBOL_GPL(af_alg_accept); 32603c8efc1SHerbert Xu 327cdfbabfbSDavid Howells static int alg_accept(struct socket *sock, struct socket *newsock, int flags, 328cdfbabfbSDavid Howells bool kern) 32903c8efc1SHerbert Xu { 330cdfbabfbSDavid Howells return af_alg_accept(sock->sk, newsock, kern); 33103c8efc1SHerbert Xu } 33203c8efc1SHerbert Xu 33303c8efc1SHerbert Xu static const struct 
proto_ops alg_proto_ops = { 33403c8efc1SHerbert Xu .family = PF_ALG, 33503c8efc1SHerbert Xu .owner = THIS_MODULE, 33603c8efc1SHerbert Xu 33703c8efc1SHerbert Xu .connect = sock_no_connect, 33803c8efc1SHerbert Xu .socketpair = sock_no_socketpair, 33903c8efc1SHerbert Xu .getname = sock_no_getname, 34003c8efc1SHerbert Xu .ioctl = sock_no_ioctl, 34103c8efc1SHerbert Xu .listen = sock_no_listen, 34203c8efc1SHerbert Xu .shutdown = sock_no_shutdown, 34303c8efc1SHerbert Xu .getsockopt = sock_no_getsockopt, 34403c8efc1SHerbert Xu .mmap = sock_no_mmap, 34503c8efc1SHerbert Xu .sendpage = sock_no_sendpage, 34603c8efc1SHerbert Xu .sendmsg = sock_no_sendmsg, 34703c8efc1SHerbert Xu .recvmsg = sock_no_recvmsg, 34803c8efc1SHerbert Xu .poll = sock_no_poll, 34903c8efc1SHerbert Xu 35003c8efc1SHerbert Xu .bind = alg_bind, 35103c8efc1SHerbert Xu .release = af_alg_release, 35203c8efc1SHerbert Xu .setsockopt = alg_setsockopt, 35303c8efc1SHerbert Xu .accept = alg_accept, 35403c8efc1SHerbert Xu }; 35503c8efc1SHerbert Xu 35603c8efc1SHerbert Xu static void alg_sock_destruct(struct sock *sk) 35703c8efc1SHerbert Xu { 35803c8efc1SHerbert Xu struct alg_sock *ask = alg_sk(sk); 35903c8efc1SHerbert Xu 36003c8efc1SHerbert Xu alg_do_release(ask->type, ask->private); 36103c8efc1SHerbert Xu } 36203c8efc1SHerbert Xu 36303c8efc1SHerbert Xu static int alg_create(struct net *net, struct socket *sock, int protocol, 36403c8efc1SHerbert Xu int kern) 36503c8efc1SHerbert Xu { 36603c8efc1SHerbert Xu struct sock *sk; 36703c8efc1SHerbert Xu int err; 36803c8efc1SHerbert Xu 36903c8efc1SHerbert Xu if (sock->type != SOCK_SEQPACKET) 37003c8efc1SHerbert Xu return -ESOCKTNOSUPPORT; 37103c8efc1SHerbert Xu if (protocol != 0) 37203c8efc1SHerbert Xu return -EPROTONOSUPPORT; 37303c8efc1SHerbert Xu 37403c8efc1SHerbert Xu err = -ENOMEM; 37511aa9c28SEric W. 
Biederman sk = sk_alloc(net, PF_ALG, GFP_KERNEL, &alg_proto, kern); 37603c8efc1SHerbert Xu if (!sk) 37703c8efc1SHerbert Xu goto out; 37803c8efc1SHerbert Xu 37903c8efc1SHerbert Xu sock->ops = &alg_proto_ops; 38003c8efc1SHerbert Xu sock_init_data(sock, sk); 38103c8efc1SHerbert Xu 38203c8efc1SHerbert Xu sk->sk_family = PF_ALG; 38303c8efc1SHerbert Xu sk->sk_destruct = alg_sock_destruct; 38403c8efc1SHerbert Xu 38503c8efc1SHerbert Xu return 0; 38603c8efc1SHerbert Xu out: 38703c8efc1SHerbert Xu return err; 38803c8efc1SHerbert Xu } 38903c8efc1SHerbert Xu 39003c8efc1SHerbert Xu static const struct net_proto_family alg_family = { 39103c8efc1SHerbert Xu .family = PF_ALG, 39203c8efc1SHerbert Xu .create = alg_create, 39303c8efc1SHerbert Xu .owner = THIS_MODULE, 39403c8efc1SHerbert Xu }; 39503c8efc1SHerbert Xu 3961d10eb2fSAl Viro int af_alg_make_sg(struct af_alg_sgl *sgl, struct iov_iter *iter, int len) 39703c8efc1SHerbert Xu { 3981d10eb2fSAl Viro size_t off; 3991d10eb2fSAl Viro ssize_t n; 4001d10eb2fSAl Viro int npages, i; 40103c8efc1SHerbert Xu 4021d10eb2fSAl Viro n = iov_iter_get_pages(iter, sgl->pages, len, ALG_MAX_PAGES, &off); 4031d10eb2fSAl Viro if (n < 0) 4041d10eb2fSAl Viro return n; 40503c8efc1SHerbert Xu 4069399f0c5SLinus Torvalds npages = (off + n + PAGE_SIZE - 1) >> PAGE_SHIFT; 40703c8efc1SHerbert Xu if (WARN_ON(npages == 0)) 4081d10eb2fSAl Viro return -EINVAL; 40966db3739STadeusz Struk /* Add one extra for linking */ 41066db3739STadeusz Struk sg_init_table(sgl->sg, npages + 1); 41103c8efc1SHerbert Xu 4121d10eb2fSAl Viro for (i = 0, len = n; i < npages; i++) { 41303c8efc1SHerbert Xu int plen = min_t(int, len, PAGE_SIZE - off); 41403c8efc1SHerbert Xu 41503c8efc1SHerbert Xu sg_set_page(sgl->sg + i, sgl->pages[i], plen, off); 41603c8efc1SHerbert Xu 41703c8efc1SHerbert Xu off = 0; 41803c8efc1SHerbert Xu len -= plen; 41903c8efc1SHerbert Xu } 42066db3739STadeusz Struk sg_mark_end(sgl->sg + npages - 1); 42166db3739STadeusz Struk sgl->npages = npages; 42266db3739STadeusz 
Struk 4231d10eb2fSAl Viro return n; 42403c8efc1SHerbert Xu } 42503c8efc1SHerbert Xu EXPORT_SYMBOL_GPL(af_alg_make_sg); 42603c8efc1SHerbert Xu 42766db3739STadeusz Struk void af_alg_link_sg(struct af_alg_sgl *sgl_prev, struct af_alg_sgl *sgl_new) 42866db3739STadeusz Struk { 42966db3739STadeusz Struk sg_unmark_end(sgl_prev->sg + sgl_prev->npages - 1); 43066db3739STadeusz Struk sg_chain(sgl_prev->sg, sgl_prev->npages + 1, sgl_new->sg); 43166db3739STadeusz Struk } 432bd507520STadeusz Struk EXPORT_SYMBOL_GPL(af_alg_link_sg); 43366db3739STadeusz Struk 43403c8efc1SHerbert Xu void af_alg_free_sg(struct af_alg_sgl *sgl) 43503c8efc1SHerbert Xu { 43603c8efc1SHerbert Xu int i; 43703c8efc1SHerbert Xu 43866db3739STadeusz Struk for (i = 0; i < sgl->npages; i++) 43903c8efc1SHerbert Xu put_page(sgl->pages[i]); 44003c8efc1SHerbert Xu } 44103c8efc1SHerbert Xu EXPORT_SYMBOL_GPL(af_alg_free_sg); 44203c8efc1SHerbert Xu 44303c8efc1SHerbert Xu int af_alg_cmsg_send(struct msghdr *msg, struct af_alg_control *con) 44403c8efc1SHerbert Xu { 44503c8efc1SHerbert Xu struct cmsghdr *cmsg; 44603c8efc1SHerbert Xu 447f95b414eSGu Zheng for_each_cmsghdr(cmsg, msg) { 44803c8efc1SHerbert Xu if (!CMSG_OK(msg, cmsg)) 44903c8efc1SHerbert Xu return -EINVAL; 45003c8efc1SHerbert Xu if (cmsg->cmsg_level != SOL_ALG) 45103c8efc1SHerbert Xu continue; 45203c8efc1SHerbert Xu 45303c8efc1SHerbert Xu switch (cmsg->cmsg_type) { 45403c8efc1SHerbert Xu case ALG_SET_IV: 45503c8efc1SHerbert Xu if (cmsg->cmsg_len < CMSG_LEN(sizeof(*con->iv))) 45603c8efc1SHerbert Xu return -EINVAL; 45703c8efc1SHerbert Xu con->iv = (void *)CMSG_DATA(cmsg); 45803c8efc1SHerbert Xu if (cmsg->cmsg_len < CMSG_LEN(con->iv->ivlen + 45903c8efc1SHerbert Xu sizeof(*con->iv))) 46003c8efc1SHerbert Xu return -EINVAL; 46103c8efc1SHerbert Xu break; 46203c8efc1SHerbert Xu 46303c8efc1SHerbert Xu case ALG_SET_OP: 46403c8efc1SHerbert Xu if (cmsg->cmsg_len < CMSG_LEN(sizeof(u32))) 46503c8efc1SHerbert Xu return -EINVAL; 46603c8efc1SHerbert Xu con->op = *(u32 
*)CMSG_DATA(cmsg); 46703c8efc1SHerbert Xu break; 46803c8efc1SHerbert Xu 469af8e8073SStephan Mueller case ALG_SET_AEAD_ASSOCLEN: 470af8e8073SStephan Mueller if (cmsg->cmsg_len < CMSG_LEN(sizeof(u32))) 471af8e8073SStephan Mueller return -EINVAL; 472af8e8073SStephan Mueller con->aead_assoclen = *(u32 *)CMSG_DATA(cmsg); 473af8e8073SStephan Mueller break; 474af8e8073SStephan Mueller 47503c8efc1SHerbert Xu default: 47603c8efc1SHerbert Xu return -EINVAL; 47703c8efc1SHerbert Xu } 47803c8efc1SHerbert Xu } 47903c8efc1SHerbert Xu 48003c8efc1SHerbert Xu return 0; 48103c8efc1SHerbert Xu } 48203c8efc1SHerbert Xu EXPORT_SYMBOL_GPL(af_alg_cmsg_send); 48303c8efc1SHerbert Xu 4842d97591eSStephan Mueller /** 4852d97591eSStephan Mueller * af_alg_alloc_tsgl - allocate the TX SGL 4862d97591eSStephan Mueller * 4872d97591eSStephan Mueller * @sk socket of connection to user space 4882d97591eSStephan Mueller * @return: 0 upon success, < 0 upon error 4892d97591eSStephan Mueller */ 4902d97591eSStephan Mueller int af_alg_alloc_tsgl(struct sock *sk) 4912d97591eSStephan Mueller { 4922d97591eSStephan Mueller struct alg_sock *ask = alg_sk(sk); 4932d97591eSStephan Mueller struct af_alg_ctx *ctx = ask->private; 4942d97591eSStephan Mueller struct af_alg_tsgl *sgl; 4952d97591eSStephan Mueller struct scatterlist *sg = NULL; 4962d97591eSStephan Mueller 4972d97591eSStephan Mueller sgl = list_entry(ctx->tsgl_list.prev, struct af_alg_tsgl, list); 4982d97591eSStephan Mueller if (!list_empty(&ctx->tsgl_list)) 4992d97591eSStephan Mueller sg = sgl->sg; 5002d97591eSStephan Mueller 5012d97591eSStephan Mueller if (!sg || sgl->cur >= MAX_SGL_ENTS) { 5022d97591eSStephan Mueller sgl = sock_kmalloc(sk, sizeof(*sgl) + 5032d97591eSStephan Mueller sizeof(sgl->sg[0]) * (MAX_SGL_ENTS + 1), 5042d97591eSStephan Mueller GFP_KERNEL); 5052d97591eSStephan Mueller if (!sgl) 5062d97591eSStephan Mueller return -ENOMEM; 5072d97591eSStephan Mueller 5082d97591eSStephan Mueller sg_init_table(sgl->sg, MAX_SGL_ENTS + 1); 
5092d97591eSStephan Mueller sgl->cur = 0; 5102d97591eSStephan Mueller 5112d97591eSStephan Mueller if (sg) 5122d97591eSStephan Mueller sg_chain(sg, MAX_SGL_ENTS + 1, sgl->sg); 5132d97591eSStephan Mueller 5142d97591eSStephan Mueller list_add_tail(&sgl->list, &ctx->tsgl_list); 5152d97591eSStephan Mueller } 5162d97591eSStephan Mueller 5172d97591eSStephan Mueller return 0; 5182d97591eSStephan Mueller } 5192d97591eSStephan Mueller EXPORT_SYMBOL_GPL(af_alg_alloc_tsgl); 5202d97591eSStephan Mueller 5212d97591eSStephan Mueller /** 5222d97591eSStephan Mueller * aead_count_tsgl - Count number of TX SG entries 5232d97591eSStephan Mueller * 5242d97591eSStephan Mueller * The counting starts from the beginning of the SGL to @bytes. If 5252d97591eSStephan Mueller * an offset is provided, the counting of the SG entries starts at the offset. 5262d97591eSStephan Mueller * 5272d97591eSStephan Mueller * @sk socket of connection to user space 5282d97591eSStephan Mueller * @bytes Count the number of SG entries holding given number of bytes. 5292d97591eSStephan Mueller * @offset Start the counting of SG entries from the given offset. 
5302d97591eSStephan Mueller * @return Number of TX SG entries found given the constraints 5312d97591eSStephan Mueller */ 5322d97591eSStephan Mueller unsigned int af_alg_count_tsgl(struct sock *sk, size_t bytes, size_t offset) 5332d97591eSStephan Mueller { 5342d97591eSStephan Mueller struct alg_sock *ask = alg_sk(sk); 5352d97591eSStephan Mueller struct af_alg_ctx *ctx = ask->private; 5362d97591eSStephan Mueller struct af_alg_tsgl *sgl, *tmp; 5372d97591eSStephan Mueller unsigned int i; 5382d97591eSStephan Mueller unsigned int sgl_count = 0; 5392d97591eSStephan Mueller 5402d97591eSStephan Mueller if (!bytes) 5412d97591eSStephan Mueller return 0; 5422d97591eSStephan Mueller 5432d97591eSStephan Mueller list_for_each_entry_safe(sgl, tmp, &ctx->tsgl_list, list) { 5442d97591eSStephan Mueller struct scatterlist *sg = sgl->sg; 5452d97591eSStephan Mueller 5462d97591eSStephan Mueller for (i = 0; i < sgl->cur; i++) { 5472d97591eSStephan Mueller size_t bytes_count; 5482d97591eSStephan Mueller 5492d97591eSStephan Mueller /* Skip offset */ 5502d97591eSStephan Mueller if (offset >= sg[i].length) { 5512d97591eSStephan Mueller offset -= sg[i].length; 5522d97591eSStephan Mueller bytes -= sg[i].length; 5532d97591eSStephan Mueller continue; 5542d97591eSStephan Mueller } 5552d97591eSStephan Mueller 5562d97591eSStephan Mueller bytes_count = sg[i].length - offset; 5572d97591eSStephan Mueller 5582d97591eSStephan Mueller offset = 0; 5592d97591eSStephan Mueller sgl_count++; 5602d97591eSStephan Mueller 5612d97591eSStephan Mueller /* If we have seen requested number of bytes, stop */ 5622d97591eSStephan Mueller if (bytes_count >= bytes) 5632d97591eSStephan Mueller return sgl_count; 5642d97591eSStephan Mueller 5652d97591eSStephan Mueller bytes -= bytes_count; 5662d97591eSStephan Mueller } 5672d97591eSStephan Mueller } 5682d97591eSStephan Mueller 5692d97591eSStephan Mueller return sgl_count; 5702d97591eSStephan Mueller } 5712d97591eSStephan Mueller EXPORT_SYMBOL_GPL(af_alg_count_tsgl); 
/**
 * af_alg_pull_tsgl - Release the specified buffers from TX SGL
 *
 * If @dst is non-null, reassign the pages to dst. The caller must release
 * the pages. If @dst_offset is given only reassign the pages to @dst starting
 * at the @dst_offset (byte). The caller must ensure that @dst is large
 * enough (e.g. by using af_alg_count_tsgl with the same offset).
 *
 * @sk socket of connection to user space
 * @used Number of bytes to pull from TX SGL
 * @dst If non-NULL, buffer is reassigned to dst SGL instead of releasing. The
 *	caller must release the buffers in dst.
 * @dst_offset Reassign the TX SGL from given offset. All buffers before
 *	       reaching the offset are released.
 */
void af_alg_pull_tsgl(struct sock *sk, size_t used, struct scatterlist *dst,
		      size_t dst_offset)
{
	struct alg_sock *ask = alg_sk(sk);
	struct af_alg_ctx *ctx = ask->private;
	struct af_alg_tsgl *sgl;
	struct scatterlist *sg;
	unsigned int i, j = 0;

	while (!list_empty(&ctx->tsgl_list)) {
		sgl = list_first_entry(&ctx->tsgl_list, struct af_alg_tsgl,
				       list);
		sg = sgl->sg;

		for (i = 0; i < sgl->cur; i++) {
			size_t plen = min_t(size_t, used, sg[i].length);
			struct page *page = sg_page(sg + i);

			if (!page)
				continue;

			/*
			 * Assumption: caller created af_alg_count_tsgl(len)
			 * SG entries in dst.
			 */
			if (dst) {
				if (dst_offset >= plen) {
					/* discard page before offset */
					dst_offset -= plen;
				} else {
					/* reassign page to dst after offset;
					 * take an extra reference since the
					 * entry below keeps its own until
					 * fully consumed.
					 */
					get_page(page);
					sg_set_page(dst + j, page,
						    plen - dst_offset,
						    sg[i].offset + dst_offset);
					dst_offset = 0;
					j++;
				}
			}

			/* Consume @plen bytes from this entry. */
			sg[i].length -= plen;
			sg[i].offset += plen;

			used -= plen;
			ctx->used -= plen;

			/* Entry only partially consumed: nothing more to
			 * pull, and the SGL must stay on the list.
			 */
			if (sg[i].length)
				return;

			put_page(page);
			sg_assign_page(sg + i, NULL);
		}

		/* Every entry of this SGL was fully consumed. */
		list_del(&sgl->list);
		sock_kfree_s(sk, sgl, sizeof(*sgl) + sizeof(sgl->sg[0]) *
					(MAX_SGL_ENTS + 1));
	}

	if (!ctx->used)
		ctx->merge = 0;
}
EXPORT_SYMBOL_GPL(af_alg_pull_tsgl);

/**
 * af_alg_free_areq_sgls - Release TX and RX SGLs of the request
 *
 * @areq Request holding the TX and RX SGL
 */
void af_alg_free_areq_sgls(struct af_alg_async_req *areq)
{
	struct sock *sk = areq->sk;
	struct alg_sock *ask = alg_sk(sk);
	struct af_alg_ctx *ctx = ask->private;
	struct af_alg_rsgl *rsgl, *tmp;
	struct scatterlist *tsgl;
	struct scatterlist *sg;
	unsigned int i;

	list_for_each_entry_safe(rsgl, tmp, &areq->rsgl_list, list) {
		/* NOTE(review): plain read-modify-write of ctx->rcvused;
		 * presumably callers serialize this via the socket lock —
		 * confirm no async completion path races here.
		 */
		ctx->rcvused -= rsgl->sg_num_bytes;
		af_alg_free_sg(&rsgl->sgl);
		list_del(&rsgl->list);
		/* The first rsgl is embedded in areq, not allocated. */
		if (rsgl != &areq->first_rsgl)
			sock_kfree_s(sk, rsgl, sizeof(*rsgl));
	}

	tsgl = areq->tsgl;
	for_each_sg(tsgl, sg, areq->tsgl_entries, i) {
		if (!sg_page(sg))
			continue;
		put_page(sg_page(sg));
	}

	if (areq->tsgl && areq->tsgl_entries)
		sock_kfree_s(sk, tsgl, areq->tsgl_entries * sizeof(*tsgl));
}
EXPORT_SYMBOL_GPL(af_alg_free_areq_sgls);

/**
 * af_alg_wait_for_wmem - wait for availability of writable memory
 *
 * @sk socket of connection to user space
 * @flags If MSG_DONTWAIT is set, then only report if function would sleep
 * @return 0 when writable memory is available, < 0 upon error
 */
int af_alg_wait_for_wmem(struct sock *sk, unsigned int flags)
{
	DEFINE_WAIT_FUNC(wait, woken_wake_function);
	int err = -ERESTARTSYS;
	long timeout;

	if (flags & MSG_DONTWAIT)
		return -EAGAIN;

	sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);

	add_wait_queue(sk_sleep(sk), &wait);
	for (;;) {
		if (signal_pending(current))
			break;
		timeout = MAX_SCHEDULE_TIMEOUT;
		if (sk_wait_event(sk, &timeout, af_alg_writable(sk), &wait)) {
			err = 0;
			break;
		}
	}
	remove_wait_queue(sk_sleep(sk), &wait);

	return err;
}
EXPORT_SYMBOL_GPL(af_alg_wait_for_wmem);

/**
 * af_alg_wmem_wakeup - wakeup caller when writable memory is available
 *
 * @sk socket of connection to user space
 */
void af_alg_wmem_wakeup(struct sock *sk)
{
	struct socket_wq *wq;

	if (!af_alg_writable(sk))
		return;

	rcu_read_lock();
	wq = rcu_dereference(sk->sk_wq);
	if (skwq_has_sleeper(wq))
		wake_up_interruptible_sync_poll(&wq->wait, POLLIN |
							   POLLRDNORM |
							   POLLRDBAND);
	sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN);
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(af_alg_wmem_wakeup);

/**
 * af_alg_wait_for_data - wait for availability of TX data
 *
 * @sk socket of connection to user space
 * @flags If MSG_DONTWAIT is set, then only report if function would sleep
 * @return 0 when writable memory is available, < 0 upon error
 */
int af_alg_wait_for_data(struct sock *sk, unsigned flags)
{
	DEFINE_WAIT_FUNC(wait, woken_wake_function);
	struct alg_sock *ask = alg_sk(sk);
	struct af_alg_ctx *ctx = ask->private;
	long timeout;
	int err = -ERESTARTSYS;

	if (flags & MSG_DONTWAIT)
		return -EAGAIN;

	sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);

	add_wait_queue(sk_sleep(sk), &wait);
	for (;;) {
		if (signal_pending(current))
			break;
		timeout = MAX_SCHEDULE_TIMEOUT;
		/* Wake once data arrived or the sender signalled "no more
		 * data" (ctx->more cleared).
		 */
		if (sk_wait_event(sk, &timeout, (ctx->used || !ctx->more),
				  &wait)) {
			err = 0;
			break;
		}
	}
	remove_wait_queue(sk_sleep(sk), &wait);

	sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);

	return err;
}
EXPORT_SYMBOL_GPL(af_alg_wait_for_data);

/**
 * af_alg_data_wakeup - wakeup caller when new data can be sent to kernel
 *
 * @sk socket of connection to user space
 */
void af_alg_data_wakeup(struct sock *sk)
{
	struct alg_sock *ask = alg_sk(sk);
	struct af_alg_ctx *ctx = ask->private;
	struct socket_wq *wq;

	if (!ctx->used)
		return;

	rcu_read_lock();
	wq = rcu_dereference(sk->sk_wq);
	if (skwq_has_sleeper(wq))
		wake_up_interruptible_sync_poll(&wq->wait, POLLOUT |
							   POLLRDNORM |
							   POLLRDBAND);
	sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(af_alg_data_wakeup);

/**
 * af_alg_sendmsg - implementation of sendmsg system call handler
 *
 * The sendmsg system call handler obtains the user data and stores it
 * in ctx->tsgl_list.
This implies allocation of the required numbers of 8132d97591eSStephan Mueller * struct af_alg_tsgl. 8142d97591eSStephan Mueller * 8152d97591eSStephan Mueller * In addition, the ctx is filled with the information sent via CMSG. 8162d97591eSStephan Mueller * 8172d97591eSStephan Mueller * @sock socket of connection to user space 8182d97591eSStephan Mueller * @msg message from user space 8192d97591eSStephan Mueller * @size size of message from user space 8202d97591eSStephan Mueller * @ivsize the size of the IV for the cipher operation to verify that the 8212d97591eSStephan Mueller * user-space-provided IV has the right size 8222d97591eSStephan Mueller * @return the number of copied data upon success, < 0 upon error 8232d97591eSStephan Mueller */ 8242d97591eSStephan Mueller int af_alg_sendmsg(struct socket *sock, struct msghdr *msg, size_t size, 8252d97591eSStephan Mueller unsigned int ivsize) 8262d97591eSStephan Mueller { 8272d97591eSStephan Mueller struct sock *sk = sock->sk; 8282d97591eSStephan Mueller struct alg_sock *ask = alg_sk(sk); 8292d97591eSStephan Mueller struct af_alg_ctx *ctx = ask->private; 8302d97591eSStephan Mueller struct af_alg_tsgl *sgl; 8312d97591eSStephan Mueller struct af_alg_control con = {}; 8322d97591eSStephan Mueller long copied = 0; 8332d97591eSStephan Mueller bool enc = 0; 8342d97591eSStephan Mueller bool init = 0; 8352d97591eSStephan Mueller int err = 0; 8362d97591eSStephan Mueller 8372d97591eSStephan Mueller if (msg->msg_controllen) { 8382d97591eSStephan Mueller err = af_alg_cmsg_send(msg, &con); 8392d97591eSStephan Mueller if (err) 8402d97591eSStephan Mueller return err; 8412d97591eSStephan Mueller 8422d97591eSStephan Mueller init = 1; 8432d97591eSStephan Mueller switch (con.op) { 8442d97591eSStephan Mueller case ALG_OP_ENCRYPT: 8452d97591eSStephan Mueller enc = 1; 8462d97591eSStephan Mueller break; 8472d97591eSStephan Mueller case ALG_OP_DECRYPT: 8482d97591eSStephan Mueller enc = 0; 8492d97591eSStephan Mueller break; 8502d97591eSStephan 
Mueller default: 8512d97591eSStephan Mueller return -EINVAL; 8522d97591eSStephan Mueller } 8532d97591eSStephan Mueller 8542d97591eSStephan Mueller if (con.iv && con.iv->ivlen != ivsize) 8552d97591eSStephan Mueller return -EINVAL; 8562d97591eSStephan Mueller } 8572d97591eSStephan Mueller 8582d97591eSStephan Mueller lock_sock(sk); 8592d97591eSStephan Mueller if (!ctx->more && ctx->used) { 8602d97591eSStephan Mueller err = -EINVAL; 8612d97591eSStephan Mueller goto unlock; 8622d97591eSStephan Mueller } 8632d97591eSStephan Mueller 8642d97591eSStephan Mueller if (init) { 8652d97591eSStephan Mueller ctx->enc = enc; 8662d97591eSStephan Mueller if (con.iv) 8672d97591eSStephan Mueller memcpy(ctx->iv, con.iv->iv, ivsize); 8682d97591eSStephan Mueller 8692d97591eSStephan Mueller ctx->aead_assoclen = con.aead_assoclen; 8702d97591eSStephan Mueller } 8712d97591eSStephan Mueller 8722d97591eSStephan Mueller while (size) { 8732d97591eSStephan Mueller struct scatterlist *sg; 8742d97591eSStephan Mueller size_t len = size; 8752d97591eSStephan Mueller size_t plen; 8762d97591eSStephan Mueller 8772d97591eSStephan Mueller /* use the existing memory in an allocated page */ 8782d97591eSStephan Mueller if (ctx->merge) { 8792d97591eSStephan Mueller sgl = list_entry(ctx->tsgl_list.prev, 8802d97591eSStephan Mueller struct af_alg_tsgl, list); 8812d97591eSStephan Mueller sg = sgl->sg + sgl->cur - 1; 8822d97591eSStephan Mueller len = min_t(size_t, len, 8832d97591eSStephan Mueller PAGE_SIZE - sg->offset - sg->length); 8842d97591eSStephan Mueller 8852d97591eSStephan Mueller err = memcpy_from_msg(page_address(sg_page(sg)) + 8862d97591eSStephan Mueller sg->offset + sg->length, 8872d97591eSStephan Mueller msg, len); 8882d97591eSStephan Mueller if (err) 8892d97591eSStephan Mueller goto unlock; 8902d97591eSStephan Mueller 8912d97591eSStephan Mueller sg->length += len; 8922d97591eSStephan Mueller ctx->merge = (sg->offset + sg->length) & 8932d97591eSStephan Mueller (PAGE_SIZE - 1); 8942d97591eSStephan 
Mueller 8952d97591eSStephan Mueller ctx->used += len; 8962d97591eSStephan Mueller copied += len; 8972d97591eSStephan Mueller size -= len; 8982d97591eSStephan Mueller continue; 8992d97591eSStephan Mueller } 9002d97591eSStephan Mueller 9012d97591eSStephan Mueller if (!af_alg_writable(sk)) { 9022d97591eSStephan Mueller err = af_alg_wait_for_wmem(sk, msg->msg_flags); 9032d97591eSStephan Mueller if (err) 9042d97591eSStephan Mueller goto unlock; 9052d97591eSStephan Mueller } 9062d97591eSStephan Mueller 9072d97591eSStephan Mueller /* allocate a new page */ 9082d97591eSStephan Mueller len = min_t(unsigned long, len, af_alg_sndbuf(sk)); 9092d97591eSStephan Mueller 9102d97591eSStephan Mueller err = af_alg_alloc_tsgl(sk); 9112d97591eSStephan Mueller if (err) 9122d97591eSStephan Mueller goto unlock; 9132d97591eSStephan Mueller 9142d97591eSStephan Mueller sgl = list_entry(ctx->tsgl_list.prev, struct af_alg_tsgl, 9152d97591eSStephan Mueller list); 9162d97591eSStephan Mueller sg = sgl->sg; 9172d97591eSStephan Mueller if (sgl->cur) 9182d97591eSStephan Mueller sg_unmark_end(sg + sgl->cur - 1); 9192d97591eSStephan Mueller 9202d97591eSStephan Mueller do { 9212d97591eSStephan Mueller unsigned int i = sgl->cur; 9222d97591eSStephan Mueller 9232d97591eSStephan Mueller plen = min_t(size_t, len, PAGE_SIZE); 9242d97591eSStephan Mueller 9252d97591eSStephan Mueller sg_assign_page(sg + i, alloc_page(GFP_KERNEL)); 9262d97591eSStephan Mueller if (!sg_page(sg + i)) { 9272d97591eSStephan Mueller err = -ENOMEM; 9282d97591eSStephan Mueller goto unlock; 9292d97591eSStephan Mueller } 9302d97591eSStephan Mueller 9312d97591eSStephan Mueller err = memcpy_from_msg(page_address(sg_page(sg + i)), 9322d97591eSStephan Mueller msg, plen); 9332d97591eSStephan Mueller if (err) { 9342d97591eSStephan Mueller __free_page(sg_page(sg + i)); 9352d97591eSStephan Mueller sg_assign_page(sg + i, NULL); 9362d97591eSStephan Mueller goto unlock; 9372d97591eSStephan Mueller } 9382d97591eSStephan Mueller 9392d97591eSStephan 
Mueller sg[i].length = plen; 9402d97591eSStephan Mueller len -= plen; 9412d97591eSStephan Mueller ctx->used += plen; 9422d97591eSStephan Mueller copied += plen; 9432d97591eSStephan Mueller size -= plen; 9442d97591eSStephan Mueller sgl->cur++; 9452d97591eSStephan Mueller } while (len && sgl->cur < MAX_SGL_ENTS); 9462d97591eSStephan Mueller 9472d97591eSStephan Mueller if (!size) 9482d97591eSStephan Mueller sg_mark_end(sg + sgl->cur - 1); 9492d97591eSStephan Mueller 9502d97591eSStephan Mueller ctx->merge = plen & (PAGE_SIZE - 1); 9512d97591eSStephan Mueller } 9522d97591eSStephan Mueller 9532d97591eSStephan Mueller err = 0; 9542d97591eSStephan Mueller 9552d97591eSStephan Mueller ctx->more = msg->msg_flags & MSG_MORE; 9562d97591eSStephan Mueller 9572d97591eSStephan Mueller unlock: 9582d97591eSStephan Mueller af_alg_data_wakeup(sk); 9592d97591eSStephan Mueller release_sock(sk); 9602d97591eSStephan Mueller 9612d97591eSStephan Mueller return copied ?: err; 9622d97591eSStephan Mueller } 9632d97591eSStephan Mueller EXPORT_SYMBOL_GPL(af_alg_sendmsg); 9642d97591eSStephan Mueller 9652d97591eSStephan Mueller /** 9662d97591eSStephan Mueller * af_alg_sendpage - sendpage system call handler 9672d97591eSStephan Mueller * 9682d97591eSStephan Mueller * This is a generic implementation of sendpage to fill ctx->tsgl_list. 
9692d97591eSStephan Mueller */ 9702d97591eSStephan Mueller ssize_t af_alg_sendpage(struct socket *sock, struct page *page, 9712d97591eSStephan Mueller int offset, size_t size, int flags) 9722d97591eSStephan Mueller { 9732d97591eSStephan Mueller struct sock *sk = sock->sk; 9742d97591eSStephan Mueller struct alg_sock *ask = alg_sk(sk); 9752d97591eSStephan Mueller struct af_alg_ctx *ctx = ask->private; 9762d97591eSStephan Mueller struct af_alg_tsgl *sgl; 9772d97591eSStephan Mueller int err = -EINVAL; 9782d97591eSStephan Mueller 9792d97591eSStephan Mueller if (flags & MSG_SENDPAGE_NOTLAST) 9802d97591eSStephan Mueller flags |= MSG_MORE; 9812d97591eSStephan Mueller 9822d97591eSStephan Mueller lock_sock(sk); 9832d97591eSStephan Mueller if (!ctx->more && ctx->used) 9842d97591eSStephan Mueller goto unlock; 9852d97591eSStephan Mueller 9862d97591eSStephan Mueller if (!size) 9872d97591eSStephan Mueller goto done; 9882d97591eSStephan Mueller 9892d97591eSStephan Mueller if (!af_alg_writable(sk)) { 9902d97591eSStephan Mueller err = af_alg_wait_for_wmem(sk, flags); 9912d97591eSStephan Mueller if (err) 9922d97591eSStephan Mueller goto unlock; 9932d97591eSStephan Mueller } 9942d97591eSStephan Mueller 9952d97591eSStephan Mueller err = af_alg_alloc_tsgl(sk); 9962d97591eSStephan Mueller if (err) 9972d97591eSStephan Mueller goto unlock; 9982d97591eSStephan Mueller 9992d97591eSStephan Mueller ctx->merge = 0; 10002d97591eSStephan Mueller sgl = list_entry(ctx->tsgl_list.prev, struct af_alg_tsgl, list); 10012d97591eSStephan Mueller 10022d97591eSStephan Mueller if (sgl->cur) 10032d97591eSStephan Mueller sg_unmark_end(sgl->sg + sgl->cur - 1); 10042d97591eSStephan Mueller 10052d97591eSStephan Mueller sg_mark_end(sgl->sg + sgl->cur); 10062d97591eSStephan Mueller 10072d97591eSStephan Mueller get_page(page); 10082d97591eSStephan Mueller sg_set_page(sgl->sg + sgl->cur, page, size, offset); 10092d97591eSStephan Mueller sgl->cur++; 10102d97591eSStephan Mueller ctx->used += size; 10112d97591eSStephan 
Mueller 10122d97591eSStephan Mueller done: 10132d97591eSStephan Mueller ctx->more = flags & MSG_MORE; 10142d97591eSStephan Mueller 10152d97591eSStephan Mueller unlock: 10162d97591eSStephan Mueller af_alg_data_wakeup(sk); 10172d97591eSStephan Mueller release_sock(sk); 10182d97591eSStephan Mueller 10192d97591eSStephan Mueller return err ?: size; 10202d97591eSStephan Mueller } 10212d97591eSStephan Mueller EXPORT_SYMBOL_GPL(af_alg_sendpage); 10222d97591eSStephan Mueller 10232d97591eSStephan Mueller /** 10247d2c3f54SStephan Mueller * af_alg_free_resources - release resources required for crypto request 10257d2c3f54SStephan Mueller */ 10267d2c3f54SStephan Mueller void af_alg_free_resources(struct af_alg_async_req *areq) 10277d2c3f54SStephan Mueller { 10287d2c3f54SStephan Mueller struct sock *sk = areq->sk; 10297d2c3f54SStephan Mueller 10307d2c3f54SStephan Mueller af_alg_free_areq_sgls(areq); 10317d2c3f54SStephan Mueller sock_kfree_s(sk, areq, areq->areqlen); 10327d2c3f54SStephan Mueller } 10337d2c3f54SStephan Mueller EXPORT_SYMBOL_GPL(af_alg_free_resources); 10347d2c3f54SStephan Mueller 10357d2c3f54SStephan Mueller /** 10362d97591eSStephan Mueller * af_alg_async_cb - AIO callback handler 10372d97591eSStephan Mueller * 10382d97591eSStephan Mueller * This handler cleans up the struct af_alg_async_req upon completion of the 10392d97591eSStephan Mueller * AIO operation. 10402d97591eSStephan Mueller * 10412d97591eSStephan Mueller * The number of bytes to be generated with the AIO operation must be set 10422d97591eSStephan Mueller * in areq->outlen before the AIO callback handler is invoked. 
10432d97591eSStephan Mueller */ 10442d97591eSStephan Mueller void af_alg_async_cb(struct crypto_async_request *_req, int err) 10452d97591eSStephan Mueller { 10462d97591eSStephan Mueller struct af_alg_async_req *areq = _req->data; 10472d97591eSStephan Mueller struct sock *sk = areq->sk; 10482d97591eSStephan Mueller struct kiocb *iocb = areq->iocb; 10492d97591eSStephan Mueller unsigned int resultlen; 10502d97591eSStephan Mueller 10512d97591eSStephan Mueller /* Buffer size written by crypto operation. */ 10522d97591eSStephan Mueller resultlen = areq->outlen; 10532d97591eSStephan Mueller 10547d2c3f54SStephan Mueller af_alg_free_resources(areq); 10557d2c3f54SStephan Mueller sock_put(sk); 10562d97591eSStephan Mueller 10572d97591eSStephan Mueller iocb->ki_complete(iocb, err ? err : resultlen, 0); 10582d97591eSStephan Mueller } 10592d97591eSStephan Mueller EXPORT_SYMBOL_GPL(af_alg_async_cb); 10602d97591eSStephan Mueller 10612d97591eSStephan Mueller /** 10622d97591eSStephan Mueller * af_alg_poll - poll system call handler 10632d97591eSStephan Mueller */ 10642d97591eSStephan Mueller unsigned int af_alg_poll(struct file *file, struct socket *sock, 10652d97591eSStephan Mueller poll_table *wait) 10662d97591eSStephan Mueller { 10672d97591eSStephan Mueller struct sock *sk = sock->sk; 10682d97591eSStephan Mueller struct alg_sock *ask = alg_sk(sk); 10692d97591eSStephan Mueller struct af_alg_ctx *ctx = ask->private; 10702d97591eSStephan Mueller unsigned int mask; 10712d97591eSStephan Mueller 10722d97591eSStephan Mueller sock_poll_wait(file, sk_sleep(sk), wait); 10732d97591eSStephan Mueller mask = 0; 10742d97591eSStephan Mueller 10752d97591eSStephan Mueller if (!ctx->more || ctx->used) 10762d97591eSStephan Mueller mask |= POLLIN | POLLRDNORM; 10772d97591eSStephan Mueller 10782d97591eSStephan Mueller if (af_alg_writable(sk)) 10792d97591eSStephan Mueller mask |= POLLOUT | POLLWRNORM | POLLWRBAND; 10802d97591eSStephan Mueller 10812d97591eSStephan Mueller return mask; 
10822d97591eSStephan Mueller } 10832d97591eSStephan Mueller EXPORT_SYMBOL_GPL(af_alg_poll); 10842d97591eSStephan Mueller 10852d97591eSStephan Mueller /** 10862d97591eSStephan Mueller * af_alg_alloc_areq - allocate struct af_alg_async_req 10872d97591eSStephan Mueller * 10882d97591eSStephan Mueller * @sk socket of connection to user space 10892d97591eSStephan Mueller * @areqlen size of struct af_alg_async_req + crypto_*_reqsize 10902d97591eSStephan Mueller * @return allocated data structure or ERR_PTR upon error 10912d97591eSStephan Mueller */ 10922d97591eSStephan Mueller struct af_alg_async_req *af_alg_alloc_areq(struct sock *sk, 10932d97591eSStephan Mueller unsigned int areqlen) 10942d97591eSStephan Mueller { 10952d97591eSStephan Mueller struct af_alg_async_req *areq = sock_kmalloc(sk, areqlen, GFP_KERNEL); 10962d97591eSStephan Mueller 10972d97591eSStephan Mueller if (unlikely(!areq)) 10982d97591eSStephan Mueller return ERR_PTR(-ENOMEM); 10992d97591eSStephan Mueller 11002d97591eSStephan Mueller areq->areqlen = areqlen; 11012d97591eSStephan Mueller areq->sk = sk; 11022d97591eSStephan Mueller areq->last_rsgl = NULL; 11032d97591eSStephan Mueller INIT_LIST_HEAD(&areq->rsgl_list); 11042d97591eSStephan Mueller areq->tsgl = NULL; 11052d97591eSStephan Mueller areq->tsgl_entries = 0; 11062d97591eSStephan Mueller 11072d97591eSStephan Mueller return areq; 11082d97591eSStephan Mueller } 11092d97591eSStephan Mueller EXPORT_SYMBOL_GPL(af_alg_alloc_areq); 11102d97591eSStephan Mueller 11112d97591eSStephan Mueller /** 11122d97591eSStephan Mueller * af_alg_get_rsgl - create the RX SGL for the output data from the crypto 11132d97591eSStephan Mueller * operation 11142d97591eSStephan Mueller * 11152d97591eSStephan Mueller * @sk socket of connection to user space 11162d97591eSStephan Mueller * @msg user space message 11172d97591eSStephan Mueller * @flags flags used to invoke recvmsg with 11182d97591eSStephan Mueller * @areq instance of the cryptographic request that will hold the RX SGL 
11192d97591eSStephan Mueller * @maxsize maximum number of bytes to be pulled from user space 11202d97591eSStephan Mueller * @outlen number of bytes in the RX SGL 11212d97591eSStephan Mueller * @return 0 on success, < 0 upon error 11222d97591eSStephan Mueller */ 11232d97591eSStephan Mueller int af_alg_get_rsgl(struct sock *sk, struct msghdr *msg, int flags, 11242d97591eSStephan Mueller struct af_alg_async_req *areq, size_t maxsize, 11252d97591eSStephan Mueller size_t *outlen) 11262d97591eSStephan Mueller { 11272d97591eSStephan Mueller struct alg_sock *ask = alg_sk(sk); 11282d97591eSStephan Mueller struct af_alg_ctx *ctx = ask->private; 11292d97591eSStephan Mueller size_t len = 0; 11302d97591eSStephan Mueller 11312d97591eSStephan Mueller while (maxsize > len && msg_data_left(msg)) { 11322d97591eSStephan Mueller struct af_alg_rsgl *rsgl; 11332d97591eSStephan Mueller size_t seglen; 11342d97591eSStephan Mueller int err; 11352d97591eSStephan Mueller 11362d97591eSStephan Mueller /* limit the amount of readable buffers */ 11372d97591eSStephan Mueller if (!af_alg_readable(sk)) 11382d97591eSStephan Mueller break; 11392d97591eSStephan Mueller 11402d97591eSStephan Mueller if (!ctx->used) { 11412d97591eSStephan Mueller err = af_alg_wait_for_data(sk, flags); 11422d97591eSStephan Mueller if (err) 11432d97591eSStephan Mueller return err; 11442d97591eSStephan Mueller } 11452d97591eSStephan Mueller 11462d97591eSStephan Mueller seglen = min_t(size_t, (maxsize - len), 11472d97591eSStephan Mueller msg_data_left(msg)); 11482d97591eSStephan Mueller 11492d97591eSStephan Mueller if (list_empty(&areq->rsgl_list)) { 11502d97591eSStephan Mueller rsgl = &areq->first_rsgl; 11512d97591eSStephan Mueller } else { 11522d97591eSStephan Mueller rsgl = sock_kmalloc(sk, sizeof(*rsgl), GFP_KERNEL); 11532d97591eSStephan Mueller if (unlikely(!rsgl)) 11542d97591eSStephan Mueller return -ENOMEM; 11552d97591eSStephan Mueller } 11562d97591eSStephan Mueller 11572d97591eSStephan Mueller rsgl->sgl.npages = 0; 
11582d97591eSStephan Mueller list_add_tail(&rsgl->list, &areq->rsgl_list); 11592d97591eSStephan Mueller 11602d97591eSStephan Mueller /* make one iovec available as scatterlist */ 11612d97591eSStephan Mueller err = af_alg_make_sg(&rsgl->sgl, &msg->msg_iter, seglen); 11622d97591eSStephan Mueller if (err < 0) 11632d97591eSStephan Mueller return err; 11642d97591eSStephan Mueller 11652d97591eSStephan Mueller /* chain the new scatterlist with previous one */ 11662d97591eSStephan Mueller if (areq->last_rsgl) 11672d97591eSStephan Mueller af_alg_link_sg(&areq->last_rsgl->sgl, &rsgl->sgl); 11682d97591eSStephan Mueller 11692d97591eSStephan Mueller areq->last_rsgl = rsgl; 11702d97591eSStephan Mueller len += err; 11712d97591eSStephan Mueller ctx->rcvused += err; 11722d97591eSStephan Mueller rsgl->sg_num_bytes = err; 11732d97591eSStephan Mueller iov_iter_advance(&msg->msg_iter, err); 11742d97591eSStephan Mueller } 11752d97591eSStephan Mueller 11762d97591eSStephan Mueller *outlen = len; 11772d97591eSStephan Mueller return 0; 11782d97591eSStephan Mueller } 11792d97591eSStephan Mueller EXPORT_SYMBOL_GPL(af_alg_get_rsgl); 11802d97591eSStephan Mueller 118103c8efc1SHerbert Xu static int __init af_alg_init(void) 118203c8efc1SHerbert Xu { 118303c8efc1SHerbert Xu int err = proto_register(&alg_proto, 0); 118403c8efc1SHerbert Xu 118503c8efc1SHerbert Xu if (err) 118603c8efc1SHerbert Xu goto out; 118703c8efc1SHerbert Xu 118803c8efc1SHerbert Xu err = sock_register(&alg_family); 118903c8efc1SHerbert Xu if (err != 0) 119003c8efc1SHerbert Xu goto out_unregister_proto; 119103c8efc1SHerbert Xu 119203c8efc1SHerbert Xu out: 119303c8efc1SHerbert Xu return err; 119403c8efc1SHerbert Xu 119503c8efc1SHerbert Xu out_unregister_proto: 119603c8efc1SHerbert Xu proto_unregister(&alg_proto); 119703c8efc1SHerbert Xu goto out; 119803c8efc1SHerbert Xu } 119903c8efc1SHerbert Xu 120003c8efc1SHerbert Xu static void __exit af_alg_exit(void) 120103c8efc1SHerbert Xu { 120203c8efc1SHerbert Xu sock_unregister(PF_ALG); 
120303c8efc1SHerbert Xu proto_unregister(&alg_proto); 120403c8efc1SHerbert Xu } 120503c8efc1SHerbert Xu 120603c8efc1SHerbert Xu module_init(af_alg_init); 120703c8efc1SHerbert Xu module_exit(af_alg_exit); 120803c8efc1SHerbert Xu MODULE_LICENSE("GPL"); 120903c8efc1SHerbert Xu MODULE_ALIAS_NETPROTO(AF_ALG); 1210