/*
 * algif_skcipher: User-space interface for skcipher algorithms
 *
 * This file provides the user-space API for symmetric key ciphers.
 *
 * Copyright (c) 2010 Herbert Xu <herbert@gondor.apana.org.au>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 * The memory management works as follows:
 *
 * The kernel maintains two SGLs, the TX SGL and the RX SGL. The TX SGL is
 * filled by user space with the data submitted via sendpage/sendmsg. Filling
 * up the TX SGL does not cause a crypto operation -- the data is only
 * tracked by the kernel. Upon receipt of one recvmsg call, the caller must
 * provide a buffer which is tracked with the RX SGL.
 *
 * During the processing of the recvmsg operation, the cipher request is
 * allocated and prepared. As part of the recvmsg operation, the processed
 * TX buffers are extracted from the TX SGL into a separate SGL.
 *
 * After the completion of the crypto operation, the RX SGL and the cipher
 * request are released. The extracted TX SGL parts are released together
 * with the RX SGL release.
 */
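
/*
 * Illustrative user-space flow (a minimal sketch, not part of this file's
 * build): an application binds an AF_ALG socket of type "skcipher", sets the
 * key, accepts an operation socket, submits plaintext via sendmsg() together
 * with the ALG_SET_OP and ALG_SET_IV control messages, and reads the result
 * back, which drives the recvmsg path below. The algorithm "cbc(aes)", the
 * 16-byte key length and the identifiers tfmfd, opfd, key, msg and ct are
 * example assumptions; error handling and the msghdr/cmsg setup are omitted.
 *
 *	struct sockaddr_alg sa = {
 *		.salg_family = AF_ALG,
 *		.salg_type   = "skcipher",
 *		.salg_name   = "cbc(aes)",
 *	};
 *	int tfmfd = socket(AF_ALG, SOCK_SEQPACKET, 0);
 *
 *	bind(tfmfd, (struct sockaddr *)&sa, sizeof(sa));
 *	setsockopt(tfmfd, SOL_ALG, ALG_SET_KEY, key, 16);
 *
 *	int opfd = accept(tfmfd, NULL, 0);
 *
 *	sendmsg(opfd, &msg, 0);		fills the TX SGL
 *	read(opfd, ct, sizeof(ct));	triggers the cipher op via the RX SGL
 */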

#include <crypto/scatterwalk.h>
#include <crypto/skcipher.h>
#include <crypto/if_alg.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/net.h>
#include <net/sock.h>

static int skcipher_sendmsg(struct socket *sock, struct msghdr *msg,
			    size_t size)
{
	struct sock *sk = sock->sk;
	struct alg_sock *ask = alg_sk(sk);
	struct sock *psk = ask->parent;
	struct alg_sock *pask = alg_sk(psk);
	struct crypto_skcipher *tfm = pask->private;
	unsigned int ivsize = crypto_skcipher_ivsize(tfm);

	return af_alg_sendmsg(sock, msg, size, ivsize);
}

static int _skcipher_recvmsg(struct socket *sock, struct msghdr *msg,
			     size_t ignored, int flags)
{
	struct sock *sk = sock->sk;
	struct alg_sock *ask = alg_sk(sk);
	struct sock *psk = ask->parent;
	struct alg_sock *pask = alg_sk(psk);
	struct af_alg_ctx *ctx = ask->private;
	struct crypto_skcipher *tfm = pask->private;
	unsigned int bs = crypto_skcipher_blocksize(tfm);
	struct af_alg_async_req *areq;
	int err = 0;
	size_t len = 0;

	if (!ctx->used) {
		err = af_alg_wait_for_data(sk, flags);
		if (err)
			return err;
	}

	/* Allocate the cipher request for the current operation. */
	areq = af_alg_alloc_areq(sk, sizeof(struct af_alg_async_req) +
				     crypto_skcipher_reqsize(tfm));
	if (IS_ERR(areq))
		return PTR_ERR(areq);

	/* Convert the iovecs of the output buffers into the RX SGL. */
	err = af_alg_get_rsgl(sk, msg, flags, areq, -1, &len);
	if (err)
		goto free;

	/* Process only as much of the RX buffers as we have TX data for. */
	if (len > ctx->used)
		len = ctx->used;

	/*
	 * If more data is expected, process only full block-size buffers.
	 */
	if (ctx->more || len < ctx->used)
		len -= len % bs;

	/*
	 * Create a per-request TX SGL that tracks the SG entries from the
	 * global TX SGL.
	 */
	areq->tsgl_entries = af_alg_count_tsgl(sk, len, 0);
	if (!areq->tsgl_entries)
		areq->tsgl_entries = 1;
	areq->tsgl = sock_kmalloc(sk, array_size(sizeof(*areq->tsgl),
						 areq->tsgl_entries),
				  GFP_KERNEL);
	if (!areq->tsgl) {
		err = -ENOMEM;
		goto free;
	}
	sg_init_table(areq->tsgl, areq->tsgl_entries);
	af_alg_pull_tsgl(sk, len, areq->tsgl, 0);

	/* Initialize the crypto operation. */
	skcipher_request_set_tfm(&areq->cra_u.skcipher_req, tfm);
	skcipher_request_set_crypt(&areq->cra_u.skcipher_req, areq->tsgl,
				   areq->first_rsgl.sgl.sg, len, ctx->iv);

	if (msg->msg_iocb && !is_sync_kiocb(msg->msg_iocb)) {
		/* AIO operation */
		sock_hold(sk);
		areq->iocb = msg->msg_iocb;

		/* Remember the output size that will be generated. */
		areq->outlen = len;

		skcipher_request_set_callback(&areq->cra_u.skcipher_req,
					      CRYPTO_TFM_REQ_MAY_SLEEP,
					      af_alg_async_cb, areq);
		err = ctx->enc ?
			crypto_skcipher_encrypt(&areq->cra_u.skcipher_req) :
			crypto_skcipher_decrypt(&areq->cra_u.skcipher_req);

		/* AIO operation in progress */
		if (err == -EINPROGRESS || err == -EBUSY)
			return -EIOCBQUEUED;

		sock_put(sk);
	} else {
		/* Synchronous operation */
		skcipher_request_set_callback(&areq->cra_u.skcipher_req,
					      CRYPTO_TFM_REQ_MAY_SLEEP |
					      CRYPTO_TFM_REQ_MAY_BACKLOG,
					      crypto_req_done, &ctx->wait);
		err = crypto_wait_req(ctx->enc ?
			crypto_skcipher_encrypt(&areq->cra_u.skcipher_req) :
			crypto_skcipher_decrypt(&areq->cra_u.skcipher_req),
				      &ctx->wait);
	}

free:
	af_alg_free_resources(areq);

	return err ? err : len;
}

static int skcipher_recvmsg(struct socket *sock, struct msghdr *msg,
			    size_t ignored, int flags)
{
	struct sock *sk = sock->sk;
	int ret = 0;

	lock_sock(sk);
	while (msg_data_left(msg)) {
		int err = _skcipher_recvmsg(sock, msg, ignored, flags);

		/*
		 * This error covers -EIOCBQUEUED which implies that we can
		 * only handle one AIO request. If the caller wants to have
		 * multiple AIO requests in parallel, he must make multiple
		 * separate AIO calls.
		 *
		 * Also return the error if no data has been processed so far.
		 */
		if (err <= 0) {
			if (err == -EIOCBQUEUED || !ret)
				ret = err;
			goto out;
		}

		ret += err;
	}

out:
	af_alg_wmem_wakeup(sk);
	release_sock(sk);
	return ret;
}

static struct proto_ops algif_skcipher_ops = {
	.family		= PF_ALG,

	.connect	= sock_no_connect,
	.socketpair	= sock_no_socketpair,
	.getname	= sock_no_getname,
	.ioctl		= sock_no_ioctl,
	.listen		= sock_no_listen,
	.shutdown	= sock_no_shutdown,
	.getsockopt	= sock_no_getsockopt,
	.mmap		= sock_no_mmap,
	.bind		= sock_no_bind,
	.accept		= sock_no_accept,
	.setsockopt	= sock_no_setsockopt,

	.release	= af_alg_release,
	.sendmsg	= skcipher_sendmsg,
	.sendpage	= af_alg_sendpage,
	.recvmsg	= skcipher_recvmsg,
	.poll		= af_alg_poll,
};

static int skcipher_check_key(struct socket *sock)
{
	int err = 0;
	struct sock *psk;
	struct alg_sock *pask;
	struct crypto_skcipher *tfm;
	struct sock *sk = sock->sk;
	struct alg_sock *ask = alg_sk(sk);

	lock_sock(sk);
	if (ask->refcnt)
		goto unlock_child;

	psk = ask->parent;
	pask = alg_sk(ask->parent);
	tfm = pask->private;

	err = -ENOKEY;
	lock_sock_nested(psk, SINGLE_DEPTH_NESTING);
	if (crypto_skcipher_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
		goto unlock;

	if (!pask->refcnt++)
		sock_hold(psk);

	ask->refcnt = 1;
	sock_put(psk);

	err = 0;

unlock:
	release_sock(psk);
unlock_child:
	release_sock(sk);

	return err;
}

static int skcipher_sendmsg_nokey(struct socket *sock, struct msghdr *msg,
				  size_t size)
{
	int err;

	err = skcipher_check_key(sock);
	if (err)
		return err;

	return skcipher_sendmsg(sock, msg, size);
}

static ssize_t skcipher_sendpage_nokey(struct socket *sock, struct page *page,
				       int offset, size_t size, int flags)
{
	int err;

	err = skcipher_check_key(sock);
	if (err)
		return err;

	return af_alg_sendpage(sock, page, offset, size, flags);
}

static int skcipher_recvmsg_nokey(struct socket *sock, struct msghdr *msg,
				  size_t ignored, int flags)
{
	int err;

	err = skcipher_check_key(sock);
	if (err)
		return err;

	return skcipher_recvmsg(sock, msg, ignored, flags);
}

static struct proto_ops algif_skcipher_ops_nokey = {
	.family		= PF_ALG,

	.connect	= sock_no_connect,
	.socketpair	= sock_no_socketpair,
	.getname	= sock_no_getname,
	.ioctl		= sock_no_ioctl,
	.listen		= sock_no_listen,
	.shutdown	= sock_no_shutdown,
	.getsockopt	= sock_no_getsockopt,
	.mmap		= sock_no_mmap,
	.bind		= sock_no_bind,
	.accept		= sock_no_accept,
	.setsockopt	= sock_no_setsockopt,

	.release	= af_alg_release,
	.sendmsg	= skcipher_sendmsg_nokey,
	.sendpage	= skcipher_sendpage_nokey,
	.recvmsg	= skcipher_recvmsg_nokey,
	.poll		= af_alg_poll,
};

static void *skcipher_bind(const char *name, u32 type, u32 mask)
{
	return crypto_alloc_skcipher(name, type, mask);
}

static void skcipher_release(void *private)
{
	crypto_free_skcipher(private);
}

static int skcipher_setkey(void *private, const u8 *key, unsigned int keylen)
{
	return crypto_skcipher_setkey(private, key, keylen);
}

static void skcipher_sock_destruct(struct sock *sk)
{
	struct alg_sock *ask = alg_sk(sk);
	struct af_alg_ctx *ctx = ask->private;
	struct sock *psk = ask->parent;
	struct alg_sock *pask = alg_sk(psk);
	struct crypto_skcipher *tfm = pask->private;

	af_alg_pull_tsgl(sk, ctx->used, NULL, 0);
	sock_kzfree_s(sk, ctx->iv, crypto_skcipher_ivsize(tfm));
	sock_kfree_s(sk, ctx, ctx->len);
	af_alg_release_parent(sk);
}

static int skcipher_accept_parent_nokey(void *private, struct sock *sk)
{
	struct af_alg_ctx *ctx;
	struct alg_sock *ask = alg_sk(sk);
	struct crypto_skcipher *tfm = private;
	unsigned int len = sizeof(*ctx);

	ctx = sock_kmalloc(sk, len, GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	ctx->iv = sock_kmalloc(sk, crypto_skcipher_ivsize(tfm),
			       GFP_KERNEL);
	if (!ctx->iv) {
		sock_kfree_s(sk, ctx, len);
		return -ENOMEM;
	}

	memset(ctx->iv, 0, crypto_skcipher_ivsize(tfm));

	INIT_LIST_HEAD(&ctx->tsgl_list);
	ctx->len = len;
	ctx->used = 0;
	atomic_set(&ctx->rcvused, 0);
	ctx->more = 0;
	ctx->merge = 0;
	ctx->enc = 0;
	crypto_init_wait(&ctx->wait);

	ask->private = ctx;

	sk->sk_destruct = skcipher_sock_destruct;

	return 0;
}

static int skcipher_accept_parent(void *private, struct sock *sk)
{
	struct crypto_skcipher *tfm = private;

	if (crypto_skcipher_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
		return -ENOKEY;

	return skcipher_accept_parent_nokey(private, sk);
}

static const struct af_alg_type algif_type_skcipher = {
	.bind		= skcipher_bind,
	.release	= skcipher_release,
	.setkey		= skcipher_setkey,
	.accept		= skcipher_accept_parent,
	.accept_nokey	= skcipher_accept_parent_nokey,
	.ops		= &algif_skcipher_ops,
	.ops_nokey	= &algif_skcipher_ops_nokey,
	.name		= "skcipher",
	.owner		= THIS_MODULE
};

static int __init algif_skcipher_init(void)
{
	return af_alg_register_type(&algif_type_skcipher);
}

static void __exit algif_skcipher_exit(void)
{
	int err = af_alg_unregister_type(&algif_type_skcipher);

	BUG_ON(err);
}

module_init(algif_skcipher_init);
module_exit(algif_skcipher_exit);
MODULE_LICENSE("GPL");