/*
 * Asynchronous Cryptographic Hash operations.
 *
 * This is the asynchronous version of hash.c with notification of
 * completion via a callback.
 *
 * Copyright (c) 2008 Loc Ho <lho@amcc.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 */

#include <crypto/internal/hash.h>
#include <crypto/scatterwalk.h>
#include <linux/bug.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/cryptouser.h>
#include <linux/compiler.h>
#include <net/netlink.h>

#include "internal.h"

struct ahash_request_priv {
        crypto_completion_t complete;
        void *data;
        u8 *result;
        void *ubuf[] CRYPTO_MINALIGN_ATTR;
};

static inline struct ahash_alg *crypto_ahash_alg(struct crypto_ahash *hash)
{
        return container_of(crypto_hash_alg_common(hash), struct ahash_alg,
                            halg);
}

static int hash_walk_next(struct crypto_hash_walk *walk)
{
        unsigned int alignmask = walk->alignmask;
        unsigned int offset = walk->offset;
        unsigned int nbytes = min(walk->entrylen,
                                  ((unsigned int)(PAGE_SIZE)) - offset);

        if (walk->flags & CRYPTO_ALG_ASYNC)
                walk->data = kmap(walk->pg);
        else
                walk->data = kmap_atomic(walk->pg);
        walk->data += offset;

        if (offset & alignmask) {
                unsigned int unaligned = alignmask + 1 - (offset & alignmask);

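                /*
                 * Example (assumed values for illustration): with an
                 * alignmask of 3 and an offset of 5, only
                 * 4 - (5 & 3) = 3 bytes remain before the next aligned
                 * boundary, so this walk step is capped at 3 bytes.
                 */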
                if (nbytes > unaligned)
                        nbytes = unaligned;
        }

        walk->entrylen -= nbytes;
        return nbytes;
}

static int hash_walk_new_entry(struct crypto_hash_walk *walk)
{
        struct scatterlist *sg;

        sg = walk->sg;
        walk->offset = sg->offset;
        walk->pg = sg_page(walk->sg) + (walk->offset >> PAGE_SHIFT);
        walk->offset = offset_in_page(walk->offset);
        walk->entrylen = sg->length;

        if (walk->entrylen > walk->total)
                walk->entrylen = walk->total;
        walk->total -= walk->entrylen;

        return hash_walk_next(walk);
}

int crypto_hash_walk_done(struct crypto_hash_walk *walk, int err)
{
        unsigned int alignmask = walk->alignmask;
        unsigned int nbytes = walk->entrylen;

        walk->data -= walk->offset;

        if (nbytes && walk->offset & alignmask && !err) {
                walk->offset = ALIGN(walk->offset, alignmask + 1);
                walk->data += walk->offset;

                nbytes = min(nbytes,
                             ((unsigned int)(PAGE_SIZE)) - walk->offset);
                walk->entrylen -= nbytes;

                return nbytes;
        }

        if (walk->flags & CRYPTO_ALG_ASYNC)
                kunmap(walk->pg);
        else {
                kunmap_atomic(walk->data);
                /*
                 * The may sleep test only makes sense for sync users.
                 * Async users don't need to sleep here anyway.
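                 * (crypto_yield() only reschedules when the request's
                 * CRYPTO_TFM_REQ_MAY_SLEEP flag is set.)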
                 */
                crypto_yield(walk->flags);
        }

        if (err)
                return err;

        if (nbytes) {
                walk->offset = 0;
                walk->pg++;
                return hash_walk_next(walk);
        }

        if (!walk->total)
                return 0;

        walk->sg = sg_next(walk->sg);

        return hash_walk_new_entry(walk);
}
EXPORT_SYMBOL_GPL(crypto_hash_walk_done);

int crypto_hash_walk_first(struct ahash_request *req,
                           struct crypto_hash_walk *walk)
{
        walk->total = req->nbytes;

        if (!walk->total) {
                walk->entrylen = 0;
                return 0;
        }

        walk->alignmask = crypto_ahash_alignmask(crypto_ahash_reqtfm(req));
        walk->sg = req->src;
        walk->flags = req->base.flags & CRYPTO_TFM_REQ_MASK;

        return hash_walk_new_entry(walk);
}
EXPORT_SYMBOL_GPL(crypto_hash_walk_first);

int crypto_ahash_walk_first(struct ahash_request *req,
                            struct crypto_hash_walk *walk)
{
        walk->total = req->nbytes;

        if (!walk->total) {
                walk->entrylen = 0;
                return 0;
        }

        walk->alignmask = crypto_ahash_alignmask(crypto_ahash_reqtfm(req));
        walk->sg = req->src;
        walk->flags = req->base.flags & CRYPTO_TFM_REQ_MASK;
        walk->flags |= CRYPTO_ALG_ASYNC;

        BUILD_BUG_ON(CRYPTO_TFM_REQ_MASK & CRYPTO_ALG_ASYNC);

        return hash_walk_new_entry(walk);
}
EXPORT_SYMBOL_GPL(crypto_ahash_walk_first);

static int ahash_setkey_unaligned(struct crypto_ahash *tfm, const u8 *key,
                                  unsigned int keylen)
{
        unsigned long alignmask = crypto_ahash_alignmask(tfm);
        int ret;
        u8 *buffer, *alignbuffer;
        unsigned long absize;

        absize = keylen + alignmask;
        buffer = kmalloc(absize, GFP_KERNEL);
        if (!buffer)
                return -ENOMEM;

        alignbuffer = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1);
        memcpy(alignbuffer, key, keylen);
        ret = tfm->setkey(tfm, alignbuffer, keylen);
        kzfree(buffer);
        return ret;
}

int crypto_ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
                        unsigned int keylen)
{
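        /*
         * Keys that violate the transform's alignmask are bounced through
         * a temporary aligned buffer by ahash_setkey_unaligned() above.
         */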
        unsigned long alignmask = crypto_ahash_alignmask(tfm);

        if ((unsigned long)key & alignmask)
                return ahash_setkey_unaligned(tfm, key, keylen);

        return tfm->setkey(tfm, key, keylen);
}
EXPORT_SYMBOL_GPL(crypto_ahash_setkey);

static int ahash_nosetkey(struct crypto_ahash *tfm, const u8 *key,
                          unsigned int keylen)
{
        return -ENOSYS;
}

static inline unsigned int ahash_align_buffer_size(unsigned len,
                                                   unsigned long mask)
{
        return len + (mask & ~(crypto_tfm_ctx_alignment() - 1));
}

static int ahash_save_req(struct ahash_request *req, crypto_completion_t cplt)
{
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
        unsigned long alignmask = crypto_ahash_alignmask(tfm);
        unsigned int ds = crypto_ahash_digestsize(tfm);
        struct ahash_request_priv *priv;

        priv = kmalloc(sizeof(*priv) + ahash_align_buffer_size(ds, alignmask),
                       (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
                       GFP_KERNEL : GFP_ATOMIC);
        if (!priv)
                return -ENOMEM;

        /*
         * WARNING: Voodoo programming below!
         *
         * The code below is obscure and hard to understand, thus explanation
         * is necessary. See include/crypto/hash.h and include/linux/crypto.h
         * to understand the layout of structures used here!
         *
         * The code here will replace portions of the ORIGINAL request with
         * pointers to new code and buffers so the hashing operation can store
         * the result in an aligned buffer. We will call the modified request
         * an ADJUSTED request.
         *
         * The newly mangled request will look as follows:
         *
         * req {
         *   .result        = ADJUSTED[new aligned buffer]
         *   .base.complete = ADJUSTED[pointer to completion function]
         *   .base.data     = ADJUSTED[*req (pointer to self)]
         *   .priv          = ADJUSTED[new priv] {
         *           .result   = ORIGINAL(result)
         *           .complete = ORIGINAL(base.complete)
         *           .data     = ORIGINAL(base.data)
         *   }
         * }
         */

        priv->result = req->result;
        priv->complete = req->base.complete;
        priv->data = req->base.data;
        /*
         * WARNING: We do not backup req->priv here! The req->priv
         *          is for internal use of the Crypto API and the
         *          user must _NOT_ _EVER_ depend on its content!
         */

        req->result = PTR_ALIGN((u8 *)priv->ubuf, alignmask + 1);
        req->base.complete = cplt;
        req->base.data = req;
        req->priv = priv;

        return 0;
}

static void ahash_restore_req(struct ahash_request *req)
{
        struct ahash_request_priv *priv = req->priv;

        /* Restore the original crypto request. */
        req->result = priv->result;
        req->base.complete = priv->complete;
        req->base.data = priv->data;
        req->priv = NULL;

        /* Free the priv allocated for the ADJUSTED request. */
        kzfree(priv);
}

static void ahash_op_unaligned_finish(struct ahash_request *req, int err)
{
        struct ahash_request_priv *priv = req->priv;

        if (err == -EINPROGRESS)
                return;

        if (!err)
                memcpy(priv->result, req->result,
                       crypto_ahash_digestsize(crypto_ahash_reqtfm(req)));

        ahash_restore_req(req);
}

static void ahash_op_unaligned_done(struct crypto_async_request *req, int err)
{
        struct ahash_request *areq = req->data;

        /*
         * Restore the original request, see ahash_op_unaligned() for what
         * goes where.
         *
         * The "struct crypto_async_request *req" here is in fact the
         * "req.base" from the ADJUSTED request from ahash_op_unaligned(),
         * and since base.data points back at the request itself, req->data
         * is also the ADJUSTED "req".
         */

        /* First copy req->result into req->priv.result */
        ahash_op_unaligned_finish(areq, err);

        /* Complete the ORIGINAL request. */
        areq->base.complete(&areq->base, err);
}

static int ahash_op_unaligned(struct ahash_request *req,
                              int (*op)(struct ahash_request *))
{
        int err;

        err = ahash_save_req(req, ahash_op_unaligned_done);
        if (err)
                return err;

        err = op(req);
        ahash_op_unaligned_finish(req, err);

        return err;
}

static int crypto_ahash_op(struct ahash_request *req,
                           int (*op)(struct ahash_request *))
{
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
        unsigned long alignmask = crypto_ahash_alignmask(tfm);

        if ((unsigned long)req->result & alignmask)
                return ahash_op_unaligned(req, op);

        return op(req);
}

int crypto_ahash_final(struct ahash_request *req)
{
        return crypto_ahash_op(req, crypto_ahash_reqtfm(req)->final);
}
EXPORT_SYMBOL_GPL(crypto_ahash_final);

int crypto_ahash_finup(struct ahash_request *req)
{
        return crypto_ahash_op(req, crypto_ahash_reqtfm(req)->finup);
}
EXPORT_SYMBOL_GPL(crypto_ahash_finup);

int crypto_ahash_digest(struct ahash_request *req)
{
        return crypto_ahash_op(req, crypto_ahash_reqtfm(req)->digest);
}
EXPORT_SYMBOL_GPL(crypto_ahash_digest);

static void ahash_def_finup_finish2(struct ahash_request *req, int err)
{
        struct ahash_request_priv *priv = req->priv;

        if (err == -EINPROGRESS)
                return;

        if (!err)
                memcpy(priv->result, req->result,
                       crypto_ahash_digestsize(crypto_ahash_reqtfm(req)));

        ahash_restore_req(req);
}

static void ahash_def_finup_done2(struct crypto_async_request *req, int err)
{
        struct ahash_request *areq = req->data;

        ahash_def_finup_finish2(areq, err);

        areq->base.complete(&areq->base, err);
}

static int ahash_def_finup_finish1(struct ahash_request *req, int err)
{
        if (err)
                goto out;

        req->base.complete = ahash_def_finup_done2;
        req->base.flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
        err = crypto_ahash_reqtfm(req)->final(req);

out:
        ahash_def_finup_finish2(req, err);
        return err;
}

static void ahash_def_finup_done1(struct crypto_async_request *req, int err)
{
        struct ahash_request *areq = req->data;

        err = ahash_def_finup_finish1(areq, err);

        areq->base.complete(&areq->base, err);
}

static int ahash_def_finup(struct ahash_request *req)
{
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
        int err;

        err = ahash_save_req(req, ahash_def_finup_done1);
        if (err)
                return err;

        err = tfm->update(req);
        return ahash_def_finup_finish1(req, err);
}

static int ahash_no_export(struct ahash_request *req, void *out)
{
        return -ENOSYS;
}

static int ahash_no_import(struct ahash_request *req, const void *in)
{
        return -ENOSYS;
}

static int crypto_ahash_init_tfm(struct crypto_tfm *tfm)
{
        struct crypto_ahash *hash = __crypto_ahash_cast(tfm);
        struct ahash_alg *alg = crypto_ahash_alg(hash);

        hash->setkey = ahash_nosetkey;
        hash->has_setkey = false;
        hash->export = ahash_no_export;
        hash->import = ahash_no_import;

        if (tfm->__crt_alg->cra_type != &crypto_ahash_type)
                return crypto_init_shash_ops_async(tfm);

        hash->init = alg->init;
        hash->update = alg->update;
        hash->final = alg->final;
        hash->finup = alg->finup ?: ahash_def_finup;
        hash->digest = alg->digest;

        if (alg->setkey) {
                hash->setkey = alg->setkey;
                hash->has_setkey = true;
        }
        if (alg->export)
                hash->export = alg->export;
        if (alg->import)
                hash->import = alg->import;

        return 0;
}

static unsigned int crypto_ahash_extsize(struct crypto_alg *alg)
{
        if (alg->cra_type != &crypto_ahash_type)
                return sizeof(struct crypto_shash *);

        return crypto_alg_extsize(alg);
}

#ifdef CONFIG_NET
static int crypto_ahash_report(struct sk_buff *skb, struct crypto_alg *alg)
{
        struct crypto_report_hash rhash;

        strncpy(rhash.type, "ahash", sizeof(rhash.type));

        rhash.blocksize = alg->cra_blocksize;
        rhash.digestsize = __crypto_hash_alg_common(alg)->digestsize;

        if (nla_put(skb, CRYPTOCFGA_REPORT_HASH,
                    sizeof(struct crypto_report_hash), &rhash))
                goto nla_put_failure;
        return 0;

nla_put_failure:
        return -EMSGSIZE;
}
#else
static int crypto_ahash_report(struct sk_buff *skb, struct crypto_alg *alg)
{
        return -ENOSYS;
}
#endif

static void crypto_ahash_show(struct seq_file *m, struct crypto_alg *alg)
        __maybe_unused;
static void crypto_ahash_show(struct seq_file *m, struct crypto_alg *alg)
{
        seq_printf(m, "type         : ahash\n");
        seq_printf(m, "async        : %s\n", alg->cra_flags & CRYPTO_ALG_ASYNC ?
                                             "yes" : "no");
        seq_printf(m, "blocksize    : %u\n", alg->cra_blocksize);
        seq_printf(m, "digestsize   : %u\n",
                   __crypto_hash_alg_common(alg)->digestsize);
}

const struct crypto_type crypto_ahash_type = {
        .extsize = crypto_ahash_extsize,
        .init_tfm = crypto_ahash_init_tfm,
#ifdef CONFIG_PROC_FS
        .show = crypto_ahash_show,
#endif
        .report = crypto_ahash_report,
        .maskclear = ~CRYPTO_ALG_TYPE_MASK,
        .maskset = CRYPTO_ALG_TYPE_AHASH_MASK,
        .type = CRYPTO_ALG_TYPE_AHASH,
        .tfmsize = offsetof(struct crypto_ahash, base),
};
EXPORT_SYMBOL_GPL(crypto_ahash_type);

struct crypto_ahash *crypto_alloc_ahash(const char *alg_name, u32 type,
                                        u32 mask)
{
        return crypto_alloc_tfm(alg_name, &crypto_ahash_type, type, mask);
}
EXPORT_SYMBOL_GPL(crypto_alloc_ahash);

int crypto_has_ahash(const char *alg_name, u32 type, u32 mask)
{
        return crypto_type_has_alg(alg_name, &crypto_ahash_type, type, mask);
}
EXPORT_SYMBOL_GPL(crypto_has_ahash);

static int ahash_prepare_alg(struct ahash_alg *alg)
{
        struct crypto_alg *base = &alg->halg.base;

        if (alg->halg.digestsize > PAGE_SIZE / 8 ||
            alg->halg.statesize > PAGE_SIZE / 8 ||
            alg->halg.statesize == 0)
                return -EINVAL;

        base->cra_type = &crypto_ahash_type;
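        /* Force the type bits to CRYPTO_ALG_TYPE_AHASH. */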
        base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK;
        base->cra_flags |= CRYPTO_ALG_TYPE_AHASH;

        return 0;
}

int crypto_register_ahash(struct ahash_alg *alg)
{
        struct crypto_alg *base = &alg->halg.base;
        int err;

        err = ahash_prepare_alg(alg);
        if (err)
                return err;

        return crypto_register_alg(base);
}
EXPORT_SYMBOL_GPL(crypto_register_ahash);

int crypto_unregister_ahash(struct ahash_alg *alg)
{
        return crypto_unregister_alg(&alg->halg.base);
}
EXPORT_SYMBOL_GPL(crypto_unregister_ahash);

int ahash_register_instance(struct crypto_template *tmpl,
                            struct ahash_instance *inst)
{
        int err;

        err = ahash_prepare_alg(&inst->alg);
        if (err)
                return err;

        return crypto_register_instance(tmpl, ahash_crypto_instance(inst));
}
EXPORT_SYMBOL_GPL(ahash_register_instance);

void ahash_free_instance(struct crypto_instance *inst)
{
        crypto_drop_spawn(crypto_instance_ctx(inst));
        kfree(ahash_instance(inst));
}
EXPORT_SYMBOL_GPL(ahash_free_instance);

int crypto_init_ahash_spawn(struct crypto_ahash_spawn *spawn,
                            struct hash_alg_common *alg,
                            struct crypto_instance *inst)
{
        return crypto_init_spawn2(&spawn->base, &alg->base, inst,
                                  &crypto_ahash_type);
}
EXPORT_SYMBOL_GPL(crypto_init_ahash_spawn);

struct hash_alg_common *ahash_attr_alg(struct rtattr *rta, u32 type, u32 mask)
{
        struct crypto_alg *alg;

        alg = crypto_attr_alg2(rta, &crypto_ahash_type, type, mask);
        return IS_ERR(alg) ? ERR_CAST(alg) : __crypto_hash_alg_common(alg);
}
EXPORT_SYMBOL_GPL(ahash_attr_alg);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Asynchronous cryptographic hash type");