/**
 * SHA-256 routines supporting the Power 7+ Nest Accelerators driver
 *
 * Copyright (C) 2011-2012 International Business Machines Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 only.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 * Author: Kent Yoder <yoder1@us.ibm.com>
 */

#include <crypto/internal/hash.h>
#include <crypto/sha.h>
#include <linux/module.h>
#include <asm/vio.h>

#include "nx_csbcpb.h"
#include "nx.h"

static int nx_sha256_init(struct shash_desc *desc)
{
	struct sha256_state *sctx = shash_desc_ctx(desc);
	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
	struct nx_sg *out_sg;

	nx_ctx_init(nx_ctx, HCOP_FC_SHA);

	memset(sctx, 0, sizeof *sctx);

	nx_ctx->ap = &nx_ctx->props[NX_PROPS_SHA256];

	NX_CPB_SET_DIGEST_SIZE(nx_ctx->csbcpb, NX_DS_SHA256);
	out_sg = nx_build_sg_list(nx_ctx->out_sg, (u8 *)sctx->state,
				  SHA256_DIGEST_SIZE, nx_ctx->ap->sglen);
	nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg);

	return 0;
}
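
/*
 * Feed message data to the NX unit one run of complete SHA256_BLOCK_SIZE
 * blocks at a time: anything smaller is buffered in sctx->buf, and any
 * leftover from this call is carried over for the next update or final.
 * The first h_call runs as an INTERMEDIATE operation; every subsequent one
 * is also flagged as a CONTINUATION so the accelerator resumes from the
 * partial digest copied back into input_partial_digest.
 */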
static int nx_sha256_update(struct shash_desc *desc, const u8 *data,
			    unsigned int len)
{
	struct sha256_state *sctx = shash_desc_ctx(desc);
	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
	struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb;
	struct nx_sg *in_sg;
	u64 to_process, leftover;
	int rc = 0;

	if (NX_CPB_FDM(csbcpb) & NX_FDM_CONTINUATION) {
		/* we've hit the nx chip previously and we're updating again,
		 * so copy over the partial digest */
		memcpy(csbcpb->cpb.sha256.input_partial_digest,
		       csbcpb->cpb.sha256.message_digest, SHA256_DIGEST_SIZE);
	}

	/* 2 cases for total data len:
	 *  1: < SHA256_BLOCK_SIZE: copy into state, return 0
	 *  2: >= SHA256_BLOCK_SIZE: process X blocks, copy in leftover
	 */
	if (len + sctx->count < SHA256_BLOCK_SIZE) {
		memcpy(sctx->buf + sctx->count, data, len);
		sctx->count += len;
		goto out;
	}

	/* to_process: the SHA256_BLOCK_SIZE data chunk to process in this
	 * update */
	to_process = (sctx->count + len) & ~(SHA256_BLOCK_SIZE - 1);
	leftover = (sctx->count + len) & (SHA256_BLOCK_SIZE - 1);

	if (sctx->count) {
		in_sg = nx_build_sg_list(nx_ctx->in_sg, (u8 *)sctx->buf,
					 sctx->count, nx_ctx->ap->sglen);
		in_sg = nx_build_sg_list(in_sg, (u8 *)data,
					 to_process - sctx->count,
					 nx_ctx->ap->sglen);
		nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) *
					sizeof(struct nx_sg);
	} else {
		in_sg = nx_build_sg_list(nx_ctx->in_sg, (u8 *)data,
					 to_process, nx_ctx->ap->sglen);
		nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) *
					sizeof(struct nx_sg);
	}

	NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE;

	if (!nx_ctx->op.inlen || !nx_ctx->op.outlen) {
		rc = -EINVAL;
		goto out;
	}

	rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
			   desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP);
	if (rc)
		goto out;

	atomic_inc(&(nx_ctx->stats->sha256_ops));

	/* copy the leftover back into the state struct */
	if (leftover)
		memcpy(sctx->buf, data + len - leftover, leftover);
	sctx->count = leftover;

	csbcpb->cpb.sha256.message_bit_length += (u64)
		(csbcpb->cpb.sha256.spbc * 8);

	/* everything after the first update is continuation */
	NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;
out:
	return rc;
}

static int nx_sha256_final(struct shash_desc *desc, u8 *out)
{
	struct sha256_state *sctx = shash_desc_ctx(desc);
	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
	struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb;
	struct nx_sg *in_sg, *out_sg;
	int rc;

	if (NX_CPB_FDM(csbcpb) & NX_FDM_CONTINUATION) {
		/* we've hit the nx chip previously, now we're finalizing,
		 * so copy over the partial digest */
		memcpy(csbcpb->cpb.sha256.input_partial_digest,
		       csbcpb->cpb.sha256.message_digest, SHA256_DIGEST_SIZE);
	}

	/* final is represented by continuing the operation and indicating that
	 * this is not an intermediate operation */
	NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE;

	csbcpb->cpb.sha256.message_bit_length += (u64)(sctx->count * 8);

	in_sg = nx_build_sg_list(nx_ctx->in_sg, (u8 *)sctx->buf,
				 sctx->count, nx_ctx->ap->sglen);
	out_sg = nx_build_sg_list(nx_ctx->out_sg, out, SHA256_DIGEST_SIZE,
				  nx_ctx->ap->sglen);
	nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * sizeof(struct nx_sg);
	nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg);

	if (!nx_ctx->op.outlen) {
		rc = -EINVAL;
		goto out;
	}

	rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
			   desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP);
	if (rc)
		goto out;

	atomic_inc(&(nx_ctx->stats->sha256_ops));

	atomic64_add(csbcpb->cpb.sha256.message_bit_length / 8,
		     &(nx_ctx->stats->sha256_bytes));
	memcpy(out, csbcpb->cpb.sha256.message_digest, SHA256_DIGEST_SIZE);
out:
	return rc;
}

static int nx_sha256_export(struct shash_desc *desc, void *out)
{
	struct sha256_state *sctx = shash_desc_ctx(desc);
	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
	struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb;
	struct sha256_state *octx = out;

	octx->count = sctx->count +
		      (csbcpb->cpb.sha256.message_bit_length / 8);
	memcpy(octx->buf, sctx->buf, sizeof(octx->buf));

	/* if no data has been processed yet, we need to export SHA256's
	 * initial data, in case this context gets imported into a software
	 * context */
	if (csbcpb->cpb.sha256.message_bit_length)
		memcpy(octx->state, csbcpb->cpb.sha256.message_digest,
		       SHA256_DIGEST_SIZE);
	else {
		octx->state[0] = SHA256_H0;
		octx->state[1] = SHA256_H1;
		octx->state[2] = SHA256_H2;
		octx->state[3] = SHA256_H3;
		octx->state[4] = SHA256_H4;
		octx->state[5] = SHA256_H5;
		octx->state[6] = SHA256_H6;
		octx->state[7] = SHA256_H7;
	}

	return 0;
}
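
/*
 * Rebuild both the software and coprocessor state from an exported
 * sha256_state (which may also come from a software SHA-256 context):
 * whole blocks already hashed are reflected in message_bit_length, the
 * partial block goes back into sctx->buf, and the saved digest re-seeds
 * the NX unit as a continuation if any data had been processed.
 */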
static int nx_sha256_import(struct shash_desc *desc, const void *in)
{
	struct sha256_state *sctx = shash_desc_ctx(desc);
	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
	struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb;
	const struct sha256_state *ictx = in;

	memcpy(sctx->buf, ictx->buf, sizeof(ictx->buf));

	sctx->count = ictx->count & 0x3f;
	csbcpb->cpb.sha256.message_bit_length = (ictx->count & ~0x3f) * 8;

	if (csbcpb->cpb.sha256.message_bit_length) {
		memcpy(csbcpb->cpb.sha256.message_digest, ictx->state,
		       SHA256_DIGEST_SIZE);

		NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;
		NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE;
	}

	return 0;
}

struct shash_alg nx_shash_sha256_alg = {
	.digestsize = SHA256_DIGEST_SIZE,
	.init       = nx_sha256_init,
	.update     = nx_sha256_update,
	.final      = nx_sha256_final,
	.export     = nx_sha256_export,
	.import     = nx_sha256_import,
	.descsize   = sizeof(struct sha256_state),
	.statesize  = sizeof(struct sha256_state),
	.base       = {
		.cra_name        = "sha256",
		.cra_driver_name = "sha256-nx",
		.cra_priority    = 300,
		.cra_flags       = CRYPTO_ALG_TYPE_SHASH,
		.cra_blocksize   = SHA256_BLOCK_SIZE,
		.cra_module      = THIS_MODULE,
		.cra_ctxsize     = sizeof(struct nx_crypto_ctx),
		.cra_init        = nx_crypto_ctx_sha_init,
		.cra_exit        = nx_crypto_ctx_exit,
	}
};