// SPDX-License-Identifier: GPL-2.0-only
/*
 * AMD Cryptographic Coprocessor (CCP) SHA crypto API support
 *
 * Copyright (C) 2013,2018 Advanced Micro Devices, Inc.
 *
 * Author: Tom Lendacky <thomas.lendacky@amd.com>
 * Author: Gary R Hook <gary.hook@amd.com>
 */

#include <linux/module.h>
#include <linux/sched.h>
#include <linux/delay.h>
#include <linux/scatterlist.h>
#include <linux/crypto.h>
#include <crypto/algapi.h>
#include <crypto/hash.h>
#include <crypto/hmac.h>
#include <crypto/internal/hash.h>
#include <crypto/sha.h>
#include <crypto/scatterwalk.h>

#include "ccp-crypto.h"

static int ccp_sha_complete(struct crypto_async_request *async_req, int ret)
{
        struct ahash_request *req = ahash_request_cast(async_req);
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
        struct ccp_sha_req_ctx *rctx = ahash_request_ctx(req);
        unsigned int digest_size = crypto_ahash_digestsize(tfm);

        if (ret)
                goto e_free;

        if (rctx->hash_rem) {
                /* Save remaining data to buffer */
                unsigned int offset = rctx->nbytes - rctx->hash_rem;

                scatterwalk_map_and_copy(rctx->buf, rctx->src,
                                         offset, rctx->hash_rem, 0);
                rctx->buf_count = rctx->hash_rem;
        } else {
                rctx->buf_count = 0;
        }

        /* Update result area if supplied */
        if (req->result && rctx->final)
                memcpy(req->result, rctx->ctx, digest_size);

e_free:
        sg_free_table(&rctx->data_sg);

        return ret;
}

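/*
 * Hash as much of the buffered and newly supplied data as the CCP can
 * accept. On a non-final operation only whole blocks are submitted;
 * the trailing remainder (hash_rem) stays in req->src and is copied
 * back into rctx->buf by ccp_sha_complete() once the command finishes.
 */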
static int ccp_do_sha_update(struct ahash_request *req, unsigned int nbytes,
                             unsigned int final)
{
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
        struct ccp_ctx *ctx = crypto_ahash_ctx(tfm);
        struct ccp_sha_req_ctx *rctx = ahash_request_ctx(req);
        struct scatterlist *sg;
        unsigned int block_size =
                crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
        unsigned int sg_count;
        gfp_t gfp;
        u64 len;
        int ret;

        len = (u64)rctx->buf_count + (u64)nbytes;

        if (!final && (len <= block_size)) {
                scatterwalk_map_and_copy(rctx->buf + rctx->buf_count, req->src,
                                         0, nbytes, 0);
                rctx->buf_count += nbytes;

                return 0;
        }

        rctx->src = req->src;
        rctx->nbytes = nbytes;

        rctx->final = final;
        rctx->hash_rem = final ? 0 : len & (block_size - 1);
        rctx->hash_cnt = len - rctx->hash_rem;
        if (!final && !rctx->hash_rem) {
                /* CCP can't do zero length final, so keep some data around */
                rctx->hash_cnt -= block_size;
                rctx->hash_rem = block_size;
        }

        /* Initialize the context scatterlist */
        sg_init_one(&rctx->ctx_sg, rctx->ctx, sizeof(rctx->ctx));

        sg = NULL;
        if (rctx->buf_count && nbytes) {
                /* Build the data scatterlist table - allocate enough entries
                 * for both data pieces (buffer and input data)
                 */
                gfp = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
                        GFP_KERNEL : GFP_ATOMIC;
                sg_count = sg_nents(req->src) + 1;
                ret = sg_alloc_table(&rctx->data_sg, sg_count, gfp);
                if (ret)
                        return ret;

                sg_init_one(&rctx->buf_sg, rctx->buf, rctx->buf_count);
                sg = ccp_crypto_sg_table_add(&rctx->data_sg, &rctx->buf_sg);
                if (!sg) {
                        ret = -EINVAL;
                        goto e_free;
                }
                sg = ccp_crypto_sg_table_add(&rctx->data_sg, req->src);
                if (!sg) {
                        ret = -EINVAL;
                        goto e_free;
                }
                sg_mark_end(sg);

                sg = rctx->data_sg.sgl;
        } else if (rctx->buf_count) {
                sg_init_one(&rctx->buf_sg, rctx->buf, rctx->buf_count);

                sg = &rctx->buf_sg;
        } else if (nbytes) {
                sg = req->src;
        }

        rctx->msg_bits += (rctx->hash_cnt << 3);        /* Total in bits */

        memset(&rctx->cmd, 0, sizeof(rctx->cmd));
        INIT_LIST_HEAD(&rctx->cmd.entry);
        rctx->cmd.engine = CCP_ENGINE_SHA;
        rctx->cmd.u.sha.type = rctx->type;
        rctx->cmd.u.sha.ctx = &rctx->ctx_sg;

        switch (rctx->type) {
        case CCP_SHA_TYPE_1:
                rctx->cmd.u.sha.ctx_len = SHA1_DIGEST_SIZE;
                break;
        case CCP_SHA_TYPE_224:
                rctx->cmd.u.sha.ctx_len = SHA224_DIGEST_SIZE;
                break;
        case CCP_SHA_TYPE_256:
                rctx->cmd.u.sha.ctx_len = SHA256_DIGEST_SIZE;
                break;
        case CCP_SHA_TYPE_384:
                rctx->cmd.u.sha.ctx_len = SHA384_DIGEST_SIZE;
                break;
        case CCP_SHA_TYPE_512:
                rctx->cmd.u.sha.ctx_len = SHA512_DIGEST_SIZE;
                break;
        default:
                /* Should never get here */
                break;
        }

        rctx->cmd.u.sha.src = sg;
        rctx->cmd.u.sha.src_len = rctx->hash_cnt;
        rctx->cmd.u.sha.opad = ctx->u.sha.key_len ?
                &ctx->u.sha.opad_sg : NULL;
        rctx->cmd.u.sha.opad_len = ctx->u.sha.key_len ?
                ctx->u.sha.opad_count : 0;
        rctx->cmd.u.sha.first = rctx->first;
        rctx->cmd.u.sha.final = rctx->final;
        rctx->cmd.u.sha.msg_bits = rctx->msg_bits;

        rctx->first = 0;

        ret = ccp_crypto_enqueue_request(&req->base, &rctx->cmd);

        return ret;

e_free:
        sg_free_table(&rctx->data_sg);

        return ret;
}

static int ccp_sha_init(struct ahash_request *req)
{
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
        struct ccp_ctx *ctx = crypto_ahash_ctx(tfm);
        struct ccp_sha_req_ctx *rctx = ahash_request_ctx(req);
        struct ccp_crypto_ahash_alg *alg =
                ccp_crypto_ahash_alg(crypto_ahash_tfm(tfm));
        unsigned int block_size =
                crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));

        memset(rctx, 0, sizeof(*rctx));

        rctx->type = alg->type;
        rctx->first = 1;

        if (ctx->u.sha.key_len) {
                /* Buffer the HMAC key for first update */
                memcpy(rctx->buf, ctx->u.sha.ipad, block_size);
                rctx->buf_count = block_size;
        }

        return 0;
}

static int ccp_sha_update(struct ahash_request *req)
{
        return ccp_do_sha_update(req, req->nbytes, 0);
}

static int ccp_sha_final(struct ahash_request *req)
{
        return ccp_do_sha_update(req, 0, 1);
}

static int ccp_sha_finup(struct ahash_request *req)
{
        return ccp_do_sha_update(req, req->nbytes, 1);
}

static int ccp_sha_digest(struct ahash_request *req)
{
        int ret;

        ret = ccp_sha_init(req);
        if (ret)
                return ret;

        return ccp_sha_finup(req);
}

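/*
 * The exported state captures everything needed to resume a partial
 * hash on another request: the algorithm type, total message bits so
 * far, the first-command flag, the intermediate digest, and any
 * buffered partial block.
 */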
static int ccp_sha_export(struct ahash_request *req, void *out)
{
        struct ccp_sha_req_ctx *rctx = ahash_request_ctx(req);
        struct ccp_sha_exp_ctx state;

        /* Don't let anything leak to 'out' */
        memset(&state, 0, sizeof(state));

        state.type = rctx->type;
        state.msg_bits = rctx->msg_bits;
        state.first = rctx->first;
        memcpy(state.ctx, rctx->ctx, sizeof(state.ctx));
        state.buf_count = rctx->buf_count;
        memcpy(state.buf, rctx->buf, sizeof(state.buf));

        /* 'out' may not be aligned so memcpy from local variable */
        memcpy(out, &state, sizeof(state));

        return 0;
}

static int ccp_sha_import(struct ahash_request *req, const void *in)
{
        struct ccp_sha_req_ctx *rctx = ahash_request_ctx(req);
        struct ccp_sha_exp_ctx state;

        /* 'in' may not be aligned so memcpy to local variable */
        memcpy(&state, in, sizeof(state));

        memset(rctx, 0, sizeof(*rctx));
        rctx->type = state.type;
        rctx->msg_bits = state.msg_bits;
        rctx->first = state.first;
        memcpy(rctx->ctx, state.ctx, sizeof(rctx->ctx));
        rctx->buf_count = state.buf_count;
        memcpy(rctx->buf, state.buf, sizeof(rctx->buf));

        return 0;
}

static int ccp_sha_setkey(struct crypto_ahash *tfm, const u8 *key,
                          unsigned int key_len)
{
        struct ccp_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
        struct crypto_shash *shash = ctx->u.sha.hmac_tfm;
        unsigned int block_size = crypto_shash_blocksize(shash);
        unsigned int digest_size = crypto_shash_digestsize(shash);
        int i, ret;

        /* Set to zero until complete */
        ctx->u.sha.key_len = 0;

        /* Clear key area to provide zero padding for keys smaller
         * than the block size
         */
        memset(ctx->u.sha.key, 0, sizeof(ctx->u.sha.key));

        if (key_len > block_size) {
                /* Must hash the input key */
                ret = crypto_shash_tfm_digest(shash, key, key_len,
                                              ctx->u.sha.key);
                if (ret)
                        return -EINVAL;

                key_len = digest_size;
        } else {
                memcpy(ctx->u.sha.key, key, key_len);
        }

        for (i = 0; i < block_size; i++) {
                ctx->u.sha.ipad[i] = ctx->u.sha.key[i] ^ HMAC_IPAD_VALUE;
                ctx->u.sha.opad[i] = ctx->u.sha.key[i] ^ HMAC_OPAD_VALUE;
        }

        sg_init_one(&ctx->u.sha.opad_sg, ctx->u.sha.opad, block_size);
        ctx->u.sha.opad_count = block_size;

        ctx->u.sha.key_len = key_len;

        return 0;
}

static int ccp_sha_cra_init(struct crypto_tfm *tfm)
{
        struct ccp_ctx *ctx = crypto_tfm_ctx(tfm);
        struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);

        ctx->complete = ccp_sha_complete;
        ctx->u.sha.key_len = 0;

        crypto_ahash_set_reqsize(ahash, sizeof(struct ccp_sha_req_ctx));

        return 0;
}

static void ccp_sha_cra_exit(struct crypto_tfm *tfm)
{
}

static int ccp_hmac_sha_cra_init(struct crypto_tfm *tfm)
{
        struct ccp_ctx *ctx = crypto_tfm_ctx(tfm);
        struct ccp_crypto_ahash_alg *alg = ccp_crypto_ahash_alg(tfm);
        struct crypto_shash *hmac_tfm;

        hmac_tfm = crypto_alloc_shash(alg->child_alg, 0, 0);
        if (IS_ERR(hmac_tfm)) {
                pr_warn("could not load driver %s needed for HMAC support\n",
                        alg->child_alg);
                return PTR_ERR(hmac_tfm);
        }

        ctx->u.sha.hmac_tfm = hmac_tfm;

        return ccp_sha_cra_init(tfm);
}

static void ccp_hmac_sha_cra_exit(struct crypto_tfm *tfm)
{
        struct ccp_ctx *ctx = crypto_tfm_ctx(tfm);

        if (ctx->u.sha.hmac_tfm)
                crypto_free_shash(ctx->u.sha.hmac_tfm);

        ccp_sha_cra_exit(tfm);
}

struct ccp_sha_def {
        unsigned int version;
        const char *name;
        const char *drv_name;
        enum ccp_sha_type type;
        u32 digest_size;
        u32 block_size;
};

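/*
 * Supported hashes. The version field is the minimum CCP hardware
 * version implementing the algorithm: SHA-1/224/256 need a v3 CCP,
 * SHA-384/512 a v5 CCP; ccp_register_sha_algs() skips entries the
 * present hardware can't handle.
 */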
static struct ccp_sha_def sha_algs[] = {
        {
                .version        = CCP_VERSION(3, 0),
                .name           = "sha1",
                .drv_name       = "sha1-ccp",
                .type           = CCP_SHA_TYPE_1,
                .digest_size    = SHA1_DIGEST_SIZE,
                .block_size     = SHA1_BLOCK_SIZE,
        },
        {
                .version        = CCP_VERSION(3, 0),
                .name           = "sha224",
                .drv_name       = "sha224-ccp",
                .type           = CCP_SHA_TYPE_224,
                .digest_size    = SHA224_DIGEST_SIZE,
                .block_size     = SHA224_BLOCK_SIZE,
        },
        {
                .version        = CCP_VERSION(3, 0),
                .name           = "sha256",
                .drv_name       = "sha256-ccp",
                .type           = CCP_SHA_TYPE_256,
                .digest_size    = SHA256_DIGEST_SIZE,
                .block_size     = SHA256_BLOCK_SIZE,
        },
        {
                .version        = CCP_VERSION(5, 0),
                .name           = "sha384",
                .drv_name       = "sha384-ccp",
                .type           = CCP_SHA_TYPE_384,
                .digest_size    = SHA384_DIGEST_SIZE,
                .block_size     = SHA384_BLOCK_SIZE,
        },
        {
                .version        = CCP_VERSION(5, 0),
                .name           = "sha512",
                .drv_name       = "sha512-ccp",
                .type           = CCP_SHA_TYPE_512,
                .digest_size    = SHA512_DIGEST_SIZE,
                .block_size     = SHA512_BLOCK_SIZE,
        },
};

static int ccp_register_hmac_alg(struct list_head *head,
                                 const struct ccp_sha_def *def,
                                 const struct ccp_crypto_ahash_alg *base_alg)
{
        struct ccp_crypto_ahash_alg *ccp_alg;
        struct ahash_alg *alg;
        struct hash_alg_common *halg;
        struct crypto_alg *base;
        int ret;

        ccp_alg = kzalloc(sizeof(*ccp_alg), GFP_KERNEL);
        if (!ccp_alg)
                return -ENOMEM;

        /* Copy the base algorithm and only change what's necessary */
        *ccp_alg = *base_alg;
        INIT_LIST_HEAD(&ccp_alg->entry);

        strncpy(ccp_alg->child_alg, def->name, CRYPTO_MAX_ALG_NAME);

        alg = &ccp_alg->alg;
        alg->setkey = ccp_sha_setkey;

        halg = &alg->halg;

        base = &halg->base;
        snprintf(base->cra_name, CRYPTO_MAX_ALG_NAME, "hmac(%s)", def->name);
        snprintf(base->cra_driver_name, CRYPTO_MAX_ALG_NAME, "hmac-%s",
                 def->drv_name);
        base->cra_init = ccp_hmac_sha_cra_init;
        base->cra_exit = ccp_hmac_sha_cra_exit;

        ret = crypto_register_ahash(alg);
        if (ret) {
                pr_err("%s ahash algorithm registration error (%d)\n",
                       base->cra_name, ret);
                kfree(ccp_alg);
                return ret;
        }

        list_add(&ccp_alg->entry, head);

        return ret;
}

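/*
 * Register a plain SHA ahash and, if that succeeds, its HMAC variant,
 * which ccp_register_hmac_alg() clones from the freshly registered
 * base algorithm.
 */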
static int ccp_register_sha_alg(struct list_head *head,
                                const struct ccp_sha_def *def)
{
        struct ccp_crypto_ahash_alg *ccp_alg;
        struct ahash_alg *alg;
        struct hash_alg_common *halg;
        struct crypto_alg *base;
        int ret;

        ccp_alg = kzalloc(sizeof(*ccp_alg), GFP_KERNEL);
        if (!ccp_alg)
                return -ENOMEM;

        INIT_LIST_HEAD(&ccp_alg->entry);

        ccp_alg->type = def->type;

        alg = &ccp_alg->alg;
        alg->init = ccp_sha_init;
        alg->update = ccp_sha_update;
        alg->final = ccp_sha_final;
        alg->finup = ccp_sha_finup;
        alg->digest = ccp_sha_digest;
        alg->export = ccp_sha_export;
        alg->import = ccp_sha_import;

        halg = &alg->halg;
        halg->digestsize = def->digest_size;
        halg->statesize = sizeof(struct ccp_sha_exp_ctx);

        base = &halg->base;
        snprintf(base->cra_name, CRYPTO_MAX_ALG_NAME, "%s", def->name);
        snprintf(base->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
                 def->drv_name);
        base->cra_flags = CRYPTO_ALG_ASYNC |
                          CRYPTO_ALG_KERN_DRIVER_ONLY |
                          CRYPTO_ALG_NEED_FALLBACK;
        base->cra_blocksize = def->block_size;
        base->cra_ctxsize = sizeof(struct ccp_ctx);
        base->cra_priority = CCP_CRA_PRIORITY;
        base->cra_init = ccp_sha_cra_init;
        base->cra_exit = ccp_sha_cra_exit;
        base->cra_module = THIS_MODULE;

        ret = crypto_register_ahash(alg);
        if (ret) {
                pr_err("%s ahash algorithm registration error (%d)\n",
                       base->cra_name, ret);
                kfree(ccp_alg);
                return ret;
        }

        list_add(&ccp_alg->entry, head);

        ret = ccp_register_hmac_alg(head, def, ccp_alg);

        return ret;
}

int ccp_register_sha_algs(struct list_head *head)
{
        int i, ret;
        unsigned int ccpversion = ccp_version();

        for (i = 0; i < ARRAY_SIZE(sha_algs); i++) {
                if (sha_algs[i].version > ccpversion)
                        continue;
                ret = ccp_register_sha_alg(head, &sha_algs[i]);
                if (ret)
                        return ret;
        }

        return 0;
}

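/*
 * Note: once registered, these transforms are reached through the
 * generic crypto API by name, e.g. (sketch, error handling omitted):
 *
 *      tfm = crypto_alloc_ahash("sha256", 0, 0);
 *
 * cra_priority (CCP_CRA_PRIORITY) lets the CCP implementation win over
 * the generic software one when the hardware is present.
 */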