// SPDX-License-Identifier: GPL-2.0-or-later
/* Asymmetric algorithms supported by virtio crypto device
 *
 * Authors: zhenwei pi <pizhenwei@bytedance.com>
 *          lei he <helei.sig11@bytedance.com>
 *
 * Copyright 2022 Bytedance CO., LTD.
 */

#include <linux/mpi.h>
#include <linux/scatterlist.h>
#include <crypto/algapi.h>
#include <crypto/internal/akcipher.h>
#include <crypto/internal/rsa.h>
#include <linux/err.h>
#include <crypto/scatterwalk.h>
#include <linux/atomic.h>

#include <uapi/linux/virtio_crypto.h>
#include "virtio_crypto_common.h"

struct virtio_crypto_rsa_ctx {
	MPI n;
};

struct virtio_crypto_akcipher_ctx {
	struct crypto_engine_ctx enginectx;
	struct virtio_crypto *vcrypto;
	struct crypto_akcipher *tfm;
	bool session_valid;
	__u64 session_id;
	union {
		struct virtio_crypto_rsa_ctx rsa_ctx;
	};
};

struct virtio_crypto_akcipher_request {
	struct virtio_crypto_request base;
	struct virtio_crypto_akcipher_ctx *akcipher_ctx;
	struct akcipher_request *akcipher_req;
	void *src_buf;
	void *dst_buf;
	uint32_t opcode;
};

struct virtio_crypto_akcipher_algo {
	uint32_t algonum;
	uint32_t service;
	unsigned int active_devs;
	struct akcipher_alg algo;
};

static DEFINE_MUTEX(algs_lock);

static void virtio_crypto_akcipher_finalize_req(
	struct virtio_crypto_akcipher_request *vc_akcipher_req,
	struct akcipher_request *req, int err)
{
	/*
	 * Free the bounce buffers here; otherwise the success path leaks
	 * them on every completed request.
	 */
	kfree(vc_akcipher_req->src_buf);
	kfree(vc_akcipher_req->dst_buf);
	vc_akcipher_req->src_buf = NULL;
	vc_akcipher_req->dst_buf = NULL;
	virtcrypto_clear_request(&vc_akcipher_req->base);

	crypto_finalize_akcipher_request(vc_akcipher_req->base.dataq->engine, req, err);
}

static void virtio_crypto_dataq_akcipher_callback(struct virtio_crypto_request *vc_req, int len)
{
	struct virtio_crypto_akcipher_request *vc_akcipher_req =
		container_of(vc_req, struct virtio_crypto_akcipher_request, base);
	struct akcipher_request *akcipher_req;
	int error;

	switch (vc_req->status) {
	case VIRTIO_CRYPTO_OK:
		error = 0;
		break;
	case VIRTIO_CRYPTO_INVSESS:
	case VIRTIO_CRYPTO_ERR:
		error = -EINVAL;
		break;
	case VIRTIO_CRYPTO_BADMSG:
		error = -EBADMSG;
		break;
	case VIRTIO_CRYPTO_KEY_REJECTED:
		error = -EKEYREJECTED;
		break;
	default:
		error = -EIO;
		break;
	}

	akcipher_req = vc_akcipher_req->akcipher_req;
	if (vc_akcipher_req->opcode != VIRTIO_CRYPTO_AKCIPHER_VERIFY)
		sg_copy_from_buffer(akcipher_req->dst, sg_nents(akcipher_req->dst),
				    vc_akcipher_req->dst_buf, akcipher_req->dst_len);
	virtio_crypto_akcipher_finalize_req(vc_akcipher_req, akcipher_req, error);
}

static int virtio_crypto_alg_akcipher_init_session(struct virtio_crypto_akcipher_ctx *ctx,
		struct virtio_crypto_ctrl_header *header, void *para,
		const uint8_t *key, unsigned int keylen)
{
	struct scatterlist outhdr_sg, key_sg, inhdr_sg, *sgs[3];
	struct virtio_crypto *vcrypto = ctx->vcrypto;
	uint8_t *pkey;
	unsigned int inlen;
	int err;
	unsigned int num_out = 0, num_in = 0;

	pkey = kmemdup(key, keylen, GFP_ATOMIC);
	if (!pkey)
		return -ENOMEM;

	spin_lock(&vcrypto->ctrl_lock);
	memcpy(&vcrypto->ctrl.header, header, sizeof(vcrypto->ctrl.header));
	memcpy(&vcrypto->ctrl.u, para, sizeof(vcrypto->ctrl.u));
	vcrypto->input.status = cpu_to_le32(VIRTIO_CRYPTO_ERR);

	sg_init_one(&outhdr_sg, &vcrypto->ctrl, sizeof(vcrypto->ctrl));
	sgs[num_out++] = &outhdr_sg;

	sg_init_one(&key_sg, pkey, keylen);
	sgs[num_out++] = &key_sg;

	sg_init_one(&inhdr_sg, &vcrypto->input, sizeof(vcrypto->input));
	sgs[num_out + num_in++] = &inhdr_sg;

	err = virtqueue_add_sgs(vcrypto->ctrl_vq, sgs, num_out, num_in, vcrypto, GFP_ATOMIC);
	if (err < 0)
		goto out;

	virtqueue_kick(vcrypto->ctrl_vq);
	while (!virtqueue_get_buf(vcrypto->ctrl_vq, &inlen) &&
	       !virtqueue_is_broken(vcrypto->ctrl_vq))
		cpu_relax();

	if (le32_to_cpu(vcrypto->input.status) != VIRTIO_CRYPTO_OK) {
		err = -EINVAL;
		goto out;
	}

	ctx->session_id = le64_to_cpu(vcrypto->input.session_id);
	ctx->session_valid = true;
	err = 0;

out:
	spin_unlock(&vcrypto->ctrl_lock);
	kfree_sensitive(pkey);

	if (err < 0)
		pr_err("virtio_crypto: Create session failed status: %u\n",
		       le32_to_cpu(vcrypto->input.status));

	return err;
}

static int virtio_crypto_alg_akcipher_close_session(struct virtio_crypto_akcipher_ctx *ctx)
{
	struct scatterlist outhdr_sg, inhdr_sg, *sgs[2];
	struct virtio_crypto_destroy_session_req *destroy_session;
	struct virtio_crypto *vcrypto = ctx->vcrypto;
	unsigned int num_out = 0, num_in = 0, inlen;
	int err;

	spin_lock(&vcrypto->ctrl_lock);
	if (!ctx->session_valid) {
		err = 0;
		goto out;
	}
	vcrypto->ctrl_status.status = VIRTIO_CRYPTO_ERR;
	vcrypto->ctrl.header.opcode = cpu_to_le32(VIRTIO_CRYPTO_AKCIPHER_DESTROY_SESSION);
	vcrypto->ctrl.header.queue_id = 0;

	destroy_session = &vcrypto->ctrl.u.destroy_session;
	destroy_session->session_id = cpu_to_le64(ctx->session_id);

	sg_init_one(&outhdr_sg, &vcrypto->ctrl, sizeof(vcrypto->ctrl));
	sgs[num_out++] = &outhdr_sg;

	sg_init_one(&inhdr_sg, &vcrypto->ctrl_status.status, sizeof(vcrypto->ctrl_status.status));
	sgs[num_out + num_in++] = &inhdr_sg;

	err = virtqueue_add_sgs(vcrypto->ctrl_vq, sgs, num_out, num_in, vcrypto, GFP_ATOMIC);
	if (err < 0)
		goto out;

	virtqueue_kick(vcrypto->ctrl_vq);
	while (!virtqueue_get_buf(vcrypto->ctrl_vq, &inlen) &&
	       !virtqueue_is_broken(vcrypto->ctrl_vq))
		cpu_relax();

	if (vcrypto->ctrl_status.status != VIRTIO_CRYPTO_OK) {
		err = -EINVAL;
		goto out;
	}

	err = 0;
	ctx->session_valid = false;

out:
	spin_unlock(&vcrypto->ctrl_lock);
	if (err < 0) {
		pr_err("virtio_crypto: Close session failed status: %u, session_id: 0x%llx\n",
		       vcrypto->ctrl_status.status, destroy_session->session_id);
	}

	return err;
}

static int __virtio_crypto_akcipher_do_req(struct virtio_crypto_akcipher_request *vc_akcipher_req,
		struct akcipher_request *req, struct data_queue *data_vq)
{
	struct virtio_crypto_akcipher_ctx *ctx = vc_akcipher_req->akcipher_ctx;
	struct virtio_crypto_request *vc_req = &vc_akcipher_req->base;
	struct virtio_crypto *vcrypto = ctx->vcrypto;
	struct virtio_crypto_op_data_req *req_data = vc_req->req_data;
	struct scatterlist *sgs[4], outhdr_sg, inhdr_sg, srcdata_sg, dstdata_sg;
	void *src_buf = NULL, *dst_buf = NULL;
	unsigned int num_out = 0, num_in = 0;
	int node = dev_to_node(&vcrypto->vdev->dev);
	unsigned long flags;
	int ret = -ENOMEM;
	bool verify = vc_akcipher_req->opcode == VIRTIO_CRYPTO_AKCIPHER_VERIFY;
	unsigned int src_len = verify ? req->src_len + req->dst_len : req->src_len;
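	/*
	 * Descriptor layout built below: device-readable parts are the op
	 * header and the source data; for VERIFY the signature (src) and
	 * the digest (dst) are both device-readable, which is why src_len
	 * above covers src_len + dst_len in that case. Device-writable
	 * parts are the destination data (absent for VERIFY) and the status
	 * byte that virtio_crypto_dataq_akcipher_callback() inspects.
	 */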
	/* out header */
	sg_init_one(&outhdr_sg, req_data, sizeof(*req_data));
	sgs[num_out++] = &outhdr_sg;

	/* src data */
	src_buf = kcalloc_node(src_len, 1, GFP_KERNEL, node);
	if (!src_buf)
		goto err;

	if (verify) {
		/* for verify operation, both src and dst data work as OUT direction */
		sg_copy_to_buffer(req->src, sg_nents(req->src), src_buf, src_len);
		sg_init_one(&srcdata_sg, src_buf, src_len);
		sgs[num_out++] = &srcdata_sg;
	} else {
		sg_copy_to_buffer(req->src, sg_nents(req->src), src_buf, src_len);
		sg_init_one(&srcdata_sg, src_buf, src_len);
		sgs[num_out++] = &srcdata_sg;

		/* dst data */
		dst_buf = kcalloc_node(req->dst_len, 1, GFP_KERNEL, node);
		if (!dst_buf)
			goto err;

		sg_init_one(&dstdata_sg, dst_buf, req->dst_len);
		sgs[num_out + num_in++] = &dstdata_sg;
	}

	vc_akcipher_req->src_buf = src_buf;
	vc_akcipher_req->dst_buf = dst_buf;

	/* in header */
	sg_init_one(&inhdr_sg, &vc_req->status, sizeof(vc_req->status));
	sgs[num_out + num_in++] = &inhdr_sg;

	spin_lock_irqsave(&data_vq->lock, flags);
	ret = virtqueue_add_sgs(data_vq->vq, sgs, num_out, num_in, vc_req, GFP_ATOMIC);
	virtqueue_kick(data_vq->vq);
	spin_unlock_irqrestore(&data_vq->lock, flags);
	if (ret)
		goto err;

	return 0;

err:
	kfree(src_buf);
	kfree(dst_buf);
	/* don't leave dangling pointers for the finalize path to free again */
	vc_akcipher_req->src_buf = NULL;
	vc_akcipher_req->dst_buf = NULL;

	return -ENOMEM;
}

static int virtio_crypto_rsa_do_req(struct crypto_engine *engine, void *vreq)
{
	struct akcipher_request *req = container_of(vreq, struct akcipher_request, base);
	struct virtio_crypto_akcipher_request *vc_akcipher_req = akcipher_request_ctx(req);
	struct virtio_crypto_request *vc_req = &vc_akcipher_req->base;
	struct virtio_crypto_akcipher_ctx *ctx = vc_akcipher_req->akcipher_ctx;
	struct virtio_crypto *vcrypto = ctx->vcrypto;
	struct data_queue *data_vq = vc_req->dataq;
	struct virtio_crypto_op_header *header;
	struct virtio_crypto_akcipher_data_req *akcipher_req;
	int ret;

	vc_req->sgs = NULL;
	vc_req->req_data = kzalloc_node(sizeof(*vc_req->req_data),
					GFP_KERNEL, dev_to_node(&vcrypto->vdev->dev));
	if (!vc_req->req_data)
		return -ENOMEM;

	/* build request header */
	header = &vc_req->req_data->header;
	header->opcode = cpu_to_le32(vc_akcipher_req->opcode);
	header->algo = cpu_to_le32(VIRTIO_CRYPTO_AKCIPHER_RSA);
	header->session_id = cpu_to_le64(ctx->session_id);

	/* build request akcipher data */
	akcipher_req = &vc_req->req_data->u.akcipher_req;
	akcipher_req->para.src_data_len = cpu_to_le32(req->src_len);
	akcipher_req->para.dst_data_len = cpu_to_le32(req->dst_len);

	ret = __virtio_crypto_akcipher_do_req(vc_akcipher_req, req, data_vq);
	if (ret < 0) {
		kfree_sensitive(vc_req->req_data);
		vc_req->req_data = NULL;
		return ret;
	}

	return 0;
}

static int virtio_crypto_rsa_req(struct akcipher_request *req, uint32_t opcode)
{
	struct crypto_akcipher *atfm = crypto_akcipher_reqtfm(req);
	struct virtio_crypto_akcipher_ctx *ctx = akcipher_tfm_ctx(atfm);
	struct virtio_crypto_akcipher_request *vc_akcipher_req = akcipher_request_ctx(req);
	struct virtio_crypto_request *vc_req = &vc_akcipher_req->base;
	struct virtio_crypto *vcrypto = ctx->vcrypto;
	/* Use the first data virtqueue as default */
	struct data_queue *data_vq = &vcrypto->data_vq[0];

	vc_req->dataq = data_vq;
	vc_req->alg_cb = virtio_crypto_dataq_akcipher_callback;
	vc_akcipher_req->akcipher_ctx = ctx;
	vc_akcipher_req->akcipher_req = req;
	vc_akcipher_req->opcode = opcode;

	return crypto_transfer_akcipher_request_to_engine(data_vq->engine, req);
}

static int virtio_crypto_rsa_encrypt(struct akcipher_request *req)
{
	return virtio_crypto_rsa_req(req, VIRTIO_CRYPTO_AKCIPHER_ENCRYPT);
}

static int virtio_crypto_rsa_decrypt(struct akcipher_request *req)
{
	return virtio_crypto_rsa_req(req, VIRTIO_CRYPTO_AKCIPHER_DECRYPT);
}

static int virtio_crypto_rsa_sign(struct akcipher_request *req)
{
	return virtio_crypto_rsa_req(req, VIRTIO_CRYPTO_AKCIPHER_SIGN);
}

static int virtio_crypto_rsa_verify(struct akcipher_request *req)
{
	return virtio_crypto_rsa_req(req, VIRTIO_CRYPTO_AKCIPHER_VERIFY);
}

static int virtio_crypto_rsa_set_key(struct crypto_akcipher *tfm,
				     const void *key,
				     unsigned int keylen,
				     bool private,
				     int padding_algo,
				     int hash_algo)
{
	struct virtio_crypto_akcipher_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct virtio_crypto_rsa_ctx *rsa_ctx = &ctx->rsa_ctx;
	struct virtio_crypto *vcrypto;
	struct virtio_crypto_ctrl_header header;
	struct virtio_crypto_akcipher_session_para para;
	struct rsa_key rsa_key = {0};
	int node = virtio_crypto_get_current_node();
	uint32_t keytype;
	int ret;

	/* mpi_free will test n, just free it. */
	mpi_free(rsa_ctx->n);
	rsa_ctx->n = NULL;

	if (private) {
		keytype = VIRTIO_CRYPTO_AKCIPHER_KEY_TYPE_PRIVATE;
		ret = rsa_parse_priv_key(&rsa_key, key, keylen);
	} else {
		keytype = VIRTIO_CRYPTO_AKCIPHER_KEY_TYPE_PUBLIC;
		ret = rsa_parse_pub_key(&rsa_key, key, keylen);
	}

	if (ret)
		return ret;

	rsa_ctx->n = mpi_read_raw_data(rsa_key.n, rsa_key.n_sz);
	if (!rsa_ctx->n)
		return -ENOMEM;

	if (!ctx->vcrypto) {
		vcrypto = virtcrypto_get_dev_node(node, VIRTIO_CRYPTO_SERVICE_AKCIPHER,
						  VIRTIO_CRYPTO_AKCIPHER_RSA);
		if (!vcrypto) {
			pr_err("virtio_crypto: Could not find a virtio device in the system or unsupported algo\n");
			return -ENODEV;
		}

		ctx->vcrypto = vcrypto;
	} else {
		virtio_crypto_alg_akcipher_close_session(ctx);
	}

	/* set ctrl header */
	header.opcode = cpu_to_le32(VIRTIO_CRYPTO_AKCIPHER_CREATE_SESSION);
	header.algo = cpu_to_le32(VIRTIO_CRYPTO_AKCIPHER_RSA);
	header.queue_id = 0;

	/* set RSA para */
	para.algo = cpu_to_le32(VIRTIO_CRYPTO_AKCIPHER_RSA);
	para.keytype = cpu_to_le32(keytype);
	para.keylen = cpu_to_le32(keylen);
	para.u.rsa.padding_algo = cpu_to_le32(padding_algo);
	para.u.rsa.hash_algo = cpu_to_le32(hash_algo);

	return virtio_crypto_alg_akcipher_init_session(ctx, &header, &para, key, keylen);
}
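/*
 * The four setkey helpers below are thin wrappers around
 * virtio_crypto_rsa_set_key(): one private/public pair for the raw "rsa"
 * transform (no padding, no hash) and one pair for "pkcs1pad(rsa,sha1)",
 * matching the two entries of virtio_crypto_akcipher_algs[].
 */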
static int virtio_crypto_rsa_raw_set_priv_key(struct crypto_akcipher *tfm,
					      const void *key,
					      unsigned int keylen)
{
	return virtio_crypto_rsa_set_key(tfm, key, keylen, 1,
					 VIRTIO_CRYPTO_RSA_RAW_PADDING,
					 VIRTIO_CRYPTO_RSA_NO_HASH);
}

static int virtio_crypto_p1pad_rsa_sha1_set_priv_key(struct crypto_akcipher *tfm,
						     const void *key,
						     unsigned int keylen)
{
	return virtio_crypto_rsa_set_key(tfm, key, keylen, 1,
					 VIRTIO_CRYPTO_RSA_PKCS1_PADDING,
					 VIRTIO_CRYPTO_RSA_SHA1);
}

static int virtio_crypto_rsa_raw_set_pub_key(struct crypto_akcipher *tfm,
					     const void *key,
					     unsigned int keylen)
{
	return virtio_crypto_rsa_set_key(tfm, key, keylen, 0,
					 VIRTIO_CRYPTO_RSA_RAW_PADDING,
					 VIRTIO_CRYPTO_RSA_NO_HASH);
}

static int virtio_crypto_p1pad_rsa_sha1_set_pub_key(struct crypto_akcipher *tfm,
						    const void *key,
						    unsigned int keylen)
{
	return virtio_crypto_rsa_set_key(tfm, key, keylen, 0,
					 VIRTIO_CRYPTO_RSA_PKCS1_PADDING,
					 VIRTIO_CRYPTO_RSA_SHA1);
}

static unsigned int virtio_crypto_rsa_max_size(struct crypto_akcipher *tfm)
{
	struct virtio_crypto_akcipher_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct virtio_crypto_rsa_ctx *rsa_ctx = &ctx->rsa_ctx;

	return mpi_get_size(rsa_ctx->n);
}

static int virtio_crypto_rsa_init_tfm(struct crypto_akcipher *tfm)
{
	struct virtio_crypto_akcipher_ctx *ctx = akcipher_tfm_ctx(tfm);

	ctx->tfm = tfm;
	ctx->enginectx.op.do_one_request = virtio_crypto_rsa_do_req;
	ctx->enginectx.op.prepare_request = NULL;
	ctx->enginectx.op.unprepare_request = NULL;

	return 0;
}

static void virtio_crypto_rsa_exit_tfm(struct crypto_akcipher *tfm)
{
	struct virtio_crypto_akcipher_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct virtio_crypto_rsa_ctx *rsa_ctx = &ctx->rsa_ctx;

	virtio_crypto_alg_akcipher_close_session(ctx);
	virtcrypto_dev_put(ctx->vcrypto);
	mpi_free(rsa_ctx->n);
	rsa_ctx->n = NULL;
}

static struct virtio_crypto_akcipher_algo virtio_crypto_akcipher_algs[] = {
	{
		.algonum = VIRTIO_CRYPTO_AKCIPHER_RSA,
		.service = VIRTIO_CRYPTO_SERVICE_AKCIPHER,
		.algo = {
			.encrypt = virtio_crypto_rsa_encrypt,
			.decrypt = virtio_crypto_rsa_decrypt,
			.set_pub_key = virtio_crypto_rsa_raw_set_pub_key,
			.set_priv_key = virtio_crypto_rsa_raw_set_priv_key,
			.max_size = virtio_crypto_rsa_max_size,
			.init = virtio_crypto_rsa_init_tfm,
			.exit = virtio_crypto_rsa_exit_tfm,
			.reqsize = sizeof(struct virtio_crypto_akcipher_request),
			.base = {
				.cra_name = "rsa",
				.cra_driver_name = "virtio-crypto-rsa",
				.cra_priority = 150,
				.cra_module = THIS_MODULE,
				.cra_ctxsize = sizeof(struct virtio_crypto_akcipher_ctx),
			},
		},
	},
	{
		.algonum = VIRTIO_CRYPTO_AKCIPHER_RSA,
		.service = VIRTIO_CRYPTO_SERVICE_AKCIPHER,
		.algo = {
			.encrypt = virtio_crypto_rsa_encrypt,
			.decrypt = virtio_crypto_rsa_decrypt,
			.sign = virtio_crypto_rsa_sign,
			.verify = virtio_crypto_rsa_verify,
			.set_pub_key = virtio_crypto_p1pad_rsa_sha1_set_pub_key,
			.set_priv_key = virtio_crypto_p1pad_rsa_sha1_set_priv_key,
			.max_size = virtio_crypto_rsa_max_size,
			.init = virtio_crypto_rsa_init_tfm,
			.exit = virtio_crypto_rsa_exit_tfm,
			.reqsize = sizeof(struct virtio_crypto_akcipher_request),
			.base = {
				.cra_name = "pkcs1pad(rsa,sha1)",
				.cra_driver_name = "virtio-pkcs1-rsa-with-sha1",
				.cra_priority = 150,
				.cra_module = THIS_MODULE,
				.cra_ctxsize = sizeof(struct virtio_crypto_akcipher_ctx),
			},
		},
	},
};

int virtio_crypto_akcipher_algs_register(struct virtio_crypto *vcrypto)
{
	int ret = 0;
	int i = 0;

	mutex_lock(&algs_lock);

	for (i = 0; i < ARRAY_SIZE(virtio_crypto_akcipher_algs); i++) {
		uint32_t service = virtio_crypto_akcipher_algs[i].service;
		uint32_t algonum = virtio_crypto_akcipher_algs[i].algonum;

		if (!virtcrypto_algo_is_supported(vcrypto, service, algonum))
			continue;

		if (virtio_crypto_akcipher_algs[i].active_devs == 0) {
			ret = crypto_register_akcipher(&virtio_crypto_akcipher_algs[i].algo);
			if (ret)
				goto unlock;
		}

		virtio_crypto_akcipher_algs[i].active_devs++;
		dev_info(&vcrypto->vdev->dev, "Registered akcipher algo %s\n",
			 virtio_crypto_akcipher_algs[i].algo.base.cra_name);
	}

unlock:
	mutex_unlock(&algs_lock);
	return ret;
}

void virtio_crypto_akcipher_algs_unregister(struct virtio_crypto *vcrypto)
{
	int i = 0;

	mutex_lock(&algs_lock);

	for (i = 0; i < ARRAY_SIZE(virtio_crypto_akcipher_algs); i++) {
		uint32_t service = virtio_crypto_akcipher_algs[i].service;
		uint32_t algonum = virtio_crypto_akcipher_algs[i].algonum;

		if (virtio_crypto_akcipher_algs[i].active_devs == 0 ||
		    !virtcrypto_algo_is_supported(vcrypto, service, algonum))
			continue;

		if (virtio_crypto_akcipher_algs[i].active_devs == 1)
			crypto_unregister_akcipher(&virtio_crypto_akcipher_algs[i].algo);

		virtio_crypto_akcipher_algs[i].active_devs--;
	}

	mutex_unlock(&algs_lock);
}
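/*
 * Usage sketch (illustrative only, not part of this driver): a kernel
 * consumer reaches these transforms through the generic akcipher API once
 * a virtio-crypto device has registered them. Error handling and the
 * scatterlist setup are elided, and this driver only services the calls
 * if it wins algorithm selection on cra_priority. The final encrypt call
 * lands in virtio_crypto_rsa_encrypt() above.
 *
 *	struct crypto_akcipher *tfm = crypto_alloc_akcipher("rsa", 0, 0);
 *	struct akcipher_request *req = akcipher_request_alloc(tfm, GFP_KERNEL);
 *
 *	crypto_akcipher_set_pub_key(tfm, der_key, der_keylen);
 *	akcipher_request_set_crypt(req, src_sg, dst_sg, src_len,
 *				   crypto_akcipher_maxsize(tfm));
 *	crypto_akcipher_encrypt(req);
 *
 *	akcipher_request_free(req);
 *	crypto_free_akcipher(tfm);
 */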