/*
 * Virtio crypto Support
 *
 * Copyright (c) 2016 HUAWEI TECHNOLOGIES CO., LTD.
 *
 * Authors:
 *    Gonglei <arei.gonglei@huawei.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or
 * (at your option) any later version. See the COPYING file in the
 * top-level directory.
 */

#include "qemu/osdep.h"
#include "qemu/iov.h"
#include "qemu/main-loop.h"
#include "qemu/module.h"
#include "qapi/error.h"
#include "qemu/error-report.h"

#include "hw/virtio/virtio.h"
#include "hw/virtio/virtio-crypto.h"
#include "hw/qdev-properties.h"
#include "hw/virtio/virtio-access.h"
#include "standard-headers/linux/virtio_ids.h"
#include "sysemu/cryptodev-vhost.h"

#define VIRTIO_CRYPTO_VM_VERSION 1

/*
 * Translate virtqueue index to crypto queue index.
 * The control virtqueue is after the data virtqueues,
 * so the input value doesn't need to be adjusted.
 */
static inline int virtio_crypto_vq2q(int queue_index)
{
    return queue_index;
}

static int
virtio_crypto_cipher_session_helper(VirtIODevice *vdev,
           CryptoDevBackendSymSessionInfo *info,
           struct virtio_crypto_cipher_session_para *cipher_para,
           struct iovec **iov, unsigned int *out_num)
{
    VirtIOCrypto *vcrypto = VIRTIO_CRYPTO(vdev);
    unsigned int num = *out_num;

    info->cipher_alg = ldl_le_p(&cipher_para->algo);
    info->key_len = ldl_le_p(&cipher_para->keylen);
    info->direction = ldl_le_p(&cipher_para->op);
    DPRINTF("cipher_alg=%" PRIu32 ", info->direction=%" PRIu32 "\n",
            info->cipher_alg, info->direction);

    if (info->key_len > vcrypto->conf.max_cipher_key_len) {
        error_report("virtio-crypto length of cipher key is too big: %u",
                     info->key_len);
        return -VIRTIO_CRYPTO_ERR;
    }
    /* Get cipher key */
    if (info->key_len > 0) {
        size_t s;
        DPRINTF("keylen=%" PRIu32 "\n", info->key_len);

        info->cipher_key = g_malloc(info->key_len);
        s = iov_to_buf(*iov, num, 0, info->cipher_key, info->key_len);
        if (unlikely(s != info->key_len)) {
            virtio_error(vdev, "virtio-crypto cipher key incorrect");
            return -EFAULT;
        }
        iov_discard_front(iov, &num, info->key_len);
        *out_num = num;
    }

    return 0;
}
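/*
 * Parse a symmetric-session creation request from the control queue and
 * forward it to the cryptodev backend. Returns the non-negative backend
 * session id on success; on failure returns -EFAULT (fatal, the device
 * needs a reset) or -VIRTIO_CRYPTO_* (reported back to the guest).
 */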
static int64_t
virtio_crypto_create_sym_session(VirtIOCrypto *vcrypto,
               struct virtio_crypto_sym_create_session_req *sess_req,
               uint32_t queue_id,
               uint32_t opcode,
               struct iovec *iov, unsigned int out_num)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(vcrypto);
    CryptoDevBackendSessionInfo info;
    CryptoDevBackendSymSessionInfo *sym_info;
    int64_t session_id;
    int queue_index;
    uint32_t op_type;
    Error *local_err = NULL;
    int ret;

    memset(&info, 0, sizeof(info));
    op_type = ldl_le_p(&sess_req->op_type);
    info.op_code = opcode;

    sym_info = &info.u.sym_sess_info;
    sym_info->op_type = op_type;

    if (op_type == VIRTIO_CRYPTO_SYM_OP_CIPHER) {
        ret = virtio_crypto_cipher_session_helper(vdev, sym_info,
                           &sess_req->u.cipher.para,
                           &iov, &out_num);
        if (ret < 0) {
            goto err;
        }
    } else if (op_type == VIRTIO_CRYPTO_SYM_OP_ALGORITHM_CHAINING) {
        size_t s;
        /* cipher part */
        ret = virtio_crypto_cipher_session_helper(vdev, sym_info,
                           &sess_req->u.chain.para.cipher_param,
                           &iov, &out_num);
        if (ret < 0) {
            goto err;
        }
        /* hash part */
        sym_info->alg_chain_order = ldl_le_p(
            &sess_req->u.chain.para.alg_chain_order);
        sym_info->add_len = ldl_le_p(&sess_req->u.chain.para.aad_len);
        sym_info->hash_mode = ldl_le_p(&sess_req->u.chain.para.hash_mode);
        if (sym_info->hash_mode == VIRTIO_CRYPTO_SYM_HASH_MODE_AUTH) {
            sym_info->hash_alg =
                ldl_le_p(&sess_req->u.chain.para.u.mac_param.algo);
            sym_info->auth_key_len = ldl_le_p(
                &sess_req->u.chain.para.u.mac_param.auth_key_len);
            sym_info->hash_result_len = ldl_le_p(
                &sess_req->u.chain.para.u.mac_param.hash_result_len);
            if (sym_info->auth_key_len > vcrypto->conf.max_auth_key_len) {
                error_report("virtio-crypto length of auth key is too big: %u",
                             sym_info->auth_key_len);
                ret = -VIRTIO_CRYPTO_ERR;
                goto err;
            }
            /* get auth key */
            if (sym_info->auth_key_len > 0) {
                sym_info->auth_key = g_malloc(sym_info->auth_key_len);
                s = iov_to_buf(iov, out_num, 0, sym_info->auth_key,
                               sym_info->auth_key_len);
                if (unlikely(s != sym_info->auth_key_len)) {
                    virtio_error(vdev,
                                 "virtio-crypto authenticated key incorrect");
                    ret = -EFAULT;
                    goto err;
                }
                iov_discard_front(&iov, &out_num, sym_info->auth_key_len);
            }
        } else if (sym_info->hash_mode == VIRTIO_CRYPTO_SYM_HASH_MODE_PLAIN) {
            sym_info->hash_alg = ldl_le_p(
                &sess_req->u.chain.para.u.hash_param.algo);
            sym_info->hash_result_len = ldl_le_p(
                &sess_req->u.chain.para.u.hash_param.hash_result_len);
        } else {
            /* VIRTIO_CRYPTO_SYM_HASH_MODE_NESTED */
            error_report("unsupported hash mode");
            ret = -VIRTIO_CRYPTO_NOTSUPP;
            goto err;
        }
    } else {
        /* VIRTIO_CRYPTO_SYM_OP_NONE */
        error_report("unsupported cipher op_type: VIRTIO_CRYPTO_SYM_OP_NONE");
        ret = -VIRTIO_CRYPTO_NOTSUPP;
        goto err;
    }

    queue_index = virtio_crypto_vq2q(queue_id);
    session_id = cryptodev_backend_create_session(
                                     vcrypto->cryptodev,
                                     &info, queue_index, &local_err);
    if (session_id >= 0) {
        ret = session_id;
    } else {
        if (local_err) {
            error_report_err(local_err);
        }
        ret = -VIRTIO_CRYPTO_ERR;
    }

err:
    g_free(sym_info->cipher_key);
    g_free(sym_info->auth_key);
    return ret;
}
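/*
 * Parse an akcipher (asymmetric) session creation request. The raw key
 * material, if present, is copied out of the guest buffers before the
 * backend session is created. Returns the backend session id on success,
 * or -EFAULT / -VIRTIO_CRYPTO_* on failure.
 */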
static int64_t
virtio_crypto_create_asym_session(VirtIOCrypto *vcrypto,
               struct virtio_crypto_akcipher_create_session_req *sess_req,
               uint32_t queue_id, uint32_t opcode,
               struct iovec *iov, unsigned int out_num)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(vcrypto);
    CryptoDevBackendSessionInfo info = {0};
    CryptoDevBackendAsymSessionInfo *asym_info;
    int64_t session_id;
    int queue_index;
    uint32_t algo, keytype, keylen;
    g_autofree uint8_t *key = NULL;
    Error *local_err = NULL;

    algo = ldl_le_p(&sess_req->para.algo);
    keytype = ldl_le_p(&sess_req->para.keytype);
    keylen = ldl_le_p(&sess_req->para.keylen);

    if ((keytype != VIRTIO_CRYPTO_AKCIPHER_KEY_TYPE_PUBLIC)
         && (keytype != VIRTIO_CRYPTO_AKCIPHER_KEY_TYPE_PRIVATE)) {
        error_report("unsupported asym keytype: %u", keytype);
        return -VIRTIO_CRYPTO_NOTSUPP;
    }

    if (keylen) {
        key = g_malloc(keylen);
        if (iov_to_buf(iov, out_num, 0, key, keylen) != keylen) {
            virtio_error(vdev, "virtio-crypto asym key incorrect");
            return -EFAULT;
        }
        iov_discard_front(&iov, &out_num, keylen);
    }

    info.op_code = opcode;
    asym_info = &info.u.asym_sess_info;
    asym_info->algo = algo;
    asym_info->keytype = keytype;
    asym_info->keylen = keylen;
    asym_info->key = key;
    switch (asym_info->algo) {
    case VIRTIO_CRYPTO_AKCIPHER_RSA:
        asym_info->u.rsa.padding_algo =
            ldl_le_p(&sess_req->para.u.rsa.padding_algo);
        asym_info->u.rsa.hash_algo =
            ldl_le_p(&sess_req->para.u.rsa.hash_algo);
        break;

    /* TODO DSA&ECDSA handling */

    default:
        return -VIRTIO_CRYPTO_ERR;
    }

    queue_index = virtio_crypto_vq2q(queue_id);
    session_id = cryptodev_backend_create_session(vcrypto->cryptodev, &info,
                     queue_index, &local_err);
    if (session_id < 0) {
        if (local_err) {
            error_report_err(local_err);
        }
        return -VIRTIO_CRYPTO_ERR;
    }

    return session_id;
}

static uint8_t
virtio_crypto_handle_close_session(VirtIOCrypto *vcrypto,
         struct virtio_crypto_destroy_session_req *close_sess_req,
         uint32_t queue_id)
{
    int ret;
    uint64_t session_id;
    uint32_t status;
    Error *local_err = NULL;

    session_id = ldq_le_p(&close_sess_req->session_id);
    DPRINTF("close session, id=%" PRIu64 "\n", session_id);

    ret = cryptodev_backend_close_session(
              vcrypto->cryptodev, session_id, queue_id, &local_err);
    if (ret == 0) {
        status = VIRTIO_CRYPTO_OK;
    } else {
        if (local_err) {
            error_report_err(local_err);
        } else {
            error_report("destroy session failed");
        }
        status = VIRTIO_CRYPTO_ERR;
    }

    return status;
}
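/*
 * Control queue handler: pop one element at a time, read the
 * virtio_crypto_op_ctrl_req header and dispatch on its opcode.
 * Session-create requests are completed with a
 * virtio_crypto_session_input, session-destroy requests with a single
 * status byte; on fatal parse errors the element is detached and the
 * device is marked broken via virtio_error().
 */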
static void virtio_crypto_handle_ctrl(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIOCrypto *vcrypto = VIRTIO_CRYPTO(vdev);
    struct virtio_crypto_op_ctrl_req ctrl;
    VirtQueueElement *elem;
    struct iovec *in_iov;
    struct iovec *out_iov;
    unsigned in_num;
    unsigned out_num;
    uint32_t queue_id;
    uint32_t opcode;
    struct virtio_crypto_session_input input;
    int64_t session_id;
    uint8_t status;
    size_t s;

    for (;;) {
        g_autofree struct iovec *out_iov_copy = NULL;

        elem = virtqueue_pop(vq, sizeof(VirtQueueElement));
        if (!elem) {
            break;
        }
        if (elem->out_num < 1 || elem->in_num < 1) {
            virtio_error(vdev, "virtio-crypto ctrl missing headers");
            virtqueue_detach_element(vq, elem, 0);
            g_free(elem);
            break;
        }

        out_num = elem->out_num;
        out_iov_copy = g_memdup2(elem->out_sg, sizeof(out_iov[0]) * out_num);
        out_iov = out_iov_copy;

        in_num = elem->in_num;
        in_iov = elem->in_sg;

        if (unlikely(iov_to_buf(out_iov, out_num, 0, &ctrl, sizeof(ctrl))
                    != sizeof(ctrl))) {
            virtio_error(vdev, "virtio-crypto request ctrl_hdr too short");
            virtqueue_detach_element(vq, elem, 0);
            g_free(elem);
            break;
        }
        iov_discard_front(&out_iov, &out_num, sizeof(ctrl));

        opcode = ldl_le_p(&ctrl.header.opcode);
        queue_id = ldl_le_p(&ctrl.header.queue_id);

        memset(&input, 0, sizeof(input));
        switch (opcode) {
        case VIRTIO_CRYPTO_CIPHER_CREATE_SESSION:
            session_id = virtio_crypto_create_sym_session(vcrypto,
                             &ctrl.u.sym_create_session,
                             queue_id, opcode,
                             out_iov, out_num);
            goto check_session;

        case VIRTIO_CRYPTO_AKCIPHER_CREATE_SESSION:
            session_id = virtio_crypto_create_asym_session(vcrypto,
                             &ctrl.u.akcipher_create_session,
                             queue_id, opcode,
                             out_iov, out_num);

check_session:
            /* Serious errors, need to reset virtio crypto device */
            if (session_id == -EFAULT) {
                virtqueue_detach_element(vq, elem, 0);
                break;
            } else if (session_id == -VIRTIO_CRYPTO_NOTSUPP) {
                stl_le_p(&input.status, VIRTIO_CRYPTO_NOTSUPP);
            } else if (session_id == -VIRTIO_CRYPTO_ERR) {
                stl_le_p(&input.status, VIRTIO_CRYPTO_ERR);
            } else {
                /* Set the session id */
                stq_le_p(&input.session_id, session_id);
                stl_le_p(&input.status, VIRTIO_CRYPTO_OK);
            }

            s = iov_from_buf(in_iov, in_num, 0, &input, sizeof(input));
            if (unlikely(s != sizeof(input))) {
                virtio_error(vdev, "virtio-crypto input incorrect");
                virtqueue_detach_element(vq, elem, 0);
                break;
            }
            virtqueue_push(vq, elem, sizeof(input));
            virtio_notify(vdev, vq);
            break;

        case VIRTIO_CRYPTO_CIPHER_DESTROY_SESSION:
        case VIRTIO_CRYPTO_HASH_DESTROY_SESSION:
        case VIRTIO_CRYPTO_MAC_DESTROY_SESSION:
        case VIRTIO_CRYPTO_AEAD_DESTROY_SESSION:
        case VIRTIO_CRYPTO_AKCIPHER_DESTROY_SESSION:
            status = virtio_crypto_handle_close_session(vcrypto,
                   &ctrl.u.destroy_session, queue_id);
            /* The status occupies only one byte, so we can use it directly */
            s = iov_from_buf(in_iov, in_num, 0, &status, sizeof(status));
            if (unlikely(s != sizeof(status))) {
                virtio_error(vdev, "virtio-crypto status incorrect");
                virtqueue_detach_element(vq, elem, 0);
                break;
            }
            virtqueue_push(vq, elem, sizeof(status));
            virtio_notify(vdev, vq);
            break;
        case VIRTIO_CRYPTO_HASH_CREATE_SESSION:
        case VIRTIO_CRYPTO_MAC_CREATE_SESSION:
        case VIRTIO_CRYPTO_AEAD_CREATE_SESSION:
        default:
            error_report("virtio-crypto unsupported ctrl opcode: %u", opcode);
            stl_le_p(&input.status, VIRTIO_CRYPTO_NOTSUPP);
            s = iov_from_buf(in_iov, in_num, 0, &input, sizeof(input));
            if (unlikely(s != sizeof(input))) {
                virtio_error(vdev, "virtio-crypto input incorrect");
                virtqueue_detach_element(vq, elem, 0);
                break;
            }
            virtqueue_push(vq, elem, sizeof(input));
            virtio_notify(vdev, vq);

            break;
        } /* end switch case */

        g_free(elem);
    } /* end for loop */
}

static void virtio_crypto_init_request(VirtIOCrypto *vcrypto, VirtQueue *vq,
                                       VirtIOCryptoReq *req)
{
    req->vcrypto = vcrypto;
    req->vq = vq;
    req->in = NULL;
    req->in_iov = NULL;
    req->in_num = 0;
    req->in_len = 0;
    req->flags = CRYPTODEV_BACKEND_ALG__MAX;
    memset(&req->op_info, 0x00, sizeof(req->op_info));
}

static void virtio_crypto_free_request(VirtIOCryptoReq *req)
{
    if (!req) {
        return;
    }

    if (req->flags == CRYPTODEV_BACKEND_ALG_SYM) {
        size_t max_len;
        CryptoDevBackendSymOpInfo *op_info = req->op_info.u.sym_op_info;

        max_len = op_info->iv_len +
                  op_info->aad_len +
                  op_info->src_len +
                  op_info->dst_len +
                  op_info->digest_result_len;

        /* Zeroize and free request data structure */
        memset(op_info, 0, sizeof(*op_info) + max_len);
        g_free(op_info);
    } else if (req->flags == CRYPTODEV_BACKEND_ALG_ASYM) {
        CryptoDevBackendAsymOpInfo *op_info = req->op_info.u.asym_op_info;
        if (op_info) {
            g_free(op_info->src);
            g_free(op_info->dst);
            memset(op_info, 0, sizeof(*op_info));
            g_free(op_info);
        }
    }

    g_free(req);
}
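/*
 * Copy the backend's output for a symmetric request back into the
 * guest's in-iov: first the cipher result, then (for algorithm
 * chaining) the digest. Does nothing unless the backend reported
 * VIRTIO_CRYPTO_OK.
 */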
static void
virtio_crypto_sym_input_data_helper(VirtIODevice *vdev,
                VirtIOCryptoReq *req,
                uint32_t status,
                CryptoDevBackendSymOpInfo *sym_op_info)
{
    size_t s, len;

    if (status != VIRTIO_CRYPTO_OK) {
        return;
    }

    len = sym_op_info->src_len;
    /* Save the cipher result */
    s = iov_from_buf(req->in_iov, req->in_num, 0, sym_op_info->dst, len);
    if (s != len) {
        virtio_error(vdev, "virtio-crypto dest data incorrect");
        return;
    }

    iov_discard_front(&req->in_iov, &req->in_num, len);

    if (sym_op_info->op_type ==
                      VIRTIO_CRYPTO_SYM_OP_ALGORITHM_CHAINING) {
        /* Save the digest result */
        s = iov_from_buf(req->in_iov, req->in_num, 0,
                         sym_op_info->digest_result,
                         sym_op_info->digest_result_len);
        if (s != sym_op_info->digest_result_len) {
            virtio_error(vdev, "virtio-crypto digest result incorrect");
        }
    }
}

static void
virtio_crypto_akcipher_input_data_helper(VirtIODevice *vdev,
        VirtIOCryptoReq *req, int32_t status,
        CryptoDevBackendAsymOpInfo *asym_op_info)
{
    size_t s, len;

    if (status != VIRTIO_CRYPTO_OK) {
        return;
    }

    len = asym_op_info->dst_len;
    if (!len) {
        return;
    }

    s = iov_from_buf(req->in_iov, req->in_num, 0, asym_op_info->dst, len);
    if (s != len) {
        virtio_error(vdev, "virtio-crypto asym dest data incorrect");
        return;
    }

    iov_discard_front(&req->in_iov, &req->in_num, len);

    /* For akcipher, dst_len may be changed after operation */
    req->in_len = sizeof(struct virtio_crypto_inhdr) + asym_op_info->dst_len;
}

static void virtio_crypto_req_complete(VirtIOCryptoReq *req, uint8_t status)
{
    VirtIOCrypto *vcrypto = req->vcrypto;
    VirtIODevice *vdev = VIRTIO_DEVICE(vcrypto);

    if (req->flags == CRYPTODEV_BACKEND_ALG_SYM) {
        virtio_crypto_sym_input_data_helper(vdev, req, status,
                                            req->op_info.u.sym_op_info);
    } else if (req->flags == CRYPTODEV_BACKEND_ALG_ASYM) {
        virtio_crypto_akcipher_input_data_helper(vdev, req, status,
                                             req->op_info.u.asym_op_info);
    }
    stb_p(&req->in->status, status);
    virtqueue_push(req->vq, &req->elem, req->in_len);
    virtio_notify(vdev, req->vq);
}

static VirtIOCryptoReq *
virtio_crypto_get_request(VirtIOCrypto *s, VirtQueue *vq)
{
    VirtIOCryptoReq *req = virtqueue_pop(vq, sizeof(VirtIOCryptoReq));

    if (req) {
        virtio_crypto_init_request(s, vq, req);
    }
    return req;
}
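/*
 * Build a CryptoDevBackendSymOpInfo for a plain cipher or a chained
 * request. All variable-sized fields live in a single allocation after
 * the struct, laid out as: iv | aad | src | dst | digest_result; the
 * individual pointers (op_info->iv, ->aad_data, ->src, ->dst,
 * ->digest_result) are set up to point into that buffer.
 */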
static CryptoDevBackendSymOpInfo *
virtio_crypto_sym_op_helper(VirtIODevice *vdev,
           struct virtio_crypto_cipher_para *cipher_para,
           struct virtio_crypto_alg_chain_data_para *alg_chain_para,
           struct iovec *iov, unsigned int out_num)
{
    VirtIOCrypto *vcrypto = VIRTIO_CRYPTO(vdev);
    CryptoDevBackendSymOpInfo *op_info;
    uint32_t src_len = 0, dst_len = 0;
    uint32_t iv_len = 0;
    uint32_t aad_len = 0, hash_result_len = 0;
    uint32_t hash_start_src_offset = 0, len_to_hash = 0;
    uint32_t cipher_start_src_offset = 0, len_to_cipher = 0;

    uint64_t max_len, curr_size = 0;
    size_t s;

    /* Plain cipher */
    if (cipher_para) {
        iv_len = ldl_le_p(&cipher_para->iv_len);
        src_len = ldl_le_p(&cipher_para->src_data_len);
        dst_len = ldl_le_p(&cipher_para->dst_data_len);
    } else if (alg_chain_para) { /* Algorithm chain */
        iv_len = ldl_le_p(&alg_chain_para->iv_len);
        src_len = ldl_le_p(&alg_chain_para->src_data_len);
        dst_len = ldl_le_p(&alg_chain_para->dst_data_len);

        aad_len = ldl_le_p(&alg_chain_para->aad_len);
        hash_result_len = ldl_le_p(&alg_chain_para->hash_result_len);
        hash_start_src_offset = ldl_le_p(
                                &alg_chain_para->hash_start_src_offset);
        cipher_start_src_offset = ldl_le_p(
                                  &alg_chain_para->cipher_start_src_offset);
        len_to_cipher = ldl_le_p(&alg_chain_para->len_to_cipher);
        len_to_hash = ldl_le_p(&alg_chain_para->len_to_hash);
    } else {
        return NULL;
    }

    max_len = (uint64_t)iv_len + aad_len + src_len + dst_len + hash_result_len;
    if (unlikely(max_len > vcrypto->conf.max_size)) {
        virtio_error(vdev, "virtio-crypto too big length");
        return NULL;
    }

    op_info = g_malloc0(sizeof(CryptoDevBackendSymOpInfo) + max_len);
    op_info->iv_len = iv_len;
    op_info->src_len = src_len;
    op_info->dst_len = dst_len;
    op_info->aad_len = aad_len;
    op_info->digest_result_len = hash_result_len;
    op_info->hash_start_src_offset = hash_start_src_offset;
    op_info->len_to_hash = len_to_hash;
    op_info->cipher_start_src_offset = cipher_start_src_offset;
    op_info->len_to_cipher = len_to_cipher;
    /* Handle the initialization vector */
    if (op_info->iv_len > 0) {
        DPRINTF("iv_len=%" PRIu32 "\n", op_info->iv_len);
        op_info->iv = op_info->data + curr_size;

        s = iov_to_buf(iov, out_num, 0, op_info->iv, op_info->iv_len);
        if (unlikely(s != op_info->iv_len)) {
            virtio_error(vdev, "virtio-crypto iv incorrect");
            goto err;
        }
        iov_discard_front(&iov, &out_num, op_info->iv_len);
        curr_size += op_info->iv_len;
    }

    /* Handle additional authentication data if it exists */
    if (op_info->aad_len > 0) {
        DPRINTF("aad_len=%" PRIu32 "\n", op_info->aad_len);
        op_info->aad_data = op_info->data + curr_size;

        s = iov_to_buf(iov, out_num, 0, op_info->aad_data, op_info->aad_len);
        if (unlikely(s != op_info->aad_len)) {
            virtio_error(vdev, "virtio-crypto additional auth data incorrect");
            goto err;
        }
        iov_discard_front(&iov, &out_num, op_info->aad_len);

        curr_size += op_info->aad_len;
    }

    /* Handle the source data */
    if (op_info->src_len > 0) {
        DPRINTF("src_len=%" PRIu32 "\n", op_info->src_len);
        op_info->src = op_info->data + curr_size;

        s = iov_to_buf(iov, out_num, 0, op_info->src, op_info->src_len);
        if (unlikely(s != op_info->src_len)) {
            virtio_error(vdev, "virtio-crypto source data incorrect");
            goto err;
        }
        iov_discard_front(&iov, &out_num, op_info->src_len);

        curr_size += op_info->src_len;
    }

    /* Handle the destination data */
    op_info->dst = op_info->data + curr_size;
    curr_size += op_info->dst_len;

    DPRINTF("dst_len=%" PRIu32 "\n", op_info->dst_len);

    /* Handle the hash digest result */
    if (hash_result_len > 0) {
        DPRINTF("hash_result_len=%" PRIu32 "\n", hash_result_len);
        op_info->digest_result = op_info->data + curr_size;
    }

    return op_info;

err:
    g_free(op_info);
    return NULL;
}

static int
virtio_crypto_handle_sym_req(VirtIOCrypto *vcrypto,
               struct virtio_crypto_sym_data_req *req,
               CryptoDevBackendOpInfo *op_info,
               struct iovec *iov, unsigned int out_num)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(vcrypto);
    CryptoDevBackendSymOpInfo *sym_op_info;
    uint32_t op_type;

    op_type = ldl_le_p(&req->op_type);
    if (op_type == VIRTIO_CRYPTO_SYM_OP_CIPHER) {
        sym_op_info = virtio_crypto_sym_op_helper(vdev, &req->u.cipher.para,
                                                  NULL, iov, out_num);
        if (!sym_op_info) {
            return -EFAULT;
        }
    } else if (op_type == VIRTIO_CRYPTO_SYM_OP_ALGORITHM_CHAINING) {
        sym_op_info = virtio_crypto_sym_op_helper(vdev, NULL,
                                                  &req->u.chain.para,
                                                  iov, out_num);
        if (!sym_op_info) {
            return -EFAULT;
        }
    } else {
        /* VIRTIO_CRYPTO_SYM_OP_NONE */
        error_report("virtio-crypto unsupported cipher type");
        return -VIRTIO_CRYPTO_NOTSUPP;
    }

    sym_op_info->op_type = op_type;
    op_info->u.sym_op_info = sym_op_info;

    return 0;
}
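/*
 * Build a CryptoDevBackendAsymOpInfo for an akcipher request. Note that
 * for a VERIFY operation the dst buffer is treated as input as well
 * (the data the signature is checked against), so its contents are
 * copied from the guest too.
 */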
static int
virtio_crypto_handle_asym_req(VirtIOCrypto *vcrypto,
               struct virtio_crypto_akcipher_data_req *req,
               CryptoDevBackendOpInfo *op_info,
               struct iovec *iov, unsigned int out_num)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(vcrypto);
    CryptoDevBackendAsymOpInfo *asym_op_info;
    uint32_t src_len;
    uint32_t dst_len;
    uint32_t len;
    uint8_t *src = NULL;
    uint8_t *dst = NULL;

    asym_op_info = g_malloc0(sizeof(CryptoDevBackendAsymOpInfo));
    src_len = ldl_le_p(&req->para.src_data_len);
    dst_len = ldl_le_p(&req->para.dst_data_len);

    if (src_len > 0) {
        src = g_malloc0(src_len);
        len = iov_to_buf(iov, out_num, 0, src, src_len);
        if (unlikely(len != src_len)) {
            virtio_error(vdev, "virtio-crypto asym src data incorrect: "
                         "expected %u, actual %u", src_len, len);
            goto err;
        }

        iov_discard_front(&iov, &out_num, src_len);
    }

    if (dst_len > 0) {
        dst = g_malloc0(dst_len);

        if (op_info->op_code == VIRTIO_CRYPTO_AKCIPHER_VERIFY) {
            len = iov_to_buf(iov, out_num, 0, dst, dst_len);
            if (unlikely(len != dst_len)) {
                virtio_error(vdev, "virtio-crypto asym dst data incorrect: "
                             "expected %u, actual %u", dst_len, len);
                goto err;
            }

            iov_discard_front(&iov, &out_num, dst_len);
        }
    }

    asym_op_info->src_len = src_len;
    asym_op_info->dst_len = dst_len;
    asym_op_info->src = src;
    asym_op_info->dst = dst;
    op_info->u.asym_op_info = asym_op_info;

    return 0;

err:
    g_free(asym_op_info);
    g_free(src);
    g_free(dst);

    return -EFAULT;
}
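/*
 * Parse one data-queue request: the out-iovs start with a
 * virtio_crypto_op_data_req header, and the very last bytes of the
 * in-iovs hold the virtio_crypto_inhdr where the status is written on
 * completion. Returns -1 on fatal errors that require a device reset,
 * 0 otherwise.
 */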
static int
virtio_crypto_handle_request(VirtIOCryptoReq *request)
{
    VirtIOCrypto *vcrypto = request->vcrypto;
    VirtIODevice *vdev = VIRTIO_DEVICE(vcrypto);
    VirtQueueElement *elem = &request->elem;
    int queue_index = virtio_crypto_vq2q(virtio_get_queue_index(request->vq));
    struct virtio_crypto_op_data_req req;
    int ret;
    g_autofree struct iovec *in_iov_copy = NULL;
    g_autofree struct iovec *out_iov_copy = NULL;
    struct iovec *in_iov;
    struct iovec *out_iov;
    unsigned in_num;
    unsigned out_num;
    uint32_t opcode;
    uint8_t status = VIRTIO_CRYPTO_ERR;
    CryptoDevBackendOpInfo *op_info = &request->op_info;
    Error *local_err = NULL;

    if (elem->out_num < 1 || elem->in_num < 1) {
        virtio_error(vdev, "virtio-crypto dataq missing headers");
        return -1;
    }

    out_num = elem->out_num;
    out_iov_copy = g_memdup2(elem->out_sg, sizeof(out_iov[0]) * out_num);
    out_iov = out_iov_copy;

    in_num = elem->in_num;
    in_iov_copy = g_memdup2(elem->in_sg, sizeof(in_iov[0]) * in_num);
    in_iov = in_iov_copy;

    if (unlikely(iov_to_buf(out_iov, out_num, 0, &req, sizeof(req))
                != sizeof(req))) {
        virtio_error(vdev, "virtio-crypto request outhdr too short");
        return -1;
    }
    iov_discard_front(&out_iov, &out_num, sizeof(req));

    if (in_iov[in_num - 1].iov_len <
            sizeof(struct virtio_crypto_inhdr)) {
        virtio_error(vdev, "virtio-crypto request inhdr too short");
        return -1;
    }
    /* We always touch the last byte, so just see how big in_iov is. */
    request->in_len = iov_size(in_iov, in_num);
    request->in = (void *)in_iov[in_num - 1].iov_base
              + in_iov[in_num - 1].iov_len
              - sizeof(struct virtio_crypto_inhdr);
    iov_discard_back(in_iov, &in_num, sizeof(struct virtio_crypto_inhdr));

    /*
     * The length of the operation result, including dest_data
     * and digest_result if it exists.
     */
    request->in_num = in_num;
    request->in_iov = in_iov;

    opcode = ldl_le_p(&req.header.opcode);
    op_info->session_id = ldq_le_p(&req.header.session_id);
    op_info->op_code = opcode;

    switch (opcode) {
    case VIRTIO_CRYPTO_CIPHER_ENCRYPT:
    case VIRTIO_CRYPTO_CIPHER_DECRYPT:
        op_info->algtype = request->flags = CRYPTODEV_BACKEND_ALG_SYM;
        ret = virtio_crypto_handle_sym_req(vcrypto,
                         &req.u.sym_req, op_info,
                         out_iov, out_num);
        goto check_result;

    case VIRTIO_CRYPTO_AKCIPHER_ENCRYPT:
    case VIRTIO_CRYPTO_AKCIPHER_DECRYPT:
    case VIRTIO_CRYPTO_AKCIPHER_SIGN:
    case VIRTIO_CRYPTO_AKCIPHER_VERIFY:
        op_info->algtype = request->flags = CRYPTODEV_BACKEND_ALG_ASYM;
        ret = virtio_crypto_handle_asym_req(vcrypto,
                         &req.u.akcipher_req, op_info,
                         out_iov, out_num);

check_result:
        /* Serious errors, need to reset virtio crypto device */
        if (ret == -EFAULT) {
            return -1;
        } else if (ret == -VIRTIO_CRYPTO_NOTSUPP) {
            virtio_crypto_req_complete(request, VIRTIO_CRYPTO_NOTSUPP);
            virtio_crypto_free_request(request);
        } else {
            /* Hand the request over to the backend */
            ret = cryptodev_backend_crypto_operation(vcrypto->cryptodev,
                                    request, queue_index, &local_err);
            if (ret < 0) {
                status = -ret;
                if (local_err) {
                    error_report_err(local_err);
                }
            } else { /* ret == VIRTIO_CRYPTO_OK */
                status = ret;
            }
            virtio_crypto_req_complete(request, status);
            virtio_crypto_free_request(request);
        }
        break;

    case VIRTIO_CRYPTO_HASH:
    case VIRTIO_CRYPTO_MAC:
    case VIRTIO_CRYPTO_AEAD_ENCRYPT:
    case VIRTIO_CRYPTO_AEAD_DECRYPT:
    default:
        error_report("virtio-crypto unsupported dataq opcode: %u",
                     opcode);
        virtio_crypto_req_complete(request, VIRTIO_CRYPTO_NOTSUPP);
        virtio_crypto_free_request(request);
    }

    return 0;
}

static void virtio_crypto_handle_dataq(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIOCrypto *vcrypto = VIRTIO_CRYPTO(vdev);
    VirtIOCryptoReq *req;

    while ((req = virtio_crypto_get_request(vcrypto, vq))) {
        if (virtio_crypto_handle_request(req) < 0) {
            virtqueue_detach_element(req->vq, &req->elem, 0);
            virtio_crypto_free_request(req);
            break;
        }
    }
}

static void virtio_crypto_dataq_bh(void *opaque)
{
    VirtIOCryptoQueue *q = opaque;
    VirtIOCrypto *vcrypto = q->vcrypto;
    VirtIODevice *vdev = VIRTIO_DEVICE(vcrypto);

    /* This happens when the device was stopped but the BH wasn't. */
    if (!vdev->vm_running) {
        return;
    }

    /* Just in case the driver is not ready any more */
    if (unlikely(!(vdev->status & VIRTIO_CONFIG_S_DRIVER_OK))) {
        return;
    }

    for (;;) {
        virtio_crypto_handle_dataq(vdev, q->dataq);
        virtio_queue_set_notification(q->dataq, 1);

        /* Are we done or did the guest add more buffers? */
        if (virtio_queue_empty(q->dataq)) {
            break;
        }

        virtio_queue_set_notification(q->dataq, 0);
    }
}
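/*
 * Data queue notification handler: disable further notifications and
 * defer the actual processing to the per-queue bottom half, so the
 * guest can keep adding buffers while the queue is drained.
 */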
static void
virtio_crypto_handle_dataq_bh(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIOCrypto *vcrypto = VIRTIO_CRYPTO(vdev);
    VirtIOCryptoQueue *q =
         &vcrypto->vqs[virtio_crypto_vq2q(virtio_get_queue_index(vq))];

    /* This happens when the device was stopped but the VCPU wasn't. */
    if (!vdev->vm_running) {
        return;
    }
    virtio_queue_set_notification(vq, 0);
    qemu_bh_schedule(q->dataq_bh);
}

static uint64_t virtio_crypto_get_features(VirtIODevice *vdev,
                                           uint64_t features,
                                           Error **errp)
{
    return features;
}

static void virtio_crypto_reset(VirtIODevice *vdev)
{
    VirtIOCrypto *vcrypto = VIRTIO_CRYPTO(vdev);
    /* multiqueue is disabled by default */
    vcrypto->curr_queues = 1;
    if (!cryptodev_backend_is_ready(vcrypto->cryptodev)) {
        vcrypto->status &= ~VIRTIO_CRYPTO_S_HW_READY;
    } else {
        vcrypto->status |= VIRTIO_CRYPTO_S_HW_READY;
    }
}

static void virtio_crypto_init_config(VirtIODevice *vdev)
{
    VirtIOCrypto *vcrypto = VIRTIO_CRYPTO(vdev);

    vcrypto->conf.crypto_services =
                     vcrypto->conf.cryptodev->conf.crypto_services;
    vcrypto->conf.cipher_algo_l =
                     vcrypto->conf.cryptodev->conf.cipher_algo_l;
    vcrypto->conf.cipher_algo_h =
                     vcrypto->conf.cryptodev->conf.cipher_algo_h;
    vcrypto->conf.hash_algo = vcrypto->conf.cryptodev->conf.hash_algo;
    vcrypto->conf.mac_algo_l = vcrypto->conf.cryptodev->conf.mac_algo_l;
    vcrypto->conf.mac_algo_h = vcrypto->conf.cryptodev->conf.mac_algo_h;
    vcrypto->conf.aead_algo = vcrypto->conf.cryptodev->conf.aead_algo;
    vcrypto->conf.akcipher_algo = vcrypto->conf.cryptodev->conf.akcipher_algo;
    vcrypto->conf.max_cipher_key_len =
                  vcrypto->conf.cryptodev->conf.max_cipher_key_len;
    vcrypto->conf.max_auth_key_len =
                  vcrypto->conf.cryptodev->conf.max_auth_key_len;
    vcrypto->conf.max_size = vcrypto->conf.cryptodev->conf.max_size;
}

static void virtio_crypto_device_realize(DeviceState *dev, Error **errp)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(dev);
    VirtIOCrypto *vcrypto = VIRTIO_CRYPTO(dev);
    int i;

    vcrypto->cryptodev = vcrypto->conf.cryptodev;
    if (vcrypto->cryptodev == NULL) {
        error_setg(errp, "'cryptodev' parameter expects a valid object");
        return;
    } else if (cryptodev_backend_is_used(vcrypto->cryptodev)) {
        error_setg(errp, "can't use already used cryptodev backend: %s",
                   object_get_canonical_path_component(
                       OBJECT(vcrypto->conf.cryptodev)));
        return;
    }

    vcrypto->max_queues = MAX(vcrypto->cryptodev->conf.peers.queues, 1);
    if (vcrypto->max_queues + 1 > VIRTIO_QUEUE_MAX) {
        error_setg(errp, "Invalid number of queues (= %" PRIu32 "), "
                   "must be a positive integer less than %d.",
                   vcrypto->max_queues, VIRTIO_QUEUE_MAX);
        return;
    }

    virtio_init(vdev, VIRTIO_ID_CRYPTO, vcrypto->config_size);
    vcrypto->curr_queues = 1;
    vcrypto->vqs = g_new0(VirtIOCryptoQueue, vcrypto->max_queues);
    for (i = 0; i < vcrypto->max_queues; i++) {
        vcrypto->vqs[i].dataq =
                 virtio_add_queue(vdev, 1024, virtio_crypto_handle_dataq_bh);
        vcrypto->vqs[i].dataq_bh =
                 qemu_bh_new(virtio_crypto_dataq_bh, &vcrypto->vqs[i]);
        vcrypto->vqs[i].vcrypto = vcrypto;
    }

    vcrypto->ctrl_vq = virtio_add_queue(vdev, 64, virtio_crypto_handle_ctrl);
    if (!cryptodev_backend_is_ready(vcrypto->cryptodev)) {
        vcrypto->status &= ~VIRTIO_CRYPTO_S_HW_READY;
    } else {
        vcrypto->status |= VIRTIO_CRYPTO_S_HW_READY;
    }

    virtio_crypto_init_config(vdev);
    cryptodev_backend_set_used(vcrypto->cryptodev, true);
}
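/*
 * Tear down what realize set up: the data queues and their bottom
 * halves, then the control queue and the VirtIODevice state, and
 * finally mark the cryptodev backend as no longer in use.
 */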
static void virtio_crypto_device_unrealize(DeviceState *dev)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(dev);
    VirtIOCrypto *vcrypto = VIRTIO_CRYPTO(dev);
    VirtIOCryptoQueue *q;
    int i, max_queues;

    max_queues = vcrypto->multiqueue ? vcrypto->max_queues : 1;
    for (i = 0; i < max_queues; i++) {
        q = &vcrypto->vqs[i];
        virtio_delete_queue(q->dataq);
        qemu_bh_delete(q->dataq_bh);
    }

    g_free(vcrypto->vqs);
    virtio_delete_queue(vcrypto->ctrl_vq);

    virtio_cleanup(vdev);
    cryptodev_backend_set_used(vcrypto->cryptodev, false);
}

static const VMStateDescription vmstate_virtio_crypto = {
    .name = "virtio-crypto",
    .unmigratable = 1,
    .minimum_version_id = VIRTIO_CRYPTO_VM_VERSION,
    .version_id = VIRTIO_CRYPTO_VM_VERSION,
    .fields = (VMStateField[]) {
        VMSTATE_VIRTIO_DEVICE,
        VMSTATE_END_OF_LIST()
    },
};

static Property virtio_crypto_properties[] = {
    DEFINE_PROP_LINK("cryptodev", VirtIOCrypto, conf.cryptodev,
                     TYPE_CRYPTODEV_BACKEND, CryptoDevBackend *),
    DEFINE_PROP_END_OF_LIST(),
};

static void virtio_crypto_get_config(VirtIODevice *vdev, uint8_t *config)
{
    VirtIOCrypto *c = VIRTIO_CRYPTO(vdev);
    struct virtio_crypto_config crypto_cfg = {};

    /*
     * Virtio-crypto device conforms to VIRTIO 1.0 which is always LE,
     * so we can use LE accessors directly.
     */
    stl_le_p(&crypto_cfg.status, c->status);
    stl_le_p(&crypto_cfg.max_dataqueues, c->max_queues);
    stl_le_p(&crypto_cfg.crypto_services, c->conf.crypto_services);
    stl_le_p(&crypto_cfg.cipher_algo_l, c->conf.cipher_algo_l);
    stl_le_p(&crypto_cfg.cipher_algo_h, c->conf.cipher_algo_h);
    stl_le_p(&crypto_cfg.hash_algo, c->conf.hash_algo);
    stl_le_p(&crypto_cfg.mac_algo_l, c->conf.mac_algo_l);
    stl_le_p(&crypto_cfg.mac_algo_h, c->conf.mac_algo_h);
    stl_le_p(&crypto_cfg.aead_algo, c->conf.aead_algo);
    stl_le_p(&crypto_cfg.max_cipher_key_len, c->conf.max_cipher_key_len);
    stl_le_p(&crypto_cfg.max_auth_key_len, c->conf.max_auth_key_len);
    stq_le_p(&crypto_cfg.max_size, c->conf.max_size);
    stl_le_p(&crypto_cfg.akcipher_algo, c->conf.akcipher_algo);

    memcpy(config, &crypto_cfg, c->config_size);
}

static bool virtio_crypto_started(VirtIOCrypto *c, uint8_t status)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(c);
    return (status & VIRTIO_CONFIG_S_DRIVER_OK) &&
        (c->status & VIRTIO_CRYPTO_S_HW_READY) && vdev->vm_running;
}
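/*
 * Start or stop the vhost backend so that its state matches the
 * device/driver status. A no-op when the backend has no vhost support
 * or when the vhost state already agrees with virtio_crypto_started().
 */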
static void virtio_crypto_vhost_status(VirtIOCrypto *c, uint8_t status)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(c);
    int queues = c->multiqueue ? c->max_queues : 1;
    CryptoDevBackend *b = c->cryptodev;
    CryptoDevBackendClient *cc = b->conf.peers.ccs[0];

    if (!cryptodev_get_vhost(cc, b, 0)) {
        return;
    }

    if ((virtio_crypto_started(c, status)) == !!c->vhost_started) {
        return;
    }

    if (!c->vhost_started) {
        int r;

        c->vhost_started = 1;
        r = cryptodev_vhost_start(vdev, queues);
        if (r < 0) {
            error_report("unable to start vhost crypto: %d: "
                         "falling back on userspace virtio", -r);
            c->vhost_started = 0;
        }
    } else {
        cryptodev_vhost_stop(vdev, queues);
        c->vhost_started = 0;
    }
}

static void virtio_crypto_set_status(VirtIODevice *vdev, uint8_t status)
{
    VirtIOCrypto *vcrypto = VIRTIO_CRYPTO(vdev);

    virtio_crypto_vhost_status(vcrypto, status);
}

static void virtio_crypto_guest_notifier_mask(VirtIODevice *vdev, int idx,
                                              bool mask)
{
    VirtIOCrypto *vcrypto = VIRTIO_CRYPTO(vdev);
    int queue = virtio_crypto_vq2q(idx);

    assert(vcrypto->vhost_started);

    cryptodev_vhost_virtqueue_mask(vdev, queue, idx, mask);
}

static bool virtio_crypto_guest_notifier_pending(VirtIODevice *vdev, int idx)
{
    VirtIOCrypto *vcrypto = VIRTIO_CRYPTO(vdev);
    int queue = virtio_crypto_vq2q(idx);

    assert(vcrypto->vhost_started);

    return cryptodev_vhost_virtqueue_pending(vdev, queue, idx);
}

static struct vhost_dev *virtio_crypto_get_vhost(VirtIODevice *vdev)
{
    VirtIOCrypto *vcrypto = VIRTIO_CRYPTO(vdev);
    CryptoDevBackend *b = vcrypto->cryptodev;
    CryptoDevBackendClient *cc = b->conf.peers.ccs[0];
    CryptoDevBackendVhost *vhost_crypto = cryptodev_get_vhost(cc, b, 0);
    return &vhost_crypto->dev;
}

static void virtio_crypto_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass);

    device_class_set_props(dc, virtio_crypto_properties);
    dc->vmsd = &vmstate_virtio_crypto;
    set_bit(DEVICE_CATEGORY_MISC, dc->categories);
    vdc->realize = virtio_crypto_device_realize;
    vdc->unrealize = virtio_crypto_device_unrealize;
    vdc->get_config = virtio_crypto_get_config;
    vdc->get_features = virtio_crypto_get_features;
    vdc->reset = virtio_crypto_reset;
    vdc->set_status = virtio_crypto_set_status;
    vdc->guest_notifier_mask = virtio_crypto_guest_notifier_mask;
    vdc->guest_notifier_pending = virtio_crypto_guest_notifier_pending;
    vdc->get_vhost = virtio_crypto_get_vhost;
}

static void virtio_crypto_instance_init(Object *obj)
{
    VirtIOCrypto *vcrypto = VIRTIO_CRYPTO(obj);

    /*
     * The default config_size is sizeof(struct virtio_crypto_config).
     * Can be overridden with virtio_crypto_set_config_size.
     */
    vcrypto->config_size = sizeof(struct virtio_crypto_config);
}

static const TypeInfo virtio_crypto_info = {
    .name = TYPE_VIRTIO_CRYPTO,
    .parent = TYPE_VIRTIO_DEVICE,
    .instance_size = sizeof(VirtIOCrypto),
    .instance_init = virtio_crypto_instance_init,
    .class_init = virtio_crypto_class_init,
};

static void virtio_register_types(void)
{
    type_register_static(&virtio_crypto_info);
}

type_init(virtio_register_types)