/*
 * vhost-vdpa.c
 *
 * Copyright(c) 2017-2018 Intel Corporation.
 * Copyright(c) 2020 Red Hat, Inc.
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

#include "qemu/osdep.h"
#include "clients.h"
#include "hw/virtio/virtio-net.h"
#include "net/vhost_net.h"
#include "net/vhost-vdpa.h"
#include "hw/virtio/vhost-vdpa.h"
#include "qemu/config-file.h"
#include "qemu/error-report.h"
#include "qemu/log.h"
#include "qemu/memalign.h"
#include "qemu/option.h"
#include "qapi/error.h"
#include <linux/vhost.h>
#include <sys/ioctl.h>
#include <err.h>
#include "standard-headers/linux/virtio_net.h"
#include "monitor/monitor.h"
#include "migration/migration.h"
#include "migration/misc.h"
#include "hw/virtio/vhost.h"
#include "trace.h"

/* TODO: need to add multiqueue support here */
typedef struct VhostVDPAState {
    NetClientState nc;
    struct vhost_vdpa vhost_vdpa;
    NotifierWithReturn migration_state;
    VHostNetState *vhost_net;

    /* Control commands shadow buffers */
    void *cvq_cmd_out_buffer;
    virtio_net_ctrl_ack *status;

    /* The device always has SVQ enabled */
    bool always_svq;

    /* The device can isolate CVQ in its own ASID */
    bool cvq_isolated;

    bool started;
} VhostVDPAState;

/*
 * The array is sorted alphabetically in ascending order,
 * with the exception of VHOST_INVALID_FEATURE_BIT,
 * which should always be the last entry.
 */
const int vdpa_feature_bits[] = {
    VIRTIO_F_ANY_LAYOUT,
    VIRTIO_F_IOMMU_PLATFORM,
    VIRTIO_F_NOTIFY_ON_EMPTY,
    VIRTIO_F_RING_PACKED,
    VIRTIO_F_RING_RESET,
    VIRTIO_F_VERSION_1,
    VIRTIO_NET_F_CSUM,
    VIRTIO_NET_F_CTRL_GUEST_OFFLOADS,
    VIRTIO_NET_F_CTRL_MAC_ADDR,
    VIRTIO_NET_F_CTRL_RX,
    VIRTIO_NET_F_CTRL_RX_EXTRA,
    VIRTIO_NET_F_CTRL_VLAN,
    VIRTIO_NET_F_CTRL_VQ,
    VIRTIO_NET_F_GSO,
    VIRTIO_NET_F_GUEST_CSUM,
    VIRTIO_NET_F_GUEST_ECN,
    VIRTIO_NET_F_GUEST_TSO4,
    VIRTIO_NET_F_GUEST_TSO6,
    VIRTIO_NET_F_GUEST_UFO,
    VIRTIO_NET_F_GUEST_USO4,
    VIRTIO_NET_F_GUEST_USO6,
    VIRTIO_NET_F_HASH_REPORT,
    VIRTIO_NET_F_HOST_ECN,
    VIRTIO_NET_F_HOST_TSO4,
    VIRTIO_NET_F_HOST_TSO6,
    VIRTIO_NET_F_HOST_UFO,
    VIRTIO_NET_F_HOST_USO,
    VIRTIO_NET_F_MQ,
    VIRTIO_NET_F_MRG_RXBUF,
    VIRTIO_NET_F_MTU,
    VIRTIO_NET_F_RSS,
    VIRTIO_NET_F_STATUS,
    VIRTIO_RING_F_EVENT_IDX,
    VIRTIO_RING_F_INDIRECT_DESC,

    /* VHOST_INVALID_FEATURE_BIT should always be the last entry */
    VHOST_INVALID_FEATURE_BIT
};

/** Supported device specific feature bits with SVQ */
static const uint64_t vdpa_svq_device_features =
    BIT_ULL(VIRTIO_NET_F_CSUM) |
    BIT_ULL(VIRTIO_NET_F_GUEST_CSUM) |
    BIT_ULL(VIRTIO_NET_F_CTRL_GUEST_OFFLOADS) |
    BIT_ULL(VIRTIO_NET_F_MTU) |
    BIT_ULL(VIRTIO_NET_F_MAC) |
    BIT_ULL(VIRTIO_NET_F_GUEST_TSO4) |
    BIT_ULL(VIRTIO_NET_F_GUEST_TSO6) |
    BIT_ULL(VIRTIO_NET_F_GUEST_ECN) |
    BIT_ULL(VIRTIO_NET_F_GUEST_UFO) |
    BIT_ULL(VIRTIO_NET_F_HOST_TSO4) |
    BIT_ULL(VIRTIO_NET_F_HOST_TSO6) |
    BIT_ULL(VIRTIO_NET_F_HOST_ECN) |
    BIT_ULL(VIRTIO_NET_F_HOST_UFO) |
    BIT_ULL(VIRTIO_NET_F_MRG_RXBUF) |
    BIT_ULL(VIRTIO_NET_F_STATUS) |
    BIT_ULL(VIRTIO_NET_F_CTRL_VQ) |
    BIT_ULL(VIRTIO_NET_F_CTRL_RX) |
    BIT_ULL(VIRTIO_NET_F_CTRL_VLAN) |
    BIT_ULL(VIRTIO_NET_F_CTRL_RX_EXTRA) |
    BIT_ULL(VIRTIO_NET_F_MQ) |
    BIT_ULL(VIRTIO_F_ANY_LAYOUT) |
    BIT_ULL(VIRTIO_NET_F_CTRL_MAC_ADDR) |
    /* VHOST_F_LOG_ALL is exposed by SVQ */
    BIT_ULL(VHOST_F_LOG_ALL) |
    BIT_ULL(VIRTIO_NET_F_HASH_REPORT) |
    BIT_ULL(VIRTIO_NET_F_RSS) |
    BIT_ULL(VIRTIO_NET_F_RSC_EXT) |
    BIT_ULL(VIRTIO_NET_F_STANDBY) |
    BIT_ULL(VIRTIO_NET_F_SPEED_DUPLEX);

#define VHOST_VDPA_NET_CVQ_ASID 1

VHostNetState *vhost_vdpa_get_vhost_net(NetClientState *nc)
{
    VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);
    assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);
    return s->vhost_net;
}

static size_t vhost_vdpa_net_cvq_cmd_len(void)
{
    /*
     * MAC_TABLE_SET is the ctrl command that produces the largest out buffer.
     * The in buffer is always 1 byte, so it always fits here.
     */
    return sizeof(struct virtio_net_ctrl_hdr) +
           2 * sizeof(struct virtio_net_ctrl_mac) +
           MAC_TABLE_ENTRIES * ETH_ALEN;
}

static size_t vhost_vdpa_net_cvq_cmd_page_len(void)
{
    return ROUND_UP(vhost_vdpa_net_cvq_cmd_len(), qemu_real_host_page_size());
}

static bool vhost_vdpa_net_valid_svq_features(uint64_t features, Error **errp)
{
    uint64_t invalid_dev_features =
        features & ~vdpa_svq_device_features &
        /* Transport features are all accepted at this point */
        ~MAKE_64BIT_MASK(VIRTIO_TRANSPORT_F_START,
                         VIRTIO_TRANSPORT_F_END - VIRTIO_TRANSPORT_F_START);

    if (invalid_dev_features) {
        error_setg(errp, "vdpa svq does not work with features 0x%" PRIx64,
                   invalid_dev_features);
        return false;
    }

    return vhost_svq_valid_features(features, errp);
}

static int vhost_vdpa_net_check_device_id(struct vhost_net *net)
{
    uint32_t device_id;
    int ret;
    struct vhost_dev *hdev;

    hdev = (struct vhost_dev *)&net->dev;
    ret = hdev->vhost_ops->vhost_get_device_id(hdev, &device_id);
    if (device_id != VIRTIO_ID_NET) {
        return -ENOTSUP;
    }
    return ret;
}

static int vhost_vdpa_add(NetClientState *ncs, void *be,
                          int queue_pair_index, int nvqs)
{
    VhostNetOptions options;
    struct vhost_net *net = NULL;
    VhostVDPAState *s;
    int ret;

    options.backend_type = VHOST_BACKEND_TYPE_VDPA;
    assert(ncs->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);
    s = DO_UPCAST(VhostVDPAState, nc, ncs);
    options.net_backend = ncs;
    options.opaque = be;
    options.busyloop_timeout = 0;
    options.nvqs = nvqs;

    net = vhost_net_init(&options);
    if (!net) {
        error_report("failed to init vhost_net for queue");
        goto err_init;
    }
    s->vhost_net = net;
    ret = vhost_vdpa_net_check_device_id(net);
    if (ret) {
        goto err_check;
    }
    return 0;
err_check:
    vhost_net_cleanup(net);
    g_free(net);
err_init:
    return -1;
}

static void vhost_vdpa_cleanup(NetClientState *nc)
{
    VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);

    /*
     * If a peer NIC is attached, do not cleanup anything.
     * Cleanup will happen as a part of qemu_cleanup() -> net_cleanup()
     * when the guest is shutting down.
     */
    if (nc->peer && nc->peer->info->type == NET_CLIENT_DRIVER_NIC) {
        return;
    }
    munmap(s->cvq_cmd_out_buffer, vhost_vdpa_net_cvq_cmd_page_len());
    munmap(s->status, vhost_vdpa_net_cvq_cmd_page_len());
    if (s->vhost_net) {
        vhost_net_cleanup(s->vhost_net);
        g_free(s->vhost_net);
        s->vhost_net = NULL;
    }
    if (s->vhost_vdpa.index != 0) {
        return;
    }
    qemu_close(s->vhost_vdpa.shared->device_fd);
    g_free(s->vhost_vdpa.shared);
}

/** Dummy SetSteeringEBPF to support RSS for vhost-vdpa backend */
static bool vhost_vdpa_set_steering_ebpf(NetClientState *nc, int prog_fd)
{
    return true;
}

static bool vhost_vdpa_has_vnet_hdr(NetClientState *nc)
{
    assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);

    return true;
}

static bool vhost_vdpa_has_ufo(NetClientState *nc)
{
    assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);
    VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);
    uint64_t features = 0;
    features |= (1ULL << VIRTIO_NET_F_HOST_UFO);
    features = vhost_net_get_features(s->vhost_net, features);
    return !!(features & (1ULL << VIRTIO_NET_F_HOST_UFO));
}

static bool vhost_vdpa_check_peer_type(NetClientState *nc, ObjectClass *oc,
                                       Error **errp)
{
    const char *driver = object_class_get_name(oc);

    if (!g_str_has_prefix(driver, "virtio-net-")) {
        error_setg(errp, "vhost-vdpa requires frontend driver virtio-net-*");
        return false;
    }

    return true;
}

/** Dummy receive in case qemu falls back to userland tap networking */
static ssize_t vhost_vdpa_receive(NetClientState *nc, const uint8_t *buf,
                                  size_t size)
{
    return size;
}

/** From any vdpa net client, get the netclient of the i-th queue pair */
static VhostVDPAState *vhost_vdpa_net_get_nc_vdpa(VhostVDPAState *s, int i)
{
    NICState *nic = qemu_get_nic(s->nc.peer);
    NetClientState *nc_i = qemu_get_peer(nic->ncs, i);

    return DO_UPCAST(VhostVDPAState, nc, nc_i);
}

static VhostVDPAState *vhost_vdpa_net_first_nc_vdpa(VhostVDPAState *s)
{
    return vhost_vdpa_net_get_nc_vdpa(s, 0);
}

static void vhost_vdpa_net_log_global_enable(VhostVDPAState *s, bool enable)
{
    struct vhost_vdpa *v = &s->vhost_vdpa;
    VirtIONet *n;
    VirtIODevice *vdev;
    int data_queue_pairs, cvq, r;

    /* We are only called on the first data vqs and only if x-svq is not set */
    if (s->vhost_vdpa.shadow_vqs_enabled == enable) {
        return;
    }

    vdev = v->dev->vdev;
    n = VIRTIO_NET(vdev);
    if (!n->vhost_started) {
        return;
    }

    data_queue_pairs = n->multiqueue ? n->max_queue_pairs : 1;
    cvq = virtio_vdev_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ) ?
                                  n->max_ncs - n->max_queue_pairs : 0;
    v->shared->svq_switching = enable ?
        SVQ_TSTATE_ENABLING : SVQ_TSTATE_DISABLING;
    /*
     * TODO: vhost_net_stop does suspend, get_base and reset. We can be smarter
     * in the future and resume the device if read-only operations between
     * suspend and reset go wrong.
     */
    vhost_net_stop(vdev, n->nic->ncs, data_queue_pairs, cvq);

    /* Start will check migration setup_or_active to configure or not SVQ */
    r = vhost_net_start(vdev, n->nic->ncs, data_queue_pairs, cvq);
    if (unlikely(r < 0)) {
        error_report("unable to start vhost net: %s(%d)", g_strerror(-r), -r);
    }
    v->shared->svq_switching = SVQ_TSTATE_DONE;
}

static int vdpa_net_migration_state_notifier(NotifierWithReturn *notifier,
                                             MigrationEvent *e, Error **errp)
{
    VhostVDPAState *s = container_of(notifier, VhostVDPAState, migration_state);

    if (e->type == MIG_EVENT_PRECOPY_SETUP) {
        vhost_vdpa_net_log_global_enable(s, true);
    } else if (e->type == MIG_EVENT_PRECOPY_FAILED) {
        vhost_vdpa_net_log_global_enable(s, false);
    }
    return 0;
}

static void vhost_vdpa_net_data_start_first(VhostVDPAState *s)
{
    struct vhost_vdpa *v = &s->vhost_vdpa;

    migration_add_notifier(&s->migration_state,
                           vdpa_net_migration_state_notifier);
    if (v->shadow_vqs_enabled) {
        v->shared->iova_tree = vhost_iova_tree_new(v->shared->iova_range.first,
                                                   v->shared->iova_range.last);
    }
}

static int vhost_vdpa_net_data_start(NetClientState *nc)
{
    VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);
    struct vhost_vdpa *v = &s->vhost_vdpa;

    assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);

    if (s->always_svq ||
        migration_is_setup_or_active(migrate_get_current()->state)) {
        v->shadow_vqs_enabled = true;
    } else {
        v->shadow_vqs_enabled = false;
    }

    if (v->index == 0) {
        v->shared->shadow_data = v->shadow_vqs_enabled;
        vhost_vdpa_net_data_start_first(s);
        return 0;
    }

    return 0;
}

static int vhost_vdpa_net_data_load(NetClientState *nc)
{
    VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);
    struct vhost_vdpa *v = &s->vhost_vdpa;
    bool has_cvq = v->dev->vq_index_end % 2;

    if (has_cvq) {
        return 0;
    }

    for (int i = 0; i < v->dev->nvqs; ++i) {
        vhost_vdpa_set_vring_ready(v, i + v->dev->vq_index);
    }
    return 0;
}

static void vhost_vdpa_net_client_stop(NetClientState *nc)
{
    VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);
    struct vhost_dev *dev;

    assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);

    if (s->vhost_vdpa.index == 0) {
        migration_remove_notifier(&s->migration_state);
    }

    dev = s->vhost_vdpa.dev;
    if (dev->vq_index + dev->nvqs == dev->vq_index_end) {
        g_clear_pointer(&s->vhost_vdpa.shared->iova_tree,
                        vhost_iova_tree_delete);
    }
}

static NetClientInfo net_vhost_vdpa_info = {
    .type = NET_CLIENT_DRIVER_VHOST_VDPA,
    .size = sizeof(VhostVDPAState),
    .receive = vhost_vdpa_receive,
    .start = vhost_vdpa_net_data_start,
    .load = vhost_vdpa_net_data_load,
    .stop = vhost_vdpa_net_client_stop,
    .cleanup = vhost_vdpa_cleanup,
    .has_vnet_hdr = vhost_vdpa_has_vnet_hdr,
    .has_ufo = vhost_vdpa_has_ufo,
    .check_peer_type = vhost_vdpa_check_peer_type,
    .set_steering_ebpf = vhost_vdpa_set_steering_ebpf,
};

static int64_t vhost_vdpa_get_vring_group(int device_fd, unsigned vq_index,
                                          Error **errp)
{
    struct vhost_vring_state state = {
        .index = vq_index,
    };
    int r = ioctl(device_fd, VHOST_VDPA_GET_VRING_GROUP, &state);

    if (unlikely(r < 0)) {
        r = -errno;
        error_setg_errno(errp, errno, "Cannot get VQ %u group", vq_index);
        return r;
    }
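
    /*
     * On success, the kernel has filled state.num with the group this
     * virtqueue belongs to.
     */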
    return state.num;
}

static int vhost_vdpa_set_address_space_id(struct vhost_vdpa *v,
                                           unsigned vq_group,
                                           unsigned asid_num)
{
    struct vhost_vring_state asid = {
        .index = vq_group,
        .num = asid_num,
    };
    int r;

    trace_vhost_vdpa_set_address_space_id(v, vq_group, asid_num);

    r = ioctl(v->shared->device_fd, VHOST_VDPA_SET_GROUP_ASID, &asid);
    if (unlikely(r < 0)) {
        error_report("Can't set vq group %u asid %u, errno=%d (%s)",
                     asid.index, asid.num, errno, g_strerror(errno));
    }
    return r;
}

static void vhost_vdpa_cvq_unmap_buf(struct vhost_vdpa *v, void *addr)
{
    VhostIOVATree *tree = v->shared->iova_tree;
    DMAMap needle = {
        /*
         * No need to specify size or to look for more translations since
         * this contiguous chunk was allocated by us.
         */
        .translated_addr = (hwaddr)(uintptr_t)addr,
    };
    const DMAMap *map = vhost_iova_tree_find_iova(tree, &needle);
    int r;

    if (unlikely(!map)) {
        error_report("Cannot locate expected map");
        return;
    }

    r = vhost_vdpa_dma_unmap(v->shared, v->address_space_id, map->iova,
                             map->size + 1);
    if (unlikely(r != 0)) {
        error_report("Device cannot unmap: %s(%d)", g_strerror(r), r);
    }

    vhost_iova_tree_remove(tree, *map);
}

/** Map CVQ buffer. */
static int vhost_vdpa_cvq_map_buf(struct vhost_vdpa *v, void *buf, size_t size,
                                  bool write)
{
    DMAMap map = {};
    int r;

    map.translated_addr = (hwaddr)(uintptr_t)buf;
    map.size = size - 1;
    map.perm = write ? IOMMU_RW : IOMMU_RO;
    r = vhost_iova_tree_map_alloc(v->shared->iova_tree, &map);
    if (unlikely(r != IOVA_OK)) {
        error_report("Cannot map injected element");
        return r;
    }

    r = vhost_vdpa_dma_map(v->shared, v->address_space_id, map.iova,
                           vhost_vdpa_net_cvq_cmd_page_len(), buf, !write);
    if (unlikely(r < 0)) {
        goto dma_map_err;
    }

    return 0;

dma_map_err:
    vhost_iova_tree_remove(v->shared->iova_tree, map);
    return r;
}

static int vhost_vdpa_net_cvq_start(NetClientState *nc)
{
    VhostVDPAState *s, *s0;
    struct vhost_vdpa *v;
    int64_t cvq_group;
    int r;
    Error *err = NULL;

    assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);

    s = DO_UPCAST(VhostVDPAState, nc, nc);
    v = &s->vhost_vdpa;

    s0 = vhost_vdpa_net_first_nc_vdpa(s);
    v->shadow_vqs_enabled = s0->vhost_vdpa.shadow_vqs_enabled;
    s->vhost_vdpa.address_space_id = VHOST_VDPA_GUEST_PA_ASID;

    if (v->shared->shadow_data) {
        /* SVQ is already configured for all virtqueues */
        goto out;
    }

    /*
     * If we return early in these cases SVQ will not be enabled. The
     * migration will be blocked as long as the vhost-vdpa backend does not
     * offer _F_LOG.
     */
    if (!vhost_vdpa_net_valid_svq_features(v->dev->features, NULL)) {
        return 0;
    }

    if (!s->cvq_isolated) {
        return 0;
    }

    cvq_group = vhost_vdpa_get_vring_group(v->shared->device_fd,
                                           v->dev->vq_index_end - 1,
                                           &err);
    if (unlikely(cvq_group < 0)) {
        error_report_err(err);
        return cvq_group;
    }

    r = vhost_vdpa_set_address_space_id(v, cvq_group, VHOST_VDPA_NET_CVQ_ASID);
    if (unlikely(r < 0)) {
        return r;
    }

    v->shadow_vqs_enabled = true;
    s->vhost_vdpa.address_space_id = VHOST_VDPA_NET_CVQ_ASID;

out:
    if (!s->vhost_vdpa.shadow_vqs_enabled) {
        return 0;
    }

    /*
     * If another vhost_vdpa already has an iova_tree, reuse it for simplicity,
     * whether CVQ shares ASID with the guest or not, because:
     * - The memory listener needs access to guest's memory addresses
     *   allocated in the IOVA tree.
     * - There should be plenty of IOVA address space for both ASIDs not to
     *   worry about collisions between them.  Guest's translations are still
     *   validated with virtio virtqueue_pop so there is no risk for the guest
     *   to access memory that it shouldn't.
     *
     * Allocating an IOVA tree per ASID is doable but it complicates the code
     * and it is not worth it for the moment.
     */
    if (!v->shared->iova_tree) {
        v->shared->iova_tree = vhost_iova_tree_new(v->shared->iova_range.first,
                                                   v->shared->iova_range.last);
    }

    r = vhost_vdpa_cvq_map_buf(&s->vhost_vdpa, s->cvq_cmd_out_buffer,
                               vhost_vdpa_net_cvq_cmd_page_len(), false);
    if (unlikely(r < 0)) {
        return r;
    }

    r = vhost_vdpa_cvq_map_buf(&s->vhost_vdpa, s->status,
                               vhost_vdpa_net_cvq_cmd_page_len(), true);
    if (unlikely(r < 0)) {
        vhost_vdpa_cvq_unmap_buf(&s->vhost_vdpa, s->cvq_cmd_out_buffer);
    }

    return r;
}

static void vhost_vdpa_net_cvq_stop(NetClientState *nc)
{
    VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);

    assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);

    if (s->vhost_vdpa.shadow_vqs_enabled) {
        vhost_vdpa_cvq_unmap_buf(&s->vhost_vdpa, s->cvq_cmd_out_buffer);
        vhost_vdpa_cvq_unmap_buf(&s->vhost_vdpa, s->status);
    }

    vhost_vdpa_net_client_stop(nc);
}

static ssize_t vhost_vdpa_net_cvq_add(VhostVDPAState *s,
                                      const struct iovec *out_sg, size_t out_num,
                                      const struct iovec *in_sg, size_t in_num)
{
    VhostShadowVirtqueue *svq = g_ptr_array_index(s->vhost_vdpa.shadow_vqs, 0);
    int r;

    r = vhost_svq_add(svq, out_sg, out_num, in_sg, in_num, NULL);
    if (unlikely(r != 0)) {
        if (unlikely(r == -ENOSPC)) {
            qemu_log_mask(LOG_GUEST_ERROR, "%s: No space on device queue\n",
                          __func__);
        }
    }

    return r;
}

/*
 * Convenience wrapper to poll SVQ for multiple control commands.
 *
 * Caller should hold the BQL when invoking this function, and should take
 * the answer before SVQ pulls by itself when BQL is released.
 */
static ssize_t vhost_vdpa_net_svq_poll(VhostVDPAState *s, size_t cmds_in_flight)
{
    VhostShadowVirtqueue *svq = g_ptr_array_index(s->vhost_vdpa.shadow_vqs, 0);
    return vhost_svq_poll(svq, cmds_in_flight);
}

static void vhost_vdpa_net_load_cursor_reset(VhostVDPAState *s,
                                             struct iovec *out_cursor,
                                             struct iovec *in_cursor)
{
    /* reset the cursor of the output buffer for the device */
    out_cursor->iov_base = s->cvq_cmd_out_buffer;
    out_cursor->iov_len = vhost_vdpa_net_cvq_cmd_page_len();

    /* reset the cursor of the in buffer for the device */
    in_cursor->iov_base = s->status;
    in_cursor->iov_len = vhost_vdpa_net_cvq_cmd_page_len();
}

/*
 * Poll SVQ for multiple pending control commands and check the device's ack.
 *
 * Caller should hold the BQL when invoking this function.
 *
 * @s: The VhostVDPAState
 * @len: The length of the pending status shadow buffer
 */
static ssize_t vhost_vdpa_net_svq_flush(VhostVDPAState *s, size_t len)
{
    /* device uses a one-byte length ack for each control command */
    ssize_t dev_written = vhost_vdpa_net_svq_poll(s, len);
    if (unlikely(dev_written != len)) {
        return -EIO;
    }

    /* check the device's ack */
    for (int i = 0; i < len; ++i) {
        if (s->status[i] != VIRTIO_NET_OK) {
            return -EIO;
        }
    }
    return 0;
}

static ssize_t vhost_vdpa_net_load_cmd(VhostVDPAState *s,
                                       struct iovec *out_cursor,
                                       struct iovec *in_cursor, uint8_t class,
                                       uint8_t cmd, const struct iovec *data_sg,
                                       size_t data_num)
{
    const struct virtio_net_ctrl_hdr ctrl = {
        .class = class,
        .cmd = cmd,
    };
    size_t data_size = iov_size(data_sg, data_num), cmd_size;
    struct iovec out, in;
    ssize_t r;
    unsigned dummy_cursor_iov_cnt;
    VhostShadowVirtqueue *svq = g_ptr_array_index(s->vhost_vdpa.shadow_vqs, 0);

    assert(data_size < vhost_vdpa_net_cvq_cmd_page_len() - sizeof(ctrl));
    cmd_size = sizeof(ctrl) + data_size;
    trace_vhost_vdpa_net_load_cmd(s, class, cmd, data_num, data_size);
    if (vhost_svq_available_slots(svq) < 2 ||
        iov_size(out_cursor, 1) < cmd_size) {
        /*
         * It is time to flush all pending control commands if SVQ is full
         * or control commands shadow buffers are full.
         *
         * We can poll here since we've had BQL from the time
         * we sent the descriptor.
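         *
         * in_cursor->iov_base - (void *)s->status below is the number of
         * one-byte acks already queued in the status shadow buffer, i.e. the
         * number of commands in flight since the last flush.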
         */
        r = vhost_vdpa_net_svq_flush(s, in_cursor->iov_base -
                                     (void *)s->status);
        if (unlikely(r < 0)) {
            return r;
        }

        vhost_vdpa_net_load_cursor_reset(s, out_cursor, in_cursor);
    }

    /* pack the CVQ command header */
    iov_from_buf(out_cursor, 1, 0, &ctrl, sizeof(ctrl));
    /* pack the CVQ command command-specific-data */
    iov_to_buf(data_sg, data_num, 0,
               out_cursor->iov_base + sizeof(ctrl), data_size);

    /* extract the required buffer from the cursor for output */
    iov_copy(&out, 1, out_cursor, 1, 0, cmd_size);
    /* extract the required buffer from the cursor for input */
    iov_copy(&in, 1, in_cursor, 1, 0, sizeof(*s->status));

    r = vhost_vdpa_net_cvq_add(s, &out, 1, &in, 1);
    if (unlikely(r < 0)) {
        trace_vhost_vdpa_net_load_cmd_retval(s, class, cmd, r);
        return r;
    }

    /* iterate the cursors */
    dummy_cursor_iov_cnt = 1;
    iov_discard_front(&out_cursor, &dummy_cursor_iov_cnt, cmd_size);
    dummy_cursor_iov_cnt = 1;
    iov_discard_front(&in_cursor, &dummy_cursor_iov_cnt, sizeof(*s->status));

    return 0;
}

static int vhost_vdpa_net_load_mac(VhostVDPAState *s, const VirtIONet *n,
                                   struct iovec *out_cursor,
                                   struct iovec *in_cursor)
{
    if (virtio_vdev_has_feature(&n->parent_obj, VIRTIO_NET_F_CTRL_MAC_ADDR)) {
        const struct iovec data = {
            .iov_base = (void *)n->mac,
            .iov_len = sizeof(n->mac),
        };
        ssize_t r = vhost_vdpa_net_load_cmd(s, out_cursor, in_cursor,
                                            VIRTIO_NET_CTRL_MAC,
                                            VIRTIO_NET_CTRL_MAC_ADDR_SET,
                                            &data, 1);
        if (unlikely(r < 0)) {
            return r;
        }
    }

    /*
     * According to VirtIO standard, "The device MUST have an
     * empty MAC filtering table on reset.".
     *
     * Therefore, there is no need to send this CVQ command if the
     * driver also sets an empty MAC filter table, which aligns with
     * the device's defaults.
     *
     * Note that the device's defaults can mismatch the driver's
     * configuration only at live migration.
     */
    if (!virtio_vdev_has_feature(&n->parent_obj, VIRTIO_NET_F_CTRL_RX) ||
        n->mac_table.in_use == 0) {
        return 0;
    }

    uint32_t uni_entries = n->mac_table.first_multi,
             uni_macs_size = uni_entries * ETH_ALEN,
             mul_entries = n->mac_table.in_use - uni_entries,
             mul_macs_size = mul_entries * ETH_ALEN;
    struct virtio_net_ctrl_mac uni = {
        .entries = cpu_to_le32(uni_entries),
    };
    struct virtio_net_ctrl_mac mul = {
        .entries = cpu_to_le32(mul_entries),
    };
    const struct iovec data[] = {
        {
            .iov_base = &uni,
            .iov_len = sizeof(uni),
        }, {
            .iov_base = n->mac_table.macs,
            .iov_len = uni_macs_size,
        }, {
            .iov_base = &mul,
            .iov_len = sizeof(mul),
        }, {
            .iov_base = &n->mac_table.macs[uni_macs_size],
            .iov_len = mul_macs_size,
        },
    };
    ssize_t r = vhost_vdpa_net_load_cmd(s, out_cursor, in_cursor,
                                        VIRTIO_NET_CTRL_MAC,
                                        VIRTIO_NET_CTRL_MAC_TABLE_SET,
                                        data, ARRAY_SIZE(data));
    if (unlikely(r < 0)) {
        return r;
    }

    return 0;
}

static int vhost_vdpa_net_load_rss(VhostVDPAState *s, const VirtIONet *n,
                                   struct iovec *out_cursor,
                                   struct iovec *in_cursor, bool do_rss)
{
    struct virtio_net_rss_config cfg = {};
    ssize_t r;
    g_autofree uint16_t *table = NULL;

    /*
     * According to VirtIO standard, "Initially the device has all hash
     * types disabled and reports only VIRTIO_NET_HASH_REPORT_NONE.".
     *
     * Therefore, there is no need to send this CVQ command if the
     * driver disables all hash types, which aligns with
     * the device's defaults.
     *
     * Note that the device's defaults can mismatch the driver's
     * configuration only at live migration.
     */
    if (!n->rss_data.enabled ||
        n->rss_data.hash_types == VIRTIO_NET_HASH_REPORT_NONE) {
        return 0;
    }

    table = g_malloc_n(n->rss_data.indirections_len,
                       sizeof(n->rss_data.indirections_table[0]));
    cfg.hash_types = cpu_to_le32(n->rss_data.hash_types);

    if (do_rss) {
        /*
         * According to VirtIO standard, "Number of entries in
         * indirection_table is (indirection_table_mask + 1)".
         */
        cfg.indirection_table_mask = cpu_to_le16(n->rss_data.indirections_len -
                                                 1);
        cfg.unclassified_queue = cpu_to_le16(n->rss_data.default_queue);
        for (int i = 0; i < n->rss_data.indirections_len; ++i) {
            table[i] = cpu_to_le16(n->rss_data.indirections_table[i]);
        }
        cfg.max_tx_vq = cpu_to_le16(n->curr_queue_pairs);
    } else {
        /*
         * According to VirtIO standard, "Field reserved MUST contain zeroes.
         * It is defined to make the structure to match the layout of
         * virtio_net_rss_config structure, defined in 5.1.6.5.7.".
         *
         * Therefore, we need to zero the fields in
         * struct virtio_net_rss_config, which corresponds to the
         * `reserved` field in struct virtio_net_hash_config.
         *
         * Note that all other fields are zeroed at their definitions,
         * except for the `indirection_table` field, where the actual data
         * is stored in the `table` variable to ensure compatibility
         * with the RSS case. Therefore, we need to zero the `table`
         * variable here.
         */
        table[0] = 0;
    }

    /*
     * Considering that virtio_net_handle_rss() currently does not restore
     * the hash key length parsed from the CVQ command sent from the guest
     * into n->rss_data and uses the maximum key length in other code, we
     * also employ the maximum key length here.
     */
    cfg.hash_key_length = sizeof(n->rss_data.key);

    const struct iovec data[] = {
        {
            .iov_base = &cfg,
            .iov_len = offsetof(struct virtio_net_rss_config,
                                indirection_table),
        }, {
            .iov_base = table,
            .iov_len = n->rss_data.indirections_len *
                       sizeof(n->rss_data.indirections_table[0]),
        }, {
            .iov_base = &cfg.max_tx_vq,
            .iov_len = offsetof(struct virtio_net_rss_config, hash_key_data) -
                       offsetof(struct virtio_net_rss_config, max_tx_vq),
        }, {
            .iov_base = (void *)n->rss_data.key,
            .iov_len = sizeof(n->rss_data.key),
        }
    };

    r = vhost_vdpa_net_load_cmd(s, out_cursor, in_cursor,
                                VIRTIO_NET_CTRL_MQ,
                                do_rss ?
                                VIRTIO_NET_CTRL_MQ_RSS_CONFIG :
                                VIRTIO_NET_CTRL_MQ_HASH_CONFIG,
                                data, ARRAY_SIZE(data));
    if (unlikely(r < 0)) {
        return r;
    }

    return 0;
}

static int vhost_vdpa_net_load_mq(VhostVDPAState *s,
                                  const VirtIONet *n,
                                  struct iovec *out_cursor,
                                  struct iovec *in_cursor)
{
    struct virtio_net_ctrl_mq mq;
    ssize_t r;

    if (!virtio_vdev_has_feature(&n->parent_obj, VIRTIO_NET_F_MQ)) {
        return 0;
    }

    trace_vhost_vdpa_net_load_mq(s, n->curr_queue_pairs);

    mq.virtqueue_pairs = cpu_to_le16(n->curr_queue_pairs);
    const struct iovec data = {
        .iov_base = &mq,
        .iov_len = sizeof(mq),
    };
    r = vhost_vdpa_net_load_cmd(s, out_cursor, in_cursor,
                                VIRTIO_NET_CTRL_MQ,
                                VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET,
                                &data, 1);
    if (unlikely(r < 0)) {
        return r;
    }

    if (virtio_vdev_has_feature(&n->parent_obj, VIRTIO_NET_F_RSS)) {
        /* load the receive-side scaling state */
        r = vhost_vdpa_net_load_rss(s, n, out_cursor, in_cursor, true);
        if (unlikely(r < 0)) {
            return r;
        }
    } else if (virtio_vdev_has_feature(&n->parent_obj,
                                       VIRTIO_NET_F_HASH_REPORT)) {
        /* load the hash calculation state */
        r = vhost_vdpa_net_load_rss(s, n, out_cursor, in_cursor, false);
        if (unlikely(r < 0)) {
            return r;
        }
    }

    return 0;
}

static int vhost_vdpa_net_load_offloads(VhostVDPAState *s,
                                        const VirtIONet *n,
                                        struct iovec *out_cursor,
                                        struct iovec *in_cursor)
{
    uint64_t offloads;
    ssize_t r;

    if (!virtio_vdev_has_feature(&n->parent_obj,
                                 VIRTIO_NET_F_CTRL_GUEST_OFFLOADS)) {
        return 0;
    }

    if (n->curr_guest_offloads == virtio_net_supported_guest_offloads(n)) {
        /*
         * According to VirtIO standard, "Upon feature negotiation
         * corresponding offload gets enabled to preserve
         * backward compatibility.".
         *
         * Therefore, there is no need to send this CVQ command if the
         * driver also enables all supported offloads, which aligns with
         * the device's defaults.
         *
         * Note that the device's defaults can mismatch the driver's
         * configuration only at live migration.
         */
        return 0;
    }

    offloads = cpu_to_le64(n->curr_guest_offloads);
    const struct iovec data = {
        .iov_base = &offloads,
        .iov_len = sizeof(offloads),
    };
    r = vhost_vdpa_net_load_cmd(s, out_cursor, in_cursor,
                                VIRTIO_NET_CTRL_GUEST_OFFLOADS,
                                VIRTIO_NET_CTRL_GUEST_OFFLOADS_SET,
                                &data, 1);
    if (unlikely(r < 0)) {
        return r;
    }

    return 0;
}

static int vhost_vdpa_net_load_rx_mode(VhostVDPAState *s,
                                       struct iovec *out_cursor,
                                       struct iovec *in_cursor,
                                       uint8_t cmd,
                                       uint8_t on)
{
    const struct iovec data = {
        .iov_base = &on,
        .iov_len = sizeof(on),
    };
    ssize_t r;

    r = vhost_vdpa_net_load_cmd(s, out_cursor, in_cursor,
                                VIRTIO_NET_CTRL_RX, cmd, &data, 1);
    if (unlikely(r < 0)) {
        return r;
    }

    return 0;
}

static int vhost_vdpa_net_load_rx(VhostVDPAState *s,
                                  const VirtIONet *n,
                                  struct iovec *out_cursor,
                                  struct iovec *in_cursor)
{
    ssize_t r;

    if (!virtio_vdev_has_feature(&n->parent_obj, VIRTIO_NET_F_CTRL_RX)) {
        return 0;
    }

    /*
     * According to virtio_net_reset(), device turns promiscuous mode
     * on by default.
     *
     * Additionally, according to VirtIO standard, "Since there are
     * no guarantees, it can use a hash filter or silently switch to
     * allmulti or promiscuous mode if it is given too many addresses.".
     * QEMU marks `n->mac_table.uni_overflow` if guest sets too many
     * non-multicast MAC addresses, indicating that promiscuous mode
     * should be enabled.
     *
     * Therefore, QEMU should only send this CVQ command if the
     * `n->mac_table.uni_overflow` is not marked and `n->promisc` is off,
     * which sets promiscuous mode off, different from the device's defaults.
     *
     * Note that the device's defaults can mismatch the driver's
     * configuration only at live migration.
     */
    if (!n->mac_table.uni_overflow && !n->promisc) {
        r = vhost_vdpa_net_load_rx_mode(s, out_cursor, in_cursor,
                                        VIRTIO_NET_CTRL_RX_PROMISC, 0);
        if (unlikely(r < 0)) {
            return r;
        }
    }

    /*
     * According to virtio_net_reset(), device turns all-multicast mode
     * off by default.
     *
     * According to VirtIO standard, "Since there are no guarantees,
     * it can use a hash filter or silently switch to allmulti or
     * promiscuous mode if it is given too many addresses.". QEMU marks
     * `n->mac_table.multi_overflow` if guest sets too many
     * multicast MAC addresses.
     *
     * Therefore, QEMU should only send this CVQ command if the
     * `n->mac_table.multi_overflow` is marked or `n->allmulti` is on,
     * which sets all-multicast mode on, different from the device's defaults.
     *
     * Note that the device's defaults can mismatch the driver's
     * configuration only at live migration.
     */
    if (n->mac_table.multi_overflow || n->allmulti) {
        r = vhost_vdpa_net_load_rx_mode(s, out_cursor, in_cursor,
                                        VIRTIO_NET_CTRL_RX_ALLMULTI, 1);
        if (unlikely(r < 0)) {
            return r;
        }
    }

    if (!virtio_vdev_has_feature(&n->parent_obj, VIRTIO_NET_F_CTRL_RX_EXTRA)) {
        return 0;
    }

    /*
     * According to virtio_net_reset(), device turns all-unicast mode
     * off by default.
     *
     * Therefore, QEMU should only send this CVQ command if the driver
     * sets all-unicast mode on, different from the device's defaults.
     *
     * Note that the device's defaults can mismatch the driver's
     * configuration only at live migration.
     */
    if (n->alluni) {
        r = vhost_vdpa_net_load_rx_mode(s, out_cursor, in_cursor,
                                        VIRTIO_NET_CTRL_RX_ALLUNI, 1);
        if (r < 0) {
            return r;
        }
    }

    /*
     * According to virtio_net_reset(), device turns non-multicast mode
     * off by default.
     *
     * Therefore, QEMU should only send this CVQ command if the driver
     * sets non-multicast mode on, different from the device's defaults.
     *
     * Note that the device's defaults can mismatch the driver's
     * configuration only at live migration.
     */
    if (n->nomulti) {
        r = vhost_vdpa_net_load_rx_mode(s, out_cursor, in_cursor,
                                        VIRTIO_NET_CTRL_RX_NOMULTI, 1);
        if (r < 0) {
            return r;
        }
    }

    /*
     * According to virtio_net_reset(), device turns non-unicast mode
     * off by default.
     *
     * Therefore, QEMU should only send this CVQ command if the driver
     * sets non-unicast mode on, different from the device's defaults.
     *
     * Note that the device's defaults can mismatch the driver's
     * configuration only at live migration.
     */
    if (n->nouni) {
        r = vhost_vdpa_net_load_rx_mode(s, out_cursor, in_cursor,
                                        VIRTIO_NET_CTRL_RX_NOUNI, 1);
        if (r < 0) {
            return r;
        }
    }

    /*
     * According to virtio_net_reset(), device turns non-broadcast mode
     * off by default.
     *
     * Therefore, QEMU should only send this CVQ command if the driver
     * sets non-broadcast mode on, different from the device's defaults.
     *
     * Note that the device's defaults can mismatch the driver's
     * configuration only at live migration.
     */
    if (n->nobcast) {
        r = vhost_vdpa_net_load_rx_mode(s, out_cursor, in_cursor,
                                        VIRTIO_NET_CTRL_RX_NOBCAST, 1);
        if (r < 0) {
            return r;
        }
    }

    return 0;
}

static int vhost_vdpa_net_load_single_vlan(VhostVDPAState *s,
                                           const VirtIONet *n,
                                           struct iovec *out_cursor,
                                           struct iovec *in_cursor,
                                           uint16_t vid)
{
    const struct iovec data = {
        .iov_base = &vid,
        .iov_len = sizeof(vid),
    };
    ssize_t r = vhost_vdpa_net_load_cmd(s, out_cursor, in_cursor,
                                        VIRTIO_NET_CTRL_VLAN,
                                        VIRTIO_NET_CTRL_VLAN_ADD,
                                        &data, 1);
    if (unlikely(r < 0)) {
        return r;
    }

    return 0;
}

static int vhost_vdpa_net_load_vlan(VhostVDPAState *s,
                                    const VirtIONet *n,
                                    struct iovec *out_cursor,
                                    struct iovec *in_cursor)
{
    int r;

    if (!virtio_vdev_has_feature(&n->parent_obj, VIRTIO_NET_F_CTRL_VLAN)) {
        return 0;
    }

    for (int i = 0; i < MAX_VLAN >> 5; i++) {
        for (int j = 0; n->vlans[i] && j <= 0x1f; j++) {
            if (n->vlans[i] & (1U << j)) {
                r = vhost_vdpa_net_load_single_vlan(s, n, out_cursor,
                                                    in_cursor, (i << 5) + j);
                if (unlikely(r != 0)) {
                    return r;
                }
            }
        }
    }

    return 0;
}

static int vhost_vdpa_net_cvq_load(NetClientState *nc)
{
    VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);
    struct vhost_vdpa *v = &s->vhost_vdpa;
    const VirtIONet *n;
    int r;
    struct iovec out_cursor, in_cursor;

    assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);

    vhost_vdpa_set_vring_ready(v, v->dev->vq_index);

    if (v->shadow_vqs_enabled) {
        n = VIRTIO_NET(v->dev->vdev);
        vhost_vdpa_net_load_cursor_reset(s, &out_cursor, &in_cursor);
        r = vhost_vdpa_net_load_mac(s, n, &out_cursor, &in_cursor);
        if (unlikely(r < 0)) {
            return r;
        }
        r = vhost_vdpa_net_load_mq(s, n, &out_cursor, &in_cursor);
        if (unlikely(r)) {
            return r;
        }
        r = vhost_vdpa_net_load_offloads(s, n, &out_cursor, &in_cursor);
        if (unlikely(r)) {
            return r;
        }
        r = vhost_vdpa_net_load_rx(s, n, &out_cursor, &in_cursor);
        if (unlikely(r)) {
            return r;
        }
        r = vhost_vdpa_net_load_vlan(s, n, &out_cursor, &in_cursor);
        if (unlikely(r)) {
            return r;
        }

        /*
         * We need to poll and check all pending device's used buffers.
         *
         * We can poll here since we've had BQL from the time
         * we sent the descriptor.
         */
        r = vhost_vdpa_net_svq_flush(s, in_cursor.iov_base - (void *)s->status);
        if (unlikely(r)) {
            return r;
        }
    }

    for (int i = 0; i < v->dev->vq_index; ++i) {
        vhost_vdpa_set_vring_ready(v, i);
    }

    return 0;
}

static NetClientInfo net_vhost_vdpa_cvq_info = {
    .type = NET_CLIENT_DRIVER_VHOST_VDPA,
    .size = sizeof(VhostVDPAState),
    .receive = vhost_vdpa_receive,
    .start = vhost_vdpa_net_cvq_start,
    .load = vhost_vdpa_net_cvq_load,
    .stop = vhost_vdpa_net_cvq_stop,
    .cleanup = vhost_vdpa_cleanup,
    .has_vnet_hdr = vhost_vdpa_has_vnet_hdr,
    .has_ufo = vhost_vdpa_has_ufo,
    .check_peer_type = vhost_vdpa_check_peer_type,
    .set_steering_ebpf = vhost_vdpa_set_steering_ebpf,
};

/*
 * Forward the excessive VIRTIO_NET_CTRL_MAC_TABLE_SET CVQ command to
 * the vdpa device.
 *
 * Considering that QEMU cannot send the entire filter table to the
 * vdpa device, it should send the VIRTIO_NET_CTRL_RX_PROMISC CVQ
 * command to enable promiscuous mode to receive all packets,
 * according to VirtIO standard, "Since there are no guarantees,
 * it can use a hash filter or silently switch to allmulti or
 * promiscuous mode if it is given too many addresses.".
 *
 * Since QEMU ignores MAC addresses beyond `MAC_TABLE_ENTRIES` and
 * marks `n->mac_table.x_overflow` accordingly, it should have
 * the same effect on the device model to receive
 * (`MAC_TABLE_ENTRIES` + 1) or more non-multicast MAC addresses.
 * The same applies to multicast MAC addresses.
 *
 * Therefore, QEMU can provide the device model with a fake
 * VIRTIO_NET_CTRL_MAC_TABLE_SET command with (`MAC_TABLE_ENTRIES` + 1)
 * non-multicast MAC addresses and (`MAC_TABLE_ENTRIES` + 1) multicast
 * MAC addresses. This ensures that the device model marks
 * `n->mac_table.uni_overflow` and `n->mac_table.multi_overflow`,
 * allowing all packets to be received, which aligns with the
 * state of the vdpa device.
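 *
 * The fake command built below therefore has the layout
 *   ctrl_hdr | ctrl_mac{entries = N + 1} | (N + 1) * ETH_ALEN address bytes
 *            | ctrl_mac{entries = N + 1} | (N + 1) * ETH_ALEN address bytes
 * with N = MAC_TABLE_ENTRIES. The address bytes themselves are not filled
 * in: once the entries count overflows the table, virtio_net_handle_mac()
 * only records the overflow and does not read the addresses.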
 */
static int vhost_vdpa_net_excessive_mac_filter_cvq_add(VhostVDPAState *s,
                                                       VirtQueueElement *elem,
                                                       struct iovec *out,
                                                       const struct iovec *in)
{
    struct virtio_net_ctrl_mac mac_data, *mac_ptr;
    struct virtio_net_ctrl_hdr *hdr_ptr;
    uint32_t cursor;
    ssize_t r;
    uint8_t on = 1;

    /* parse the non-multicast MAC address entries from CVQ command */
    cursor = sizeof(*hdr_ptr);
    r = iov_to_buf(elem->out_sg, elem->out_num, cursor,
                   &mac_data, sizeof(mac_data));
    if (unlikely(r != sizeof(mac_data))) {
        /*
         * If the CVQ command is invalid, emulate the vdpa device
         * rejecting the VIRTIO_NET_CTRL_MAC_TABLE_SET CVQ command
         */
        *s->status = VIRTIO_NET_ERR;
        return sizeof(*s->status);
    }
    cursor += sizeof(mac_data) + le32_to_cpu(mac_data.entries) * ETH_ALEN;

    /* parse the multicast MAC address entries from CVQ command */
    r = iov_to_buf(elem->out_sg, elem->out_num, cursor,
                   &mac_data, sizeof(mac_data));
    if (r != sizeof(mac_data)) {
        /*
         * If the CVQ command is invalid, emulate the vdpa device
         * rejecting the VIRTIO_NET_CTRL_MAC_TABLE_SET CVQ command
         */
        *s->status = VIRTIO_NET_ERR;
        return sizeof(*s->status);
    }
    cursor += sizeof(mac_data) + le32_to_cpu(mac_data.entries) * ETH_ALEN;

    /* validate the CVQ command */
    if (iov_size(elem->out_sg, elem->out_num) != cursor) {
        /*
         * If the CVQ command is invalid, emulate the vdpa device
         * rejecting the VIRTIO_NET_CTRL_MAC_TABLE_SET CVQ command
         */
        *s->status = VIRTIO_NET_ERR;
        return sizeof(*s->status);
    }

    /*
     * According to VirtIO standard, "Since there are no guarantees,
     * it can use a hash filter or silently switch to allmulti or
     * promiscuous mode if it is given too many addresses.".
     *
     * Therefore, considering that QEMU is unable to send the entire
     * filter table to the vdpa device, it should send the
     * VIRTIO_NET_CTRL_RX_PROMISC CVQ command to enable promiscuous mode.
     */
    hdr_ptr = out->iov_base;
    out->iov_len = sizeof(*hdr_ptr) + sizeof(on);

    hdr_ptr->class = VIRTIO_NET_CTRL_RX;
    hdr_ptr->cmd = VIRTIO_NET_CTRL_RX_PROMISC;
    iov_from_buf(out, 1, sizeof(*hdr_ptr), &on, sizeof(on));
    r = vhost_vdpa_net_cvq_add(s, out, 1, in, 1);
    if (unlikely(r < 0)) {
        return r;
    }

    /*
     * We can poll here since we've had BQL from the time
     * we sent the descriptor.
     */
    r = vhost_vdpa_net_svq_poll(s, 1);
    if (unlikely(r < sizeof(*s->status))) {
        return r;
    }
    if (*s->status != VIRTIO_NET_OK) {
        return sizeof(*s->status);
    }

    /*
     * QEMU should also send a fake VIRTIO_NET_CTRL_MAC_TABLE_SET CVQ
     * command to the device model, including (`MAC_TABLE_ENTRIES` + 1)
     * non-multicast MAC addresses and (`MAC_TABLE_ENTRIES` + 1)
     * multicast MAC addresses.
     *
     * By doing so, the device model can mark `n->mac_table.uni_overflow`
     * and `n->mac_table.multi_overflow`, enabling all packets to be
     * received, which aligns with the state of the vdpa device.
     */
    cursor = 0;
    uint32_t fake_uni_entries = MAC_TABLE_ENTRIES + 1,
             fake_mul_entries = MAC_TABLE_ENTRIES + 1,
             fake_cvq_size = sizeof(struct virtio_net_ctrl_hdr) +
                             sizeof(mac_data) + fake_uni_entries * ETH_ALEN +
                             sizeof(mac_data) + fake_mul_entries * ETH_ALEN;

    assert(fake_cvq_size < vhost_vdpa_net_cvq_cmd_page_len());
    out->iov_len = fake_cvq_size;

    /* pack the header for fake CVQ command */
    hdr_ptr = out->iov_base + cursor;
    hdr_ptr->class = VIRTIO_NET_CTRL_MAC;
    hdr_ptr->cmd = VIRTIO_NET_CTRL_MAC_TABLE_SET;
    cursor += sizeof(*hdr_ptr);

    /*
     * Pack the non-multicast MAC addresses part for fake CVQ command.
     *
     * According to virtio_net_handle_mac(), QEMU doesn't verify the MAC
     * addresses provided in the CVQ command. Therefore, only the entries
     * field needs to be prepared in the CVQ command.
     */
    mac_ptr = out->iov_base + cursor;
    mac_ptr->entries = cpu_to_le32(fake_uni_entries);
    cursor += sizeof(*mac_ptr) + fake_uni_entries * ETH_ALEN;

    /*
     * Pack the multicast MAC addresses part for fake CVQ command.
     *
     * According to virtio_net_handle_mac(), QEMU doesn't verify the MAC
     * addresses provided in the CVQ command. Therefore, only the entries
     * field needs to be prepared in the CVQ command.
     */
    mac_ptr = out->iov_base + cursor;
    mac_ptr->entries = cpu_to_le32(fake_mul_entries);

    /*
     * Simulate QEMU polling a vdpa device used buffer
     * for the VIRTIO_NET_CTRL_MAC_TABLE_SET CVQ command.
     */
    return sizeof(*s->status);
}

/**
 * Validate and copy control virtqueue commands.
 *
 * Following QEMU guidelines, we offer a copy of the buffers to the device to
 * prevent TOCTOU bugs.
 */
static int vhost_vdpa_net_handle_ctrl_avail(VhostShadowVirtqueue *svq,
                                            VirtQueueElement *elem,
                                            void *opaque)
{
    VhostVDPAState *s = opaque;
    size_t in_len;
    const struct virtio_net_ctrl_hdr *ctrl;
    virtio_net_ctrl_ack status = VIRTIO_NET_ERR;
    /* Out buffer sent to both the vdpa device and the device model */
    struct iovec out = {
        .iov_base = s->cvq_cmd_out_buffer,
    };
    /* in buffer used for device model */
    const struct iovec model_in = {
        .iov_base = &status,
        .iov_len = sizeof(status),
    };
    /* in buffer used for vdpa device */
    const struct iovec vdpa_in = {
        .iov_base = s->status,
        .iov_len = sizeof(*s->status),
    };
    ssize_t dev_written = -EINVAL;

    out.iov_len = iov_to_buf(elem->out_sg, elem->out_num, 0,
                             s->cvq_cmd_out_buffer,
                             vhost_vdpa_net_cvq_cmd_page_len());

    ctrl = s->cvq_cmd_out_buffer;
    if (ctrl->class == VIRTIO_NET_CTRL_ANNOUNCE) {
        /*
         * Guest announce capability is emulated by qemu, so don't forward to
         * the device.
         */
        dev_written = sizeof(status);
        *s->status = VIRTIO_NET_OK;
    } else if (unlikely(ctrl->class == VIRTIO_NET_CTRL_MAC &&
                        ctrl->cmd == VIRTIO_NET_CTRL_MAC_TABLE_SET &&
                        iov_size(elem->out_sg, elem->out_num) > out.iov_len)) {
        /*
         * Due to the size limitation of the out buffer sent to the vdpa device,
         * which is determined by vhost_vdpa_net_cvq_cmd_page_len(), excessive
         * MAC addresses set by the driver for the filter table can cause
         * truncation of the CVQ command in QEMU. As a result, the vdpa device
         * rejects the flawed CVQ command.
         *
         * Therefore, QEMU must handle this situation instead of sending
         * the CVQ command directly.
         */
        dev_written = vhost_vdpa_net_excessive_mac_filter_cvq_add(s, elem,
                                                                  &out, &vdpa_in);
        if (unlikely(dev_written < 0)) {
            goto out;
        }
    } else {
        ssize_t r;
        r = vhost_vdpa_net_cvq_add(s, &out, 1, &vdpa_in, 1);
        if (unlikely(r < 0)) {
            dev_written = r;
            goto out;
        }

        /*
         * We can poll here since we've had BQL from the time
         * we sent the descriptor.
         */
        dev_written = vhost_vdpa_net_svq_poll(s, 1);
    }

    if (unlikely(dev_written < sizeof(status))) {
        error_report("Insufficient written data (%zu)", dev_written);
        goto out;
    }

    if (*s->status != VIRTIO_NET_OK) {
        goto out;
    }

    status = VIRTIO_NET_ERR;
    virtio_net_handle_ctrl_iov(svq->vdev, &model_in, 1, &out, 1);
    if (status != VIRTIO_NET_OK) {
        error_report("Bad CVQ processing in model");
    }

out:
    in_len = iov_from_buf(elem->in_sg, elem->in_num, 0, &status,
                          sizeof(status));
    if (unlikely(in_len < sizeof(status))) {
        error_report("Bad device CVQ written length");
    }
    vhost_svq_push_elem(svq, elem, MIN(in_len, sizeof(status)));
    /*
     * `elem` belongs to vhost_vdpa_net_handle_ctrl_avail() only when
     * the function successfully forwards the CVQ command, indicated
     * by a non-negative value of `dev_written`. Otherwise, it still
     * belongs to SVQ.
     * This function should only free `elem` when it owns it.
     */
    if (dev_written >= 0) {
        g_free(elem);
    }
    return dev_written < 0 ? dev_written : 0;
}

static const VhostShadowVirtqueueOps vhost_vdpa_net_svq_ops = {
    .avail_handler = vhost_vdpa_net_handle_ctrl_avail,
};

/**
 * Probe if CVQ is isolated
 *
 * @device_fd The vdpa device fd
 * @features Features offered by the device.
 * @cvq_index The control vq pair index
 *
 * Returns <0 in case of failure, 0 if false and 1 if true.
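 *
 * Note that the probe temporarily acknowledges the device and negotiates
 * @features in order to query vring groups, and it resets the device before
 * returning, so it is only meant to run before the device is started.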
 */
static int vhost_vdpa_probe_cvq_isolation(int device_fd, uint64_t features,
                                          int cvq_index, Error **errp)
{
    uint64_t backend_features;
    int64_t cvq_group;
    uint8_t status = VIRTIO_CONFIG_S_ACKNOWLEDGE |
                     VIRTIO_CONFIG_S_DRIVER;
    int r;

    ERRP_GUARD();

    r = ioctl(device_fd, VHOST_GET_BACKEND_FEATURES, &backend_features);
    if (unlikely(r < 0)) {
        error_setg_errno(errp, errno, "Cannot get vdpa backend_features");
        return r;
    }

    if (!(backend_features & BIT_ULL(VHOST_BACKEND_F_IOTLB_ASID))) {
        return 0;
    }

    r = ioctl(device_fd, VHOST_VDPA_SET_STATUS, &status);
    if (unlikely(r)) {
        error_setg_errno(errp, -r, "Cannot set device status");
        goto out;
    }

    r = ioctl(device_fd, VHOST_SET_FEATURES, &features);
    if (unlikely(r)) {
        error_setg_errno(errp, -r, "Cannot set features");
        goto out;
    }

    status |= VIRTIO_CONFIG_S_FEATURES_OK;
    r = ioctl(device_fd, VHOST_VDPA_SET_STATUS, &status);
    if (unlikely(r)) {
        error_setg_errno(errp, -r, "Cannot set device status");
        goto out;
    }

    cvq_group = vhost_vdpa_get_vring_group(device_fd, cvq_index, errp);
    if (unlikely(cvq_group < 0)) {
        if (cvq_group != -ENOTSUP) {
            r = cvq_group;
            goto out;
        }

        /*
         * The kernel reports VHOST_BACKEND_F_IOTLB_ASID if the vdpa frontend
         * supports ASID even if the parent driver does not.  The CVQ cannot
         * be isolated in this case.
         */
        error_free(*errp);
        *errp = NULL;
        r = 0;
        goto out;
    }

    for (int i = 0; i < cvq_index; ++i) {
        int64_t group = vhost_vdpa_get_vring_group(device_fd, i, errp);
        if (unlikely(group < 0)) {
            r = group;
            goto out;
        }

        if (group == (int64_t)cvq_group) {
            r = 0;
            goto out;
        }
    }

    r = 1;

out:
    status = 0;
    ioctl(device_fd, VHOST_VDPA_SET_STATUS, &status);
    return r;
}

static NetClientState *net_vhost_vdpa_init(NetClientState *peer,
                                           const char *device,
                                           const char *name,
                                           int vdpa_device_fd,
                                           int queue_pair_index,
                                           int nvqs,
                                           bool is_datapath,
                                           bool svq,
                                           struct vhost_vdpa_iova_range iova_range,
                                           uint64_t features,
                                           VhostVDPAShared *shared,
                                           Error **errp)
{
    NetClientState *nc = NULL;
    VhostVDPAState *s;
    int ret = 0;
    assert(name);
    int cvq_isolated = 0;

    if (is_datapath) {
        nc = qemu_new_net_client(&net_vhost_vdpa_info, peer, device,
                                 name);
    } else {
        cvq_isolated = vhost_vdpa_probe_cvq_isolation(vdpa_device_fd, features,
                                                      queue_pair_index * 2,
                                                      errp);
        if (unlikely(cvq_isolated < 0)) {
            return NULL;
        }

        nc = qemu_new_net_control_client(&net_vhost_vdpa_cvq_info, peer,
                                         device, name);
    }
    qemu_set_info_str(nc, TYPE_VHOST_VDPA);
    s = DO_UPCAST(VhostVDPAState, nc, nc);

    s->vhost_vdpa.index = queue_pair_index;
    s->always_svq = svq;
    s->migration_state.notify = NULL;
    s->vhost_vdpa.shadow_vqs_enabled = svq;
    if (queue_pair_index == 0) {
        vhost_vdpa_net_valid_svq_features(features,
                                          &s->vhost_vdpa.migration_blocker);
        s->vhost_vdpa.shared = g_new0(VhostVDPAShared, 1);
        s->vhost_vdpa.shared->device_fd = vdpa_device_fd;
        s->vhost_vdpa.shared->iova_range = iova_range;
        s->vhost_vdpa.shared->shadow_data = svq;
    } else if (!is_datapath) {
        s->cvq_cmd_out_buffer = mmap(NULL,
                                     vhost_vdpa_net_cvq_cmd_page_len(),
                                     PROT_READ | PROT_WRITE,
                                     MAP_SHARED | MAP_ANONYMOUS, -1, 0);
        s->status = mmap(NULL, vhost_vdpa_net_cvq_cmd_page_len(),
                         PROT_READ | PROT_WRITE, MAP_SHARED | MAP_ANONYMOUS,
                         -1, 0);

        s->vhost_vdpa.shadow_vq_ops = &vhost_vdpa_net_svq_ops;
        s->vhost_vdpa.shadow_vq_ops_opaque = s;
        s->cvq_isolated = cvq_isolated;
    }
    if (queue_pair_index != 0) {
        s->vhost_vdpa.shared = shared;
    }

    ret = vhost_vdpa_add(nc, (void *)&s->vhost_vdpa, queue_pair_index, nvqs);
    if (ret) {
        qemu_del_net_client(nc);
        return NULL;
    }

    return nc;
}

static int vhost_vdpa_get_features(int fd, uint64_t *features, Error **errp)
{
    int ret = ioctl(fd, VHOST_GET_FEATURES, features);
    if (unlikely(ret < 0)) {
        error_setg_errno(errp, errno,
                         "Fail to query features from vhost-vDPA device");
    }
    return ret;
}

static int vhost_vdpa_get_max_queue_pairs(int fd, uint64_t features,
                                          int *has_cvq, Error **errp)
{
    unsigned long config_size = offsetof(struct vhost_vdpa_config, buf);
    g_autofree struct vhost_vdpa_config *config = NULL;
    __virtio16 *max_queue_pairs;
    int ret;

    if (features & (1 << VIRTIO_NET_F_CTRL_VQ)) {
        *has_cvq = 1;
    } else {
        *has_cvq = 0;
    }

    if (features & (1 << VIRTIO_NET_F_MQ)) {
        config = g_malloc0(config_size + sizeof(*max_queue_pairs));
        config->off = offsetof(struct virtio_net_config, max_virtqueue_pairs);
        config->len = sizeof(*max_queue_pairs);

        ret = ioctl(fd, VHOST_VDPA_GET_CONFIG, config);
        if (ret) {
            error_setg(errp, "Fail to get config from vhost-vDPA device");
            return -ret;
        }

        max_queue_pairs = (__virtio16 *)&config->buf;

        return lduw_le_p(max_queue_pairs);
    }

    return 1;
}

int net_init_vhost_vdpa(const Netdev *netdev, const char *name,
                        NetClientState *peer, Error **errp)
{
    const NetdevVhostVDPAOptions *opts;
    uint64_t features;
    int vdpa_device_fd;
    g_autofree NetClientState **ncs = NULL;
    struct vhost_vdpa_iova_range iova_range;
    NetClientState *nc;
    int queue_pairs, r, i = 0, has_cvq = 0;

    assert(netdev->type == NET_CLIENT_DRIVER_VHOST_VDPA);
    opts = &netdev->u.vhost_vdpa;
    if (!opts->vhostdev && !opts->vhostfd) {
        error_setg(errp,
                   "vhost-vdpa: neither vhostdev= nor vhostfd= was specified");
        return -1;
    }

    if (opts->vhostdev && opts->vhostfd) {
        error_setg(errp,
                   "vhost-vdpa: vhostdev= and vhostfd= are mutually exclusive");
        return -1;
    }

    if (opts->vhostdev) {
        vdpa_device_fd = qemu_open(opts->vhostdev, O_RDWR, errp);
        if (vdpa_device_fd == -1) {
            return -errno;
        }
    } else {
        /* has_vhostfd */
        vdpa_device_fd = monitor_fd_param(monitor_cur(), opts->vhostfd, errp);
        if (vdpa_device_fd == -1) {
            error_prepend(errp, "vhost-vdpa: unable to parse vhostfd: ");
            return -1;
        }
    }

    r = vhost_vdpa_get_features(vdpa_device_fd, &features, errp);
    if (unlikely(r < 0)) {
        goto err;
    }

    queue_pairs = vhost_vdpa_get_max_queue_pairs(vdpa_device_fd, features,
                                                 &has_cvq, errp);
    if (queue_pairs < 0) {
        qemu_close(vdpa_device_fd);
        return queue_pairs;
    }

    r = vhost_vdpa_get_iova_range(vdpa_device_fd, &iova_range);
    if (unlikely(r < 0)) {
        error_setg(errp, "vhost-vdpa: get iova range failed: %s",
                   strerror(-r));
        goto err;
    }

    if (opts->x_svq && !vhost_vdpa_net_valid_svq_features(features, errp)) {
        goto err;
    }

    ncs = g_malloc0(sizeof(*ncs) * queue_pairs);

    for (i = 0; i < queue_pairs; i++) {
        VhostVDPAShared *shared = NULL;

        if (i) {
            shared = DO_UPCAST(VhostVDPAState, nc, ncs[0])->vhost_vdpa.shared;
        }
        ncs[i] = net_vhost_vdpa_init(peer, TYPE_VHOST_VDPA, name,
                                     vdpa_device_fd, i, 2, true, opts->x_svq,
                                     iova_range, features, shared, errp);
        if (!ncs[i]) {
            goto err;
        }
    }

    if (has_cvq) {
        VhostVDPAState *s0 = DO_UPCAST(VhostVDPAState, nc, ncs[0]);
        VhostVDPAShared *shared = s0->vhost_vdpa.shared;

        nc = net_vhost_vdpa_init(peer, TYPE_VHOST_VDPA, name,
                                 vdpa_device_fd, i, 1, false,
                                 opts->x_svq, iova_range, features, shared,
                                 errp);
        if (!nc) {
            goto err;
        }
    }

    return 0;

err:
    if (i) {
        for (i--; i >= 0; i--) {
            qemu_del_net_client(ncs[i]);
        }
    }

    qemu_close(vdpa_device_fd);

    return -1;
}
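
/*
 * Usage sketch (for reference only, not compiled into anything extra):
 * this backend is typically instantiated with something like
 *
 *   -netdev type=vhost-vdpa,vhostdev=/dev/vhost-vdpa-0,id=vdpa0
 *   -device virtio-net-pci,netdev=vdpa0
 *
 * optionally adding x-svq=on to force shadow virtqueues from the start.
 */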