/*
 * vhost-vdpa.c
 *
 * Copyright(c) 2017-2018 Intel Corporation.
 * Copyright(c) 2020 Red Hat, Inc.
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

#include "qemu/osdep.h"
#include "clients.h"
#include "hw/virtio/virtio-net.h"
#include "net/vhost_net.h"
#include "net/vhost-vdpa.h"
#include "hw/virtio/vhost-vdpa.h"
#include "qemu/config-file.h"
#include "qemu/error-report.h"
#include "qemu/log.h"
#include "qemu/memalign.h"
#include "qemu/option.h"
#include "qapi/error.h"
#include <linux/vhost.h>
#include <sys/ioctl.h>
#include <err.h>
#include "standard-headers/linux/virtio_net.h"
#include "monitor/monitor.h"
#include "hw/virtio/vhost.h"

/* TODO: add multiqueue support here */
typedef struct VhostVDPAState {
    NetClientState nc;
    struct vhost_vdpa vhost_vdpa;
    VHostNetState *vhost_net;

    /* Control commands shadow buffers */
    void *cvq_cmd_out_buffer;
    virtio_net_ctrl_ack *status;

    /* The device always has SVQ enabled */
    bool always_svq;
    bool started;
} VhostVDPAState;

const int vdpa_feature_bits[] = {
    VIRTIO_F_NOTIFY_ON_EMPTY,
    VIRTIO_RING_F_INDIRECT_DESC,
    VIRTIO_RING_F_EVENT_IDX,
    VIRTIO_F_ANY_LAYOUT,
    VIRTIO_F_VERSION_1,
    VIRTIO_NET_F_CSUM,
    VIRTIO_NET_F_GUEST_CSUM,
    VIRTIO_NET_F_GSO,
    VIRTIO_NET_F_GUEST_TSO4,
    VIRTIO_NET_F_GUEST_TSO6,
    VIRTIO_NET_F_GUEST_ECN,
    VIRTIO_NET_F_GUEST_UFO,
    VIRTIO_NET_F_HOST_TSO4,
    VIRTIO_NET_F_HOST_TSO6,
    VIRTIO_NET_F_HOST_ECN,
    VIRTIO_NET_F_HOST_UFO,
    VIRTIO_NET_F_MRG_RXBUF,
    VIRTIO_NET_F_MTU,
    VIRTIO_NET_F_CTRL_RX,
    VIRTIO_NET_F_CTRL_RX_EXTRA,
    VIRTIO_NET_F_CTRL_VLAN,
    VIRTIO_NET_F_CTRL_MAC_ADDR,
    VIRTIO_NET_F_RSS,
    VIRTIO_NET_F_MQ,
    VIRTIO_NET_F_CTRL_VQ,
    VIRTIO_F_IOMMU_PLATFORM,
    VIRTIO_F_RING_PACKED,
    VIRTIO_F_RING_RESET,
    VIRTIO_NET_F_HASH_REPORT,
    VIRTIO_NET_F_STATUS,
    VHOST_INVALID_FEATURE_BIT
};

/** Supported device specific feature bits with SVQ */
static const uint64_t vdpa_svq_device_features =
    BIT_ULL(VIRTIO_NET_F_CSUM) |
    BIT_ULL(VIRTIO_NET_F_GUEST_CSUM) |
    BIT_ULL(VIRTIO_NET_F_MTU) |
    BIT_ULL(VIRTIO_NET_F_MAC) |
    BIT_ULL(VIRTIO_NET_F_GUEST_TSO4) |
    BIT_ULL(VIRTIO_NET_F_GUEST_TSO6) |
    BIT_ULL(VIRTIO_NET_F_GUEST_ECN) |
    BIT_ULL(VIRTIO_NET_F_GUEST_UFO) |
    BIT_ULL(VIRTIO_NET_F_HOST_TSO4) |
    BIT_ULL(VIRTIO_NET_F_HOST_TSO6) |
    BIT_ULL(VIRTIO_NET_F_HOST_ECN) |
    BIT_ULL(VIRTIO_NET_F_HOST_UFO) |
    BIT_ULL(VIRTIO_NET_F_MRG_RXBUF) |
    BIT_ULL(VIRTIO_NET_F_STATUS) |
    BIT_ULL(VIRTIO_NET_F_CTRL_VQ) |
    BIT_ULL(VIRTIO_NET_F_MQ) |
    BIT_ULL(VIRTIO_F_ANY_LAYOUT) |
    BIT_ULL(VIRTIO_NET_F_CTRL_MAC_ADDR) |
    BIT_ULL(VIRTIO_NET_F_RSC_EXT) |
    BIT_ULL(VIRTIO_NET_F_STANDBY);

#define VHOST_VDPA_NET_CVQ_ASID 1

VHostNetState *vhost_vdpa_get_vhost_net(NetClientState *nc)
{
    VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);
    assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);
    return s->vhost_net;
}

static bool vhost_vdpa_net_valid_svq_features(uint64_t features, Error **errp)
{
    uint64_t invalid_dev_features =
        features & ~vdpa_svq_device_features &
        /* Transport features are all accepted at this point */
        ~MAKE_64BIT_MASK(VIRTIO_TRANSPORT_F_START,
                         VIRTIO_TRANSPORT_F_END - VIRTIO_TRANSPORT_F_START);

    if (invalid_dev_features) {
        error_setg(errp, "vdpa svq does not work with features 0x%" PRIx64,
                   invalid_dev_features);
        return false;
    }

    return vhost_svq_valid_features(features, errp);
}

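/* Check that the vhost-vdpa backend really is a virtio-net device */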
static int vhost_vdpa_net_check_device_id(struct vhost_net *net)
{
    uint32_t device_id;
    int ret;
    struct vhost_dev *hdev;

    hdev = (struct vhost_dev *)&net->dev;
    ret = hdev->vhost_ops->vhost_get_device_id(hdev, &device_id);
    if (ret) {
        return ret;
    }
    if (device_id != VIRTIO_ID_NET) {
        return -ENOTSUP;
    }
    return 0;
}

static int vhost_vdpa_add(NetClientState *ncs, void *be,
                          int queue_pair_index, int nvqs)
{
    VhostNetOptions options;
    struct vhost_net *net = NULL;
    VhostVDPAState *s;
    int ret;

    options.backend_type = VHOST_BACKEND_TYPE_VDPA;
    assert(ncs->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);
    s = DO_UPCAST(VhostVDPAState, nc, ncs);
    options.net_backend = ncs;
    options.opaque = be;
    options.busyloop_timeout = 0;
    options.nvqs = nvqs;

    net = vhost_net_init(&options);
    if (!net) {
        error_report("failed to init vhost_net for queue");
        goto err_init;
    }
    s->vhost_net = net;
    ret = vhost_vdpa_net_check_device_id(net);
    if (ret) {
        goto err_check;
    }
    return 0;
err_check:
    vhost_net_cleanup(net);
    g_free(net);
err_init:
    return -1;
}

static void vhost_vdpa_cleanup(NetClientState *nc)
{
    VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);

    qemu_vfree(s->cvq_cmd_out_buffer);
    qemu_vfree(s->status);
    if (s->vhost_net) {
        vhost_net_cleanup(s->vhost_net);
        g_free(s->vhost_net);
        s->vhost_net = NULL;
    }
    if (s->vhost_vdpa.device_fd >= 0) {
        qemu_close(s->vhost_vdpa.device_fd);
        s->vhost_vdpa.device_fd = -1;
    }
}

static bool vhost_vdpa_has_vnet_hdr(NetClientState *nc)
{
    assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);

    return true;
}

static bool vhost_vdpa_has_ufo(NetClientState *nc)
{
    assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);
    VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);
    uint64_t features = 0;
    features |= (1ULL << VIRTIO_NET_F_HOST_UFO);
    features = vhost_net_get_features(s->vhost_net, features);
    return !!(features & (1ULL << VIRTIO_NET_F_HOST_UFO));
}

static bool vhost_vdpa_check_peer_type(NetClientState *nc, ObjectClass *oc,
                                       Error **errp)
{
    const char *driver = object_class_get_name(oc);

    if (!g_str_has_prefix(driver, "virtio-net-")) {
        error_setg(errp, "vhost-vdpa requires frontend driver virtio-net-*");
        return false;
    }

    return true;
}

/** Dummy receive in case qemu falls back to userland tap networking */
static ssize_t vhost_vdpa_receive(NetClientState *nc, const uint8_t *buf,
                                  size_t size)
{
    return size;
}

/** From any vdpa net client, get the netclient of the first queue pair */
static VhostVDPAState *vhost_vdpa_net_first_nc_vdpa(VhostVDPAState *s)
{
    NICState *nic = qemu_get_nic(s->nc.peer);
    NetClientState *nc0 = qemu_get_peer(nic->ncs, 0);

    return DO_UPCAST(VhostVDPAState, nc, nc0);
}

static void vhost_vdpa_net_data_start_first(VhostVDPAState *s)
{
    struct vhost_vdpa *v = &s->vhost_vdpa;

    if (v->shadow_vqs_enabled) {
        v->iova_tree = vhost_iova_tree_new(v->iova_range.first,
                                           v->iova_range.last);
    }
}

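/*
 * Per-queue-pair start hook for data virtqueues: the first queue pair
 * allocates the IOVA tree when SVQ is enabled, the rest reuse it.
 */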
static int vhost_vdpa_net_data_start(NetClientState *nc)
{
    VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);
    struct vhost_vdpa *v = &s->vhost_vdpa;

    assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);

    if (v->index == 0) {
        vhost_vdpa_net_data_start_first(s);
        return 0;
    }

    if (v->shadow_vqs_enabled) {
        VhostVDPAState *s0 = vhost_vdpa_net_first_nc_vdpa(s);
        v->iova_tree = s0->vhost_vdpa.iova_tree;
    }

    return 0;
}

static void vhost_vdpa_net_client_stop(NetClientState *nc)
{
    VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);
    struct vhost_dev *dev;

    assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);

    dev = s->vhost_vdpa.dev;
    if (dev->vq_index + dev->nvqs == dev->vq_index_end) {
        g_clear_pointer(&s->vhost_vdpa.iova_tree, vhost_iova_tree_delete);
    }
}

static NetClientInfo net_vhost_vdpa_info = {
    .type = NET_CLIENT_DRIVER_VHOST_VDPA,
    .size = sizeof(VhostVDPAState),
    .receive = vhost_vdpa_receive,
    .start = vhost_vdpa_net_data_start,
    .stop = vhost_vdpa_net_client_stop,
    .cleanup = vhost_vdpa_cleanup,
    .has_vnet_hdr = vhost_vdpa_has_vnet_hdr,
    .has_ufo = vhost_vdpa_has_ufo,
    .check_peer_type = vhost_vdpa_check_peer_type,
};

static int64_t vhost_vdpa_get_vring_group(int device_fd, unsigned vq_index)
{
    struct vhost_vring_state state = {
        .index = vq_index,
    };
    int r = ioctl(device_fd, VHOST_VDPA_GET_VRING_GROUP, &state);

    if (unlikely(r < 0)) {
        error_report("Cannot get VQ %u group: %s", vq_index,
                     g_strerror(errno));
        return r;
    }

    return state.num;
}

static int vhost_vdpa_set_address_space_id(struct vhost_vdpa *v,
                                           unsigned vq_group,
                                           unsigned asid_num)
{
    struct vhost_vring_state asid = {
        .index = vq_group,
        .num = asid_num,
    };
    int r;

    r = ioctl(v->device_fd, VHOST_VDPA_SET_GROUP_ASID, &asid);
    if (unlikely(r < 0)) {
        error_report("Can't set vq group %u asid %u, errno=%d (%s)",
                     asid.index, asid.num, errno, g_strerror(errno));
    }
    return r;
}

static void vhost_vdpa_cvq_unmap_buf(struct vhost_vdpa *v, void *addr)
{
    VhostIOVATree *tree = v->iova_tree;
    DMAMap needle = {
        /*
         * No need to specify size or to look for more translations since
         * this contiguous chunk was allocated by us.
         */
        .translated_addr = (hwaddr)(uintptr_t)addr,
    };
    const DMAMap *map = vhost_iova_tree_find_iova(tree, &needle);
    int r;

    if (unlikely(!map)) {
        error_report("Cannot locate expected map");
        return;
    }

    r = vhost_vdpa_dma_unmap(v, v->address_space_id, map->iova, map->size + 1);
    if (unlikely(r != 0)) {
        error_report("Device cannot unmap: %s(%d)", g_strerror(r), r);
    }

    vhost_iova_tree_remove(tree, *map);
}

static size_t vhost_vdpa_net_cvq_cmd_len(void)
{
    /*
     * MAC_TABLE_SET is the ctrl command that produces the longest out buffer.
     * The in buffer is always 1 byte, so it should fit here.
     */
    return sizeof(struct virtio_net_ctrl_hdr) +
           2 * sizeof(struct virtio_net_ctrl_mac) +
           MAC_TABLE_ENTRIES * ETH_ALEN;
}

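/*
 * CVQ buffers are allocated and DMA-mapped with page granularity, so round
 * the command length up to a whole host page.
 */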
static size_t vhost_vdpa_net_cvq_cmd_page_len(void)
{
    return ROUND_UP(vhost_vdpa_net_cvq_cmd_len(), qemu_real_host_page_size());
}

/** Map CVQ buffer. */
static int vhost_vdpa_cvq_map_buf(struct vhost_vdpa *v, void *buf, size_t size,
                                  bool write)
{
    DMAMap map = {};
    int r;

    map.translated_addr = (hwaddr)(uintptr_t)buf;
    map.size = size - 1;
    map.perm = write ? IOMMU_RW : IOMMU_RO;
    r = vhost_iova_tree_map_alloc(v->iova_tree, &map);
    if (unlikely(r != IOVA_OK)) {
        error_report("Cannot map injected element");
        return r;
    }

    r = vhost_vdpa_dma_map(v, v->address_space_id, map.iova,
                           vhost_vdpa_net_cvq_cmd_page_len(), buf, !write);
    if (unlikely(r < 0)) {
        goto dma_map_err;
    }

    return 0;

dma_map_err:
    vhost_iova_tree_remove(v->iova_tree, map);
    return r;
}

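/*
 * Start the control virtqueue client. If the backend supports per-group
 * address spaces and CVQ sits in its own virtqueue group, move CVQ to its
 * own ASID and shadow it so QEMU can inject control commands.
 */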
static int vhost_vdpa_net_cvq_start(NetClientState *nc)
{
    VhostVDPAState *s, *s0;
    struct vhost_vdpa *v;
    uint64_t backend_features;
    int64_t cvq_group;
    int cvq_index, r;

    assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);

    s = DO_UPCAST(VhostVDPAState, nc, nc);
    v = &s->vhost_vdpa;

    v->shadow_data = s->always_svq;
    v->shadow_vqs_enabled = s->always_svq;
    s->vhost_vdpa.address_space_id = VHOST_VDPA_GUEST_PA_ASID;

    if (s->always_svq) {
        /* SVQ is already configured for all virtqueues */
        goto out;
    }

    /*
     * If we return early in these cases, SVQ will not be enabled. Migration
     * will stay blocked as long as vhost-vdpa backends do not offer _F_LOG.
     *
     * Call VHOST_GET_BACKEND_FEATURES directly, as the backend features are
     * not available in v->dev yet.
     */
    r = ioctl(v->device_fd, VHOST_GET_BACKEND_FEATURES, &backend_features);
    if (unlikely(r < 0)) {
        error_report("Cannot get vdpa backend_features: %s(%d)",
                     g_strerror(errno), errno);
        return -1;
    }
    if (!(backend_features & BIT_ULL(VHOST_BACKEND_F_IOTLB_ASID)) ||
        !vhost_vdpa_net_valid_svq_features(v->dev->features, NULL)) {
        return 0;
    }

    /*
     * Check that every data virtqueue is in a different vq group than the
     * last vq (the CVQ), whose group is stored in cvq_group.
     */
    cvq_index = v->dev->vq_index_end - 1;
    cvq_group = vhost_vdpa_get_vring_group(v->device_fd, cvq_index);
    if (unlikely(cvq_group < 0)) {
        return cvq_group;
    }
    for (int i = 0; i < cvq_index; ++i) {
        int64_t group = vhost_vdpa_get_vring_group(v->device_fd, i);

        if (unlikely(group < 0)) {
            return group;
        }

        if (group == cvq_group) {
            return 0;
        }
    }

    r = vhost_vdpa_set_address_space_id(v, cvq_group, VHOST_VDPA_NET_CVQ_ASID);
    if (unlikely(r < 0)) {
        return r;
    }

    v->shadow_vqs_enabled = true;
    s->vhost_vdpa.address_space_id = VHOST_VDPA_NET_CVQ_ASID;

out:
    if (!s->vhost_vdpa.shadow_vqs_enabled) {
        return 0;
    }

    s0 = vhost_vdpa_net_first_nc_vdpa(s);
    if (s0->vhost_vdpa.iova_tree) {
        /*
         * SVQ is already configured for all virtqueues. Reuse the IOVA tree
         * for simplicity, whether CVQ shares ASID with the guest or not,
         * because:
         * - The memory listener needs access to guest's memory addresses
         *   allocated in the IOVA tree.
         * - There should be plenty of IOVA address space for both ASIDs not
         *   to worry about collisions between them. Guest's translations are
         *   still validated with virtio virtqueue_pop so there is no risk for
         *   the guest to access memory that it shouldn't.
         *
         * Allocating an IOVA tree per ASID is doable but it complicates the
         * code and it is not worth it for the moment.
         */
        v->iova_tree = s0->vhost_vdpa.iova_tree;
    } else {
        v->iova_tree = vhost_iova_tree_new(v->iova_range.first,
                                           v->iova_range.last);
    }

    r = vhost_vdpa_cvq_map_buf(&s->vhost_vdpa, s->cvq_cmd_out_buffer,
                               vhost_vdpa_net_cvq_cmd_page_len(), false);
    if (unlikely(r < 0)) {
        return r;
    }

    r = vhost_vdpa_cvq_map_buf(&s->vhost_vdpa, s->status,
                               vhost_vdpa_net_cvq_cmd_page_len(), true);
    if (unlikely(r < 0)) {
        vhost_vdpa_cvq_unmap_buf(&s->vhost_vdpa, s->cvq_cmd_out_buffer);
    }

    return r;
}

static void vhost_vdpa_net_cvq_stop(NetClientState *nc)
{
    VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);

    assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);

    if (s->vhost_vdpa.shadow_vqs_enabled) {
        vhost_vdpa_cvq_unmap_buf(&s->vhost_vdpa, s->cvq_cmd_out_buffer);
        vhost_vdpa_cvq_unmap_buf(&s->vhost_vdpa, s->status);
    }

    vhost_vdpa_net_client_stop(nc);
}

static ssize_t vhost_vdpa_net_cvq_add(VhostVDPAState *s, size_t out_len,
                                      size_t in_len)
{
    /* Buffers for the device */
    const struct iovec out = {
        .iov_base = s->cvq_cmd_out_buffer,
        .iov_len = out_len,
    };
    const struct iovec in = {
        .iov_base = s->status,
        .iov_len = sizeof(virtio_net_ctrl_ack),
    };
    VhostShadowVirtqueue *svq = g_ptr_array_index(s->vhost_vdpa.shadow_vqs, 0);
    int r;

    r = vhost_svq_add(svq, &out, 1, &in, 1, NULL);
    if (unlikely(r != 0)) {
        if (unlikely(r == -ENOSPC)) {
            qemu_log_mask(LOG_GUEST_ERROR, "%s: No space on device queue\n",
                          __func__);
        }
        return r;
    }

    /*
     * We can poll here since we've had BQL from the time we sent the
     * descriptor. Also, we need to take the answer before SVQ pulls by
     * itself, when BQL is released.
     */
    return vhost_svq_poll(svq);
}

static ssize_t vhost_vdpa_net_load_cmd(VhostVDPAState *s, uint8_t class,
                                       uint8_t cmd, const void *data,
                                       size_t data_size)
{
    const struct virtio_net_ctrl_hdr ctrl = {
        .class = class,
        .cmd = cmd,
    };

    assert(data_size < vhost_vdpa_net_cvq_cmd_page_len() - sizeof(ctrl));

    memcpy(s->cvq_cmd_out_buffer, &ctrl, sizeof(ctrl));
    memcpy(s->cvq_cmd_out_buffer + sizeof(ctrl), data, data_size);

    return vhost_vdpa_net_cvq_add(s, sizeof(ctrl) + data_size,
                                  sizeof(virtio_net_ctrl_ack));
}

static int vhost_vdpa_net_load_mac(VhostVDPAState *s, const VirtIONet *n)
{
    uint64_t features = n->parent_obj.guest_features;
    if (features & BIT_ULL(VIRTIO_NET_F_CTRL_MAC_ADDR)) {
        ssize_t dev_written = vhost_vdpa_net_load_cmd(s, VIRTIO_NET_CTRL_MAC,
                                                  VIRTIO_NET_CTRL_MAC_ADDR_SET,
                                                      n->mac, sizeof(n->mac));
        if (unlikely(dev_written < 0)) {
            return dev_written;
        }

        return *s->status != VIRTIO_NET_OK;
    }

    return 0;
}

static int vhost_vdpa_net_load_mq(VhostVDPAState *s,
                                  const VirtIONet *n)
{
    struct virtio_net_ctrl_mq mq;
    uint64_t features = n->parent_obj.guest_features;
    ssize_t dev_written;

    if (!(features & BIT_ULL(VIRTIO_NET_F_MQ))) {
        return 0;
    }

    mq.virtqueue_pairs = cpu_to_le16(n->curr_queue_pairs);
    dev_written = vhost_vdpa_net_load_cmd(s, VIRTIO_NET_CTRL_MQ,
                                          VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET, &mq,
                                          sizeof(mq));
    if (unlikely(dev_written < 0)) {
        return dev_written;
    }

    return *s->status != VIRTIO_NET_OK;
}

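/*
 * Replay the guest-visible net configuration (MAC address, number of queue
 * pairs) through the shadow CVQ once the device is started.
 */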
static int vhost_vdpa_net_load(NetClientState *nc)
{
    VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);
    struct vhost_vdpa *v = &s->vhost_vdpa;
    const VirtIONet *n;
    int r;

    assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);

    if (!v->shadow_vqs_enabled) {
        return 0;
    }

    n = VIRTIO_NET(v->dev->vdev);
    r = vhost_vdpa_net_load_mac(s, n);
    if (unlikely(r < 0)) {
        return r;
    }
    r = vhost_vdpa_net_load_mq(s, n);
    if (unlikely(r)) {
        return r;
    }

    return 0;
}

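/*
 * NetClientInfo for the control virtqueue client: same data path hooks, but
 * start/stop manage the shadow CVQ buffers and .load restores the device
 * state through CVQ once the device is started.
 */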
static NetClientInfo net_vhost_vdpa_cvq_info = {
    .type = NET_CLIENT_DRIVER_VHOST_VDPA,
    .size = sizeof(VhostVDPAState),
    .receive = vhost_vdpa_receive,
    .start = vhost_vdpa_net_cvq_start,
    .load = vhost_vdpa_net_load,
    .stop = vhost_vdpa_net_cvq_stop,
    .cleanup = vhost_vdpa_cleanup,
    .has_vnet_hdr = vhost_vdpa_has_vnet_hdr,
    .has_ufo = vhost_vdpa_has_ufo,
    .check_peer_type = vhost_vdpa_check_peer_type,
};

/**
 * Validate and copy control virtqueue commands.
 *
 * Following QEMU guidelines, we offer a copy of the buffers to the device to
 * prevent TOCTOU bugs.
 */
static int vhost_vdpa_net_handle_ctrl_avail(VhostShadowVirtqueue *svq,
                                            VirtQueueElement *elem,
                                            void *opaque)
{
    VhostVDPAState *s = opaque;
    size_t in_len;
    virtio_net_ctrl_ack status = VIRTIO_NET_ERR;
    /* Out buffer sent to both the vdpa device and the device model */
    struct iovec out = {
        .iov_base = s->cvq_cmd_out_buffer,
    };
    /* in buffer used for the device model */
    const struct iovec in = {
        .iov_base = &status,
        .iov_len = sizeof(status),
    };
    ssize_t dev_written = -EINVAL;

    out.iov_len = iov_to_buf(elem->out_sg, elem->out_num, 0,
                             s->cvq_cmd_out_buffer,
                             vhost_vdpa_net_cvq_cmd_len());
    if (*(uint8_t *)s->cvq_cmd_out_buffer == VIRTIO_NET_CTRL_ANNOUNCE) {
        /*
         * Guest announce capability is emulated by qemu, so don't forward to
         * the device.
         */
        dev_written = sizeof(status);
        *s->status = VIRTIO_NET_OK;
    } else {
        dev_written = vhost_vdpa_net_cvq_add(s, out.iov_len, sizeof(status));
        if (unlikely(dev_written < 0)) {
            goto out;
        }
    }

    if (unlikely(dev_written < sizeof(status))) {
        error_report("Insufficient written data (%zu)", dev_written);
        goto out;
    }

    if (*s->status != VIRTIO_NET_OK) {
        return VIRTIO_NET_ERR;
    }

    /* Let the device model process the command; it writes the ack via @in */
    status = VIRTIO_NET_ERR;
    virtio_net_handle_ctrl_iov(svq->vdev, &in, 1, &out, 1);
    if (status != VIRTIO_NET_OK) {
        error_report("Bad CVQ processing in model");
    }

out:
    in_len = iov_from_buf(elem->in_sg, elem->in_num, 0, &status,
                          sizeof(status));
    if (unlikely(in_len < sizeof(status))) {
        error_report("Bad device CVQ written length");
    }
    vhost_svq_push_elem(svq, elem, MIN(in_len, sizeof(status)));
    g_free(elem);
    return dev_written < 0 ? dev_written : 0;
}

static const VhostShadowVirtqueueOps vhost_vdpa_net_svq_ops = {
    .avail_handler = vhost_vdpa_net_handle_ctrl_avail,
};

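/*
 * Create one vhost-vdpa net client. Data path queue pairs use
 * net_vhost_vdpa_info; the CVQ client uses net_vhost_vdpa_cvq_info and also
 * allocates the page-aligned shadow buffers used to forward control commands.
 */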
static NetClientState *net_vhost_vdpa_init(NetClientState *peer,
                                           const char *device,
                                           const char *name,
                                           int vdpa_device_fd,
                                           int queue_pair_index,
                                           int nvqs,
                                           bool is_datapath,
                                           bool svq,
                                           struct vhost_vdpa_iova_range iova_range)
{
    NetClientState *nc = NULL;
    VhostVDPAState *s;
    int ret = 0;
    assert(name);
    if (is_datapath) {
        nc = qemu_new_net_client(&net_vhost_vdpa_info, peer, device,
                                 name);
    } else {
        nc = qemu_new_net_control_client(&net_vhost_vdpa_cvq_info, peer,
                                         device, name);
    }
    qemu_set_info_str(nc, TYPE_VHOST_VDPA);
    s = DO_UPCAST(VhostVDPAState, nc, nc);

    s->vhost_vdpa.device_fd = vdpa_device_fd;
    s->vhost_vdpa.index = queue_pair_index;
    s->always_svq = svq;
    s->vhost_vdpa.shadow_vqs_enabled = svq;
    s->vhost_vdpa.iova_range = iova_range;
    s->vhost_vdpa.shadow_data = svq;
    if (!is_datapath) {
        s->cvq_cmd_out_buffer = qemu_memalign(qemu_real_host_page_size(),
                                              vhost_vdpa_net_cvq_cmd_page_len());
        memset(s->cvq_cmd_out_buffer, 0, vhost_vdpa_net_cvq_cmd_page_len());
        s->status = qemu_memalign(qemu_real_host_page_size(),
                                  vhost_vdpa_net_cvq_cmd_page_len());
        memset(s->status, 0, vhost_vdpa_net_cvq_cmd_page_len());

        s->vhost_vdpa.shadow_vq_ops = &vhost_vdpa_net_svq_ops;
        s->vhost_vdpa.shadow_vq_ops_opaque = s;
    }
    ret = vhost_vdpa_add(nc, (void *)&s->vhost_vdpa, queue_pair_index, nvqs);
    if (ret) {
        qemu_del_net_client(nc);
        return NULL;
    }
    return nc;
}

static int vhost_vdpa_get_features(int fd, uint64_t *features, Error **errp)
{
    int ret = ioctl(fd, VHOST_GET_FEATURES, features);
    if (unlikely(ret < 0)) {
        error_setg_errno(errp, errno,
                         "Fail to query features from vhost-vDPA device");
    }
    return ret;
}

static int vhost_vdpa_get_max_queue_pairs(int fd, uint64_t features,
                                          int *has_cvq, Error **errp)
{
    unsigned long config_size = offsetof(struct vhost_vdpa_config, buf);
    g_autofree struct vhost_vdpa_config *config = NULL;
    __virtio16 *max_queue_pairs;
    int ret;

    if (features & (1 << VIRTIO_NET_F_CTRL_VQ)) {
        *has_cvq = 1;
    } else {
        *has_cvq = 0;
    }

    if (features & (1 << VIRTIO_NET_F_MQ)) {
        config = g_malloc0(config_size + sizeof(*max_queue_pairs));
        config->off = offsetof(struct virtio_net_config, max_virtqueue_pairs);
        config->len = sizeof(*max_queue_pairs);

        ret = ioctl(fd, VHOST_VDPA_GET_CONFIG, config);
        if (ret) {
            error_setg(errp, "Fail to get config from vhost-vDPA device");
            return -ret;
        }

        max_queue_pairs = (__virtio16 *)&config->buf;

        return lduw_le_p(max_queue_pairs);
    }

    return 1;
}

int net_init_vhost_vdpa(const Netdev *netdev, const char *name,
                        NetClientState *peer, Error **errp)
{
    const NetdevVhostVDPAOptions *opts;
    uint64_t features;
    int vdpa_device_fd;
    g_autofree NetClientState **ncs = NULL;
    struct vhost_vdpa_iova_range iova_range;
    NetClientState *nc;
    int queue_pairs, r, i = 0, has_cvq = 0;

    assert(netdev->type == NET_CLIENT_DRIVER_VHOST_VDPA);
    opts = &netdev->u.vhost_vdpa;
    if (!opts->vhostdev && !opts->vhostfd) {
        error_setg(errp,
                   "vhost-vdpa: neither vhostdev= nor vhostfd= was specified");
        return -1;
    }

    if (opts->vhostdev && opts->vhostfd) {
        error_setg(errp,
                   "vhost-vdpa: vhostdev= and vhostfd= are mutually exclusive");
        return -1;
    }

    if (opts->vhostdev) {
        vdpa_device_fd = qemu_open(opts->vhostdev, O_RDWR, errp);
        if (vdpa_device_fd == -1) {
            return -errno;
        }
    } else {
        /* has_vhostfd */
        vdpa_device_fd = monitor_fd_param(monitor_cur(), opts->vhostfd, errp);
        if (vdpa_device_fd == -1) {
            error_prepend(errp, "vhost-vdpa: unable to parse vhostfd: ");
            return -1;
        }
    }

    r = vhost_vdpa_get_features(vdpa_device_fd, &features, errp);
    if (unlikely(r < 0)) {
        goto err;
    }

    queue_pairs = vhost_vdpa_get_max_queue_pairs(vdpa_device_fd, features,
                                                 &has_cvq, errp);
    if (queue_pairs < 0) {
        qemu_close(vdpa_device_fd);
        return queue_pairs;
    }

    r = vhost_vdpa_get_iova_range(vdpa_device_fd, &iova_range);
    if (unlikely(r < 0)) {
        error_setg(errp, "vhost-vdpa: get iova range failed: %s",
                   strerror(-r));
        goto err;
    }

    if (opts->x_svq && !vhost_vdpa_net_valid_svq_features(features, errp)) {
        goto err;
    }

    ncs = g_malloc0(sizeof(*ncs) * queue_pairs);

    for (i = 0; i < queue_pairs; i++) {
        ncs[i] = net_vhost_vdpa_init(peer, TYPE_VHOST_VDPA, name,
                                     vdpa_device_fd, i, 2, true, opts->x_svq,
                                     iova_range);
        if (!ncs[i]) {
            goto err;
        }
    }

    if (has_cvq) {
        nc = net_vhost_vdpa_init(peer, TYPE_VHOST_VDPA, name,
                                 vdpa_device_fd, i, 1, false,
                                 opts->x_svq, iova_range);
        if (!nc) {
            goto err;
        }
    }

    return 0;

err:
    if (i) {
        for (i--; i >= 0; i--) {
            qemu_del_net_client(ncs[i]);
        }
    }

    qemu_close(vdpa_device_fd);

    return -1;
}