11e0a84eaSCindy Lu /* 21e0a84eaSCindy Lu * vhost-vdpa.c 31e0a84eaSCindy Lu * 41e0a84eaSCindy Lu * Copyright(c) 2017-2018 Intel Corporation. 51e0a84eaSCindy Lu * Copyright(c) 2020 Red Hat, Inc. 61e0a84eaSCindy Lu * 71e0a84eaSCindy Lu * This work is licensed under the terms of the GNU GPL, version 2 or later. 81e0a84eaSCindy Lu * See the COPYING file in the top-level directory. 91e0a84eaSCindy Lu * 101e0a84eaSCindy Lu */ 111e0a84eaSCindy Lu 121e0a84eaSCindy Lu #include "qemu/osdep.h" 131e0a84eaSCindy Lu #include "clients.h" 14bd907ae4SEugenio Pérez #include "hw/virtio/virtio-net.h" 151e0a84eaSCindy Lu #include "net/vhost_net.h" 161e0a84eaSCindy Lu #include "net/vhost-vdpa.h" 171e0a84eaSCindy Lu #include "hw/virtio/vhost-vdpa.h" 181e0a84eaSCindy Lu #include "qemu/config-file.h" 191e0a84eaSCindy Lu #include "qemu/error-report.h" 20bd907ae4SEugenio Pérez #include "qemu/log.h" 21bd907ae4SEugenio Pérez #include "qemu/memalign.h" 221e0a84eaSCindy Lu #include "qemu/option.h" 231e0a84eaSCindy Lu #include "qapi/error.h" 2440237840SJason Wang #include <linux/vhost.h> 251e0a84eaSCindy Lu #include <sys/ioctl.h> 261e0a84eaSCindy Lu #include <err.h> 271e0a84eaSCindy Lu #include "standard-headers/linux/virtio_net.h" 281e0a84eaSCindy Lu #include "monitor/monitor.h" 2969498430SEugenio Pérez #include "migration/migration.h" 3069498430SEugenio Pérez #include "migration/misc.h" 311e0a84eaSCindy Lu #include "hw/virtio/vhost.h" 321e0a84eaSCindy Lu 331e0a84eaSCindy Lu /* Todo:need to add the multiqueue support here */ 341e0a84eaSCindy Lu typedef struct VhostVDPAState { 351e0a84eaSCindy Lu NetClientState nc; 361e0a84eaSCindy Lu struct vhost_vdpa vhost_vdpa; 3769498430SEugenio Pérez Notifier migration_state; 381e0a84eaSCindy Lu VHostNetState *vhost_net; 392df4dd31SEugenio Pérez 402df4dd31SEugenio Pérez /* Control commands shadow buffers */ 4117fb889fSEugenio Pérez void *cvq_cmd_out_buffer; 4217fb889fSEugenio Pérez virtio_net_ctrl_ack *status; 4317fb889fSEugenio Pérez 447f211a28SEugenio Pérez 
/* The device always have SVQ enabled */ 457f211a28SEugenio Pérez bool always_svq; 46152128d6SEugenio Pérez 47152128d6SEugenio Pérez /* The device can isolate CVQ in its own ASID */ 48152128d6SEugenio Pérez bool cvq_isolated; 49152128d6SEugenio Pérez 501e0a84eaSCindy Lu bool started; 511e0a84eaSCindy Lu } VhostVDPAState; 521e0a84eaSCindy Lu 532875a0caSHawkins Jiawei /* 542875a0caSHawkins Jiawei * The array is sorted alphabetically in ascending order, 552875a0caSHawkins Jiawei * with the exception of VHOST_INVALID_FEATURE_BIT, 562875a0caSHawkins Jiawei * which should always be the last entry. 572875a0caSHawkins Jiawei */ 581e0a84eaSCindy Lu const int vdpa_feature_bits[] = { 591e0a84eaSCindy Lu VIRTIO_F_ANY_LAYOUT, 602875a0caSHawkins Jiawei VIRTIO_F_IOMMU_PLATFORM, 612875a0caSHawkins Jiawei VIRTIO_F_NOTIFY_ON_EMPTY, 622875a0caSHawkins Jiawei VIRTIO_F_RING_PACKED, 632875a0caSHawkins Jiawei VIRTIO_F_RING_RESET, 641e0a84eaSCindy Lu VIRTIO_F_VERSION_1, 651e0a84eaSCindy Lu VIRTIO_NET_F_CSUM, 6651e84244SEugenio Pérez VIRTIO_NET_F_CTRL_GUEST_OFFLOADS, 672875a0caSHawkins Jiawei VIRTIO_NET_F_CTRL_MAC_ADDR, 6840237840SJason Wang VIRTIO_NET_F_CTRL_RX, 6940237840SJason Wang VIRTIO_NET_F_CTRL_RX_EXTRA, 7040237840SJason Wang VIRTIO_NET_F_CTRL_VLAN, 7140237840SJason Wang VIRTIO_NET_F_CTRL_VQ, 722875a0caSHawkins Jiawei VIRTIO_NET_F_GSO, 732875a0caSHawkins Jiawei VIRTIO_NET_F_GUEST_CSUM, 742875a0caSHawkins Jiawei VIRTIO_NET_F_GUEST_ECN, 752875a0caSHawkins Jiawei VIRTIO_NET_F_GUEST_TSO4, 762875a0caSHawkins Jiawei VIRTIO_NET_F_GUEST_TSO6, 772875a0caSHawkins Jiawei VIRTIO_NET_F_GUEST_UFO, 789da16849SAndrew Melnychenko VIRTIO_NET_F_GUEST_USO4, 799da16849SAndrew Melnychenko VIRTIO_NET_F_GUEST_USO6, 800145c393SAndrew Melnychenko VIRTIO_NET_F_HASH_REPORT, 812875a0caSHawkins Jiawei VIRTIO_NET_F_HOST_ECN, 822875a0caSHawkins Jiawei VIRTIO_NET_F_HOST_TSO4, 832875a0caSHawkins Jiawei VIRTIO_NET_F_HOST_TSO6, 842875a0caSHawkins Jiawei VIRTIO_NET_F_HOST_UFO, 859da16849SAndrew Melnychenko 
VIRTIO_NET_F_HOST_USO, 862875a0caSHawkins Jiawei VIRTIO_NET_F_MQ, 872875a0caSHawkins Jiawei VIRTIO_NET_F_MRG_RXBUF, 882875a0caSHawkins Jiawei VIRTIO_NET_F_MTU, 892875a0caSHawkins Jiawei VIRTIO_NET_F_RSS, 909aa47eddSSi-Wei Liu VIRTIO_NET_F_STATUS, 912875a0caSHawkins Jiawei VIRTIO_RING_F_EVENT_IDX, 922875a0caSHawkins Jiawei VIRTIO_RING_F_INDIRECT_DESC, 932875a0caSHawkins Jiawei 942875a0caSHawkins Jiawei /* VHOST_INVALID_FEATURE_BIT should always be the last entry */ 951e0a84eaSCindy Lu VHOST_INVALID_FEATURE_BIT 961e0a84eaSCindy Lu }; 971e0a84eaSCindy Lu 981576dbb5SEugenio Pérez /** Supported device specific feature bits with SVQ */ 991576dbb5SEugenio Pérez static const uint64_t vdpa_svq_device_features = 1001576dbb5SEugenio Pérez BIT_ULL(VIRTIO_NET_F_CSUM) | 1011576dbb5SEugenio Pérez BIT_ULL(VIRTIO_NET_F_GUEST_CSUM) | 1024b4a1378SHawkins Jiawei BIT_ULL(VIRTIO_NET_F_CTRL_GUEST_OFFLOADS) | 1031576dbb5SEugenio Pérez BIT_ULL(VIRTIO_NET_F_MTU) | 1041576dbb5SEugenio Pérez BIT_ULL(VIRTIO_NET_F_MAC) | 1051576dbb5SEugenio Pérez BIT_ULL(VIRTIO_NET_F_GUEST_TSO4) | 1061576dbb5SEugenio Pérez BIT_ULL(VIRTIO_NET_F_GUEST_TSO6) | 1071576dbb5SEugenio Pérez BIT_ULL(VIRTIO_NET_F_GUEST_ECN) | 1081576dbb5SEugenio Pérez BIT_ULL(VIRTIO_NET_F_GUEST_UFO) | 1091576dbb5SEugenio Pérez BIT_ULL(VIRTIO_NET_F_HOST_TSO4) | 1101576dbb5SEugenio Pérez BIT_ULL(VIRTIO_NET_F_HOST_TSO6) | 1111576dbb5SEugenio Pérez BIT_ULL(VIRTIO_NET_F_HOST_ECN) | 1121576dbb5SEugenio Pérez BIT_ULL(VIRTIO_NET_F_HOST_UFO) | 1131576dbb5SEugenio Pérez BIT_ULL(VIRTIO_NET_F_MRG_RXBUF) | 1141576dbb5SEugenio Pérez BIT_ULL(VIRTIO_NET_F_STATUS) | 1151576dbb5SEugenio Pérez BIT_ULL(VIRTIO_NET_F_CTRL_VQ) | 116ea6eec49SHawkins Jiawei BIT_ULL(VIRTIO_NET_F_CTRL_RX) | 117e213c45aSHawkins Jiawei BIT_ULL(VIRTIO_NET_F_CTRL_VLAN) | 118d669b7bbSHawkins Jiawei BIT_ULL(VIRTIO_NET_F_CTRL_RX_EXTRA) | 11972b99a87SEugenio Pérez BIT_ULL(VIRTIO_NET_F_MQ) | 1201576dbb5SEugenio Pérez BIT_ULL(VIRTIO_F_ANY_LAYOUT) | 1211576dbb5SEugenio Pérez 
BIT_ULL(VIRTIO_NET_F_CTRL_MAC_ADDR) | 122609ab4c3SEugenio Pérez /* VHOST_F_LOG_ALL is exposed by SVQ */ 123609ab4c3SEugenio Pérez BIT_ULL(VHOST_F_LOG_ALL) | 1241576dbb5SEugenio Pérez BIT_ULL(VIRTIO_NET_F_RSC_EXT) | 1250d74e2b7SEugenio Pérez BIT_ULL(VIRTIO_NET_F_STANDBY) | 1260d74e2b7SEugenio Pérez BIT_ULL(VIRTIO_NET_F_SPEED_DUPLEX); 1271576dbb5SEugenio Pérez 128c1a10086SEugenio Pérez #define VHOST_VDPA_NET_CVQ_ASID 1 129c1a10086SEugenio Pérez 1301e0a84eaSCindy Lu VHostNetState *vhost_vdpa_get_vhost_net(NetClientState *nc) 1311e0a84eaSCindy Lu { 1321e0a84eaSCindy Lu VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc); 1331e0a84eaSCindy Lu assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA); 1341e0a84eaSCindy Lu return s->vhost_net; 1351e0a84eaSCindy Lu } 1361e0a84eaSCindy Lu 137915bf6ccSEugenio Pérez static size_t vhost_vdpa_net_cvq_cmd_len(void) 138915bf6ccSEugenio Pérez { 139915bf6ccSEugenio Pérez /* 140915bf6ccSEugenio Pérez * MAC_TABLE_SET is the ctrl command that produces the longer out buffer. 
141915bf6ccSEugenio Pérez * In buffer is always 1 byte, so it should fit here 142915bf6ccSEugenio Pérez */ 143915bf6ccSEugenio Pérez return sizeof(struct virtio_net_ctrl_hdr) + 144915bf6ccSEugenio Pérez 2 * sizeof(struct virtio_net_ctrl_mac) + 145915bf6ccSEugenio Pérez MAC_TABLE_ENTRIES * ETH_ALEN; 146915bf6ccSEugenio Pérez } 147915bf6ccSEugenio Pérez 148915bf6ccSEugenio Pérez static size_t vhost_vdpa_net_cvq_cmd_page_len(void) 149915bf6ccSEugenio Pérez { 150915bf6ccSEugenio Pérez return ROUND_UP(vhost_vdpa_net_cvq_cmd_len(), qemu_real_host_page_size()); 151915bf6ccSEugenio Pérez } 152915bf6ccSEugenio Pérez 15336e46472SEugenio Pérez static bool vhost_vdpa_net_valid_svq_features(uint64_t features, Error **errp) 15436e46472SEugenio Pérez { 15536e46472SEugenio Pérez uint64_t invalid_dev_features = 15636e46472SEugenio Pérez features & ~vdpa_svq_device_features & 15736e46472SEugenio Pérez /* Transport are all accepted at this point */ 15836e46472SEugenio Pérez ~MAKE_64BIT_MASK(VIRTIO_TRANSPORT_F_START, 15936e46472SEugenio Pérez VIRTIO_TRANSPORT_F_END - VIRTIO_TRANSPORT_F_START); 16036e46472SEugenio Pérez 16136e46472SEugenio Pérez if (invalid_dev_features) { 16236e46472SEugenio Pérez error_setg(errp, "vdpa svq does not work with features 0x%" PRIx64, 16336e46472SEugenio Pérez invalid_dev_features); 164258a0394SEugenio Pérez return false; 16536e46472SEugenio Pérez } 16636e46472SEugenio Pérez 167258a0394SEugenio Pérez return vhost_svq_valid_features(features, errp); 16836e46472SEugenio Pérez } 16936e46472SEugenio Pérez 1701e0a84eaSCindy Lu static int vhost_vdpa_net_check_device_id(struct vhost_net *net) 1711e0a84eaSCindy Lu { 1721e0a84eaSCindy Lu uint32_t device_id; 1731e0a84eaSCindy Lu int ret; 1741e0a84eaSCindy Lu struct vhost_dev *hdev; 1751e0a84eaSCindy Lu 1761e0a84eaSCindy Lu hdev = (struct vhost_dev *)&net->dev; 1771e0a84eaSCindy Lu ret = hdev->vhost_ops->vhost_get_device_id(hdev, &device_id); 1781e0a84eaSCindy Lu if (device_id != VIRTIO_ID_NET) { 1791e0a84eaSCindy 
Lu return -ENOTSUP; 1801e0a84eaSCindy Lu } 1811e0a84eaSCindy Lu return ret; 1821e0a84eaSCindy Lu } 1831e0a84eaSCindy Lu 18440237840SJason Wang static int vhost_vdpa_add(NetClientState *ncs, void *be, 18540237840SJason Wang int queue_pair_index, int nvqs) 1861e0a84eaSCindy Lu { 1871e0a84eaSCindy Lu VhostNetOptions options; 1881e0a84eaSCindy Lu struct vhost_net *net = NULL; 1891e0a84eaSCindy Lu VhostVDPAState *s; 1901e0a84eaSCindy Lu int ret; 1911e0a84eaSCindy Lu 1921e0a84eaSCindy Lu options.backend_type = VHOST_BACKEND_TYPE_VDPA; 1931e0a84eaSCindy Lu assert(ncs->info->type == NET_CLIENT_DRIVER_VHOST_VDPA); 1941e0a84eaSCindy Lu s = DO_UPCAST(VhostVDPAState, nc, ncs); 1951e0a84eaSCindy Lu options.net_backend = ncs; 1961e0a84eaSCindy Lu options.opaque = be; 1971e0a84eaSCindy Lu options.busyloop_timeout = 0; 19840237840SJason Wang options.nvqs = nvqs; 1991e0a84eaSCindy Lu 2001e0a84eaSCindy Lu net = vhost_net_init(&options); 2011e0a84eaSCindy Lu if (!net) { 2021e0a84eaSCindy Lu error_report("failed to init vhost_net for queue"); 203a97ef87aSJason Wang goto err_init; 2041e0a84eaSCindy Lu } 2051e0a84eaSCindy Lu s->vhost_net = net; 2061e0a84eaSCindy Lu ret = vhost_vdpa_net_check_device_id(net); 2071e0a84eaSCindy Lu if (ret) { 208a97ef87aSJason Wang goto err_check; 2091e0a84eaSCindy Lu } 2101e0a84eaSCindy Lu return 0; 211a97ef87aSJason Wang err_check: 2121e0a84eaSCindy Lu vhost_net_cleanup(net); 213ab36edcfSJason Wang g_free(net); 214a97ef87aSJason Wang err_init: 2151e0a84eaSCindy Lu return -1; 2161e0a84eaSCindy Lu } 2171e0a84eaSCindy Lu 2181e0a84eaSCindy Lu static void vhost_vdpa_cleanup(NetClientState *nc) 2191e0a84eaSCindy Lu { 2201e0a84eaSCindy Lu VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc); 2211e0a84eaSCindy Lu 222a0d7215eSAni Sinha /* 223a0d7215eSAni Sinha * If a peer NIC is attached, do not cleanup anything. 224a0d7215eSAni Sinha * Cleanup will happen as a part of qemu_cleanup() -> net_cleanup() 225a0d7215eSAni Sinha * when the guest is shutting down. 
226a0d7215eSAni Sinha */ 227a0d7215eSAni Sinha if (nc->peer && nc->peer->info->type == NET_CLIENT_DRIVER_NIC) { 228a0d7215eSAni Sinha return; 229a0d7215eSAni Sinha } 230babf8b87SEugenio Pérez munmap(s->cvq_cmd_out_buffer, vhost_vdpa_net_cvq_cmd_page_len()); 231babf8b87SEugenio Pérez munmap(s->status, vhost_vdpa_net_cvq_cmd_page_len()); 2321e0a84eaSCindy Lu if (s->vhost_net) { 2331e0a84eaSCindy Lu vhost_net_cleanup(s->vhost_net); 2341e0a84eaSCindy Lu g_free(s->vhost_net); 2351e0a84eaSCindy Lu s->vhost_net = NULL; 2361e0a84eaSCindy Lu } 23757b3a7d8SCindy Lu if (s->vhost_vdpa.device_fd >= 0) { 23857b3a7d8SCindy Lu qemu_close(s->vhost_vdpa.device_fd); 23957b3a7d8SCindy Lu s->vhost_vdpa.device_fd = -1; 24057b3a7d8SCindy Lu } 2411e0a84eaSCindy Lu } 2421e0a84eaSCindy Lu 2431e0a84eaSCindy Lu static bool vhost_vdpa_has_vnet_hdr(NetClientState *nc) 2441e0a84eaSCindy Lu { 2451e0a84eaSCindy Lu assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA); 2461e0a84eaSCindy Lu 2471e0a84eaSCindy Lu return true; 2481e0a84eaSCindy Lu } 2491e0a84eaSCindy Lu 2501e0a84eaSCindy Lu static bool vhost_vdpa_has_ufo(NetClientState *nc) 2511e0a84eaSCindy Lu { 2521e0a84eaSCindy Lu assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA); 2531e0a84eaSCindy Lu VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc); 2541e0a84eaSCindy Lu uint64_t features = 0; 2551e0a84eaSCindy Lu features |= (1ULL << VIRTIO_NET_F_HOST_UFO); 2561e0a84eaSCindy Lu features = vhost_net_get_features(s->vhost_net, features); 2571e0a84eaSCindy Lu return !!(features & (1ULL << VIRTIO_NET_F_HOST_UFO)); 2581e0a84eaSCindy Lu 2591e0a84eaSCindy Lu } 2601e0a84eaSCindy Lu 261ee8a1c63SKevin Wolf static bool vhost_vdpa_check_peer_type(NetClientState *nc, ObjectClass *oc, 262ee8a1c63SKevin Wolf Error **errp) 263ee8a1c63SKevin Wolf { 264ee8a1c63SKevin Wolf const char *driver = object_class_get_name(oc); 265ee8a1c63SKevin Wolf 266ee8a1c63SKevin Wolf if (!g_str_has_prefix(driver, "virtio-net-")) { 267ee8a1c63SKevin Wolf error_setg(errp, 
"vhost-vdpa requires frontend driver virtio-net-*"); 268ee8a1c63SKevin Wolf return false; 269ee8a1c63SKevin Wolf } 270ee8a1c63SKevin Wolf 271ee8a1c63SKevin Wolf return true; 272ee8a1c63SKevin Wolf } 273ee8a1c63SKevin Wolf 274846a1e85SEugenio Pérez /** Dummy receive in case qemu falls back to userland tap networking */ 275846a1e85SEugenio Pérez static ssize_t vhost_vdpa_receive(NetClientState *nc, const uint8_t *buf, 276846a1e85SEugenio Pérez size_t size) 277846a1e85SEugenio Pérez { 278bc5add1dSSi-Wei Liu return size; 279846a1e85SEugenio Pérez } 280846a1e85SEugenio Pérez 28100ef422eSEugenio Pérez /** From any vdpa net client, get the netclient of the first queue pair */ 28200ef422eSEugenio Pérez static VhostVDPAState *vhost_vdpa_net_first_nc_vdpa(VhostVDPAState *s) 28300ef422eSEugenio Pérez { 28400ef422eSEugenio Pérez NICState *nic = qemu_get_nic(s->nc.peer); 28500ef422eSEugenio Pérez NetClientState *nc0 = qemu_get_peer(nic->ncs, 0); 28600ef422eSEugenio Pérez 28700ef422eSEugenio Pérez return DO_UPCAST(VhostVDPAState, nc, nc0); 28800ef422eSEugenio Pérez } 28900ef422eSEugenio Pérez 29069498430SEugenio Pérez static void vhost_vdpa_net_log_global_enable(VhostVDPAState *s, bool enable) 29169498430SEugenio Pérez { 29269498430SEugenio Pérez struct vhost_vdpa *v = &s->vhost_vdpa; 29369498430SEugenio Pérez VirtIONet *n; 29469498430SEugenio Pérez VirtIODevice *vdev; 29569498430SEugenio Pérez int data_queue_pairs, cvq, r; 29669498430SEugenio Pérez 29769498430SEugenio Pérez /* We are only called on the first data vqs and only if x-svq is not set */ 29869498430SEugenio Pérez if (s->vhost_vdpa.shadow_vqs_enabled == enable) { 29969498430SEugenio Pérez return; 30069498430SEugenio Pérez } 30169498430SEugenio Pérez 30269498430SEugenio Pérez vdev = v->dev->vdev; 30369498430SEugenio Pérez n = VIRTIO_NET(vdev); 30469498430SEugenio Pérez if (!n->vhost_started) { 30569498430SEugenio Pérez return; 30669498430SEugenio Pérez } 30769498430SEugenio Pérez 30869498430SEugenio Pérez 
data_queue_pairs = n->multiqueue ? n->max_queue_pairs : 1; 30969498430SEugenio Pérez cvq = virtio_vdev_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ) ? 31069498430SEugenio Pérez n->max_ncs - n->max_queue_pairs : 0; 31169498430SEugenio Pérez /* 31269498430SEugenio Pérez * TODO: vhost_net_stop does suspend, get_base and reset. We can be smarter 31369498430SEugenio Pérez * in the future and resume the device if read-only operations between 31469498430SEugenio Pérez * suspend and reset goes wrong. 31569498430SEugenio Pérez */ 31669498430SEugenio Pérez vhost_net_stop(vdev, n->nic->ncs, data_queue_pairs, cvq); 31769498430SEugenio Pérez 31869498430SEugenio Pérez /* Start will check migration setup_or_active to configure or not SVQ */ 31969498430SEugenio Pérez r = vhost_net_start(vdev, n->nic->ncs, data_queue_pairs, cvq); 32069498430SEugenio Pérez if (unlikely(r < 0)) { 32169498430SEugenio Pérez error_report("unable to start vhost net: %s(%d)", g_strerror(-r), -r); 32269498430SEugenio Pérez } 32369498430SEugenio Pérez } 32469498430SEugenio Pérez 32569498430SEugenio Pérez static void vdpa_net_migration_state_notifier(Notifier *notifier, void *data) 32669498430SEugenio Pérez { 32769498430SEugenio Pérez MigrationState *migration = data; 32869498430SEugenio Pérez VhostVDPAState *s = container_of(notifier, VhostVDPAState, 32969498430SEugenio Pérez migration_state); 33069498430SEugenio Pérez 33169498430SEugenio Pérez if (migration_in_setup(migration)) { 33269498430SEugenio Pérez vhost_vdpa_net_log_global_enable(s, true); 33369498430SEugenio Pérez } else if (migration_has_failed(migration)) { 33469498430SEugenio Pérez vhost_vdpa_net_log_global_enable(s, false); 33569498430SEugenio Pérez } 33669498430SEugenio Pérez } 33769498430SEugenio Pérez 33800ef422eSEugenio Pérez static void vhost_vdpa_net_data_start_first(VhostVDPAState *s) 33900ef422eSEugenio Pérez { 34000ef422eSEugenio Pérez struct vhost_vdpa *v = &s->vhost_vdpa; 34100ef422eSEugenio Pérez 34269498430SEugenio Pérez 
add_migration_state_change_notifier(&s->migration_state); 34300ef422eSEugenio Pérez if (v->shadow_vqs_enabled) { 34400ef422eSEugenio Pérez v->iova_tree = vhost_iova_tree_new(v->iova_range.first, 34500ef422eSEugenio Pérez v->iova_range.last); 34600ef422eSEugenio Pérez } 34700ef422eSEugenio Pérez } 34800ef422eSEugenio Pérez 34900ef422eSEugenio Pérez static int vhost_vdpa_net_data_start(NetClientState *nc) 35000ef422eSEugenio Pérez { 35100ef422eSEugenio Pérez VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc); 35200ef422eSEugenio Pérez struct vhost_vdpa *v = &s->vhost_vdpa; 35300ef422eSEugenio Pérez 35400ef422eSEugenio Pérez assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA); 35500ef422eSEugenio Pérez 35669498430SEugenio Pérez if (s->always_svq || 35769498430SEugenio Pérez migration_is_setup_or_active(migrate_get_current()->state)) { 35869498430SEugenio Pérez v->shadow_vqs_enabled = true; 35969498430SEugenio Pérez v->shadow_data = true; 36069498430SEugenio Pérez } else { 36169498430SEugenio Pérez v->shadow_vqs_enabled = false; 36269498430SEugenio Pérez v->shadow_data = false; 36369498430SEugenio Pérez } 36469498430SEugenio Pérez 36500ef422eSEugenio Pérez if (v->index == 0) { 36600ef422eSEugenio Pérez vhost_vdpa_net_data_start_first(s); 36700ef422eSEugenio Pérez return 0; 36800ef422eSEugenio Pérez } 36900ef422eSEugenio Pérez 37000ef422eSEugenio Pérez if (v->shadow_vqs_enabled) { 37100ef422eSEugenio Pérez VhostVDPAState *s0 = vhost_vdpa_net_first_nc_vdpa(s); 37200ef422eSEugenio Pérez v->iova_tree = s0->vhost_vdpa.iova_tree; 37300ef422eSEugenio Pérez } 37400ef422eSEugenio Pérez 37500ef422eSEugenio Pérez return 0; 37600ef422eSEugenio Pérez } 37700ef422eSEugenio Pérez 3786c482547SEugenio Pérez static int vhost_vdpa_net_data_load(NetClientState *nc) 3796c482547SEugenio Pérez { 3806c482547SEugenio Pérez VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc); 3816c482547SEugenio Pérez struct vhost_vdpa *v = &s->vhost_vdpa; 3826c482547SEugenio Pérez bool has_cvq = 
v->dev->vq_index_end % 2; 3836c482547SEugenio Pérez 3846c482547SEugenio Pérez if (has_cvq) { 3856c482547SEugenio Pérez return 0; 3866c482547SEugenio Pérez } 3876c482547SEugenio Pérez 3886c482547SEugenio Pérez for (int i = 0; i < v->dev->nvqs; ++i) { 3896c482547SEugenio Pérez vhost_vdpa_set_vring_ready(v, i + v->dev->vq_index); 3906c482547SEugenio Pérez } 3916c482547SEugenio Pérez return 0; 3926c482547SEugenio Pérez } 3936c482547SEugenio Pérez 39400ef422eSEugenio Pérez static void vhost_vdpa_net_client_stop(NetClientState *nc) 39500ef422eSEugenio Pérez { 39600ef422eSEugenio Pérez VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc); 39700ef422eSEugenio Pérez struct vhost_dev *dev; 39800ef422eSEugenio Pérez 39900ef422eSEugenio Pérez assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA); 40000ef422eSEugenio Pérez 40169498430SEugenio Pérez if (s->vhost_vdpa.index == 0) { 40269498430SEugenio Pérez remove_migration_state_change_notifier(&s->migration_state); 40369498430SEugenio Pérez } 40469498430SEugenio Pérez 40500ef422eSEugenio Pérez dev = s->vhost_vdpa.dev; 40600ef422eSEugenio Pérez if (dev->vq_index + dev->nvqs == dev->vq_index_end) { 40700ef422eSEugenio Pérez g_clear_pointer(&s->vhost_vdpa.iova_tree, vhost_iova_tree_delete); 4080a7a164bSEugenio Pérez } else { 4090a7a164bSEugenio Pérez s->vhost_vdpa.iova_tree = NULL; 41000ef422eSEugenio Pérez } 41100ef422eSEugenio Pérez } 41200ef422eSEugenio Pérez 4131e0a84eaSCindy Lu static NetClientInfo net_vhost_vdpa_info = { 4141e0a84eaSCindy Lu .type = NET_CLIENT_DRIVER_VHOST_VDPA, 4151e0a84eaSCindy Lu .size = sizeof(VhostVDPAState), 416846a1e85SEugenio Pérez .receive = vhost_vdpa_receive, 41700ef422eSEugenio Pérez .start = vhost_vdpa_net_data_start, 4186c482547SEugenio Pérez .load = vhost_vdpa_net_data_load, 41900ef422eSEugenio Pérez .stop = vhost_vdpa_net_client_stop, 4201e0a84eaSCindy Lu .cleanup = vhost_vdpa_cleanup, 4211e0a84eaSCindy Lu .has_vnet_hdr = vhost_vdpa_has_vnet_hdr, 4221e0a84eaSCindy Lu .has_ufo = 
vhost_vdpa_has_ufo, 423ee8a1c63SKevin Wolf .check_peer_type = vhost_vdpa_check_peer_type, 4241e0a84eaSCindy Lu }; 4251e0a84eaSCindy Lu 426152128d6SEugenio Pérez static int64_t vhost_vdpa_get_vring_group(int device_fd, unsigned vq_index, 427152128d6SEugenio Pérez Error **errp) 428c1a10086SEugenio Pérez { 429c1a10086SEugenio Pérez struct vhost_vring_state state = { 430c1a10086SEugenio Pérez .index = vq_index, 431c1a10086SEugenio Pérez }; 432c1a10086SEugenio Pérez int r = ioctl(device_fd, VHOST_VDPA_GET_VRING_GROUP, &state); 433c1a10086SEugenio Pérez 434c1a10086SEugenio Pérez if (unlikely(r < 0)) { 4350f2bb0bfSEugenio Pérez r = -errno; 436152128d6SEugenio Pérez error_setg_errno(errp, errno, "Cannot get VQ %u group", vq_index); 437c1a10086SEugenio Pérez return r; 438c1a10086SEugenio Pérez } 439c1a10086SEugenio Pérez 440c1a10086SEugenio Pérez return state.num; 441c1a10086SEugenio Pérez } 442c1a10086SEugenio Pérez 443c1a10086SEugenio Pérez static int vhost_vdpa_set_address_space_id(struct vhost_vdpa *v, 444c1a10086SEugenio Pérez unsigned vq_group, 445c1a10086SEugenio Pérez unsigned asid_num) 446c1a10086SEugenio Pérez { 447c1a10086SEugenio Pérez struct vhost_vring_state asid = { 448c1a10086SEugenio Pérez .index = vq_group, 449c1a10086SEugenio Pérez .num = asid_num, 450c1a10086SEugenio Pérez }; 451c1a10086SEugenio Pérez int r; 452c1a10086SEugenio Pérez 453c1a10086SEugenio Pérez r = ioctl(v->device_fd, VHOST_VDPA_SET_GROUP_ASID, &asid); 454c1a10086SEugenio Pérez if (unlikely(r < 0)) { 455c1a10086SEugenio Pérez error_report("Can't set vq group %u asid %u, errno=%d (%s)", 456c1a10086SEugenio Pérez asid.index, asid.num, errno, g_strerror(errno)); 457c1a10086SEugenio Pérez } 458c1a10086SEugenio Pérez return r; 459c1a10086SEugenio Pérez } 460c1a10086SEugenio Pérez 4612df4dd31SEugenio Pérez static void vhost_vdpa_cvq_unmap_buf(struct vhost_vdpa *v, void *addr) 4622df4dd31SEugenio Pérez { 4632df4dd31SEugenio Pérez VhostIOVATree *tree = v->iova_tree; 4642df4dd31SEugenio Pérez 
DMAMap needle = { 4652df4dd31SEugenio Pérez /* 4662df4dd31SEugenio Pérez * No need to specify size or to look for more translations since 4672df4dd31SEugenio Pérez * this contiguous chunk was allocated by us. 4682df4dd31SEugenio Pérez */ 4692df4dd31SEugenio Pérez .translated_addr = (hwaddr)(uintptr_t)addr, 4702df4dd31SEugenio Pérez }; 4712df4dd31SEugenio Pérez const DMAMap *map = vhost_iova_tree_find_iova(tree, &needle); 4722df4dd31SEugenio Pérez int r; 4732df4dd31SEugenio Pérez 4742df4dd31SEugenio Pérez if (unlikely(!map)) { 4752df4dd31SEugenio Pérez error_report("Cannot locate expected map"); 4762df4dd31SEugenio Pérez return; 4772df4dd31SEugenio Pérez } 4782df4dd31SEugenio Pérez 479cd831ed5SEugenio Pérez r = vhost_vdpa_dma_unmap(v, v->address_space_id, map->iova, map->size + 1); 4802df4dd31SEugenio Pérez if (unlikely(r != 0)) { 4812df4dd31SEugenio Pérez error_report("Device cannot unmap: %s(%d)", g_strerror(r), r); 4822df4dd31SEugenio Pérez } 4832df4dd31SEugenio Pérez 48469292a8eSEugenio Pérez vhost_iova_tree_remove(tree, *map); 4852df4dd31SEugenio Pérez } 4862df4dd31SEugenio Pérez 4877a7f87e9SEugenio Pérez /** Map CVQ buffer. */ 4887a7f87e9SEugenio Pérez static int vhost_vdpa_cvq_map_buf(struct vhost_vdpa *v, void *buf, size_t size, 4897a7f87e9SEugenio Pérez bool write) 4902df4dd31SEugenio Pérez { 4912df4dd31SEugenio Pérez DMAMap map = {}; 4922df4dd31SEugenio Pérez int r; 4932df4dd31SEugenio Pérez 4942df4dd31SEugenio Pérez map.translated_addr = (hwaddr)(uintptr_t)buf; 4957a7f87e9SEugenio Pérez map.size = size - 1; 4962df4dd31SEugenio Pérez map.perm = write ? 
IOMMU_RW : IOMMU_RO, 4972df4dd31SEugenio Pérez r = vhost_iova_tree_map_alloc(v->iova_tree, &map); 4982df4dd31SEugenio Pérez if (unlikely(r != IOVA_OK)) { 4992df4dd31SEugenio Pérez error_report("Cannot map injected element"); 5007a7f87e9SEugenio Pérez return r; 5012df4dd31SEugenio Pérez } 5022df4dd31SEugenio Pérez 503cd831ed5SEugenio Pérez r = vhost_vdpa_dma_map(v, v->address_space_id, map.iova, 504cd831ed5SEugenio Pérez vhost_vdpa_net_cvq_cmd_page_len(), buf, !write); 5052df4dd31SEugenio Pérez if (unlikely(r < 0)) { 5062df4dd31SEugenio Pérez goto dma_map_err; 5072df4dd31SEugenio Pérez } 5082df4dd31SEugenio Pérez 5097a7f87e9SEugenio Pérez return 0; 5102df4dd31SEugenio Pérez 5112df4dd31SEugenio Pérez dma_map_err: 51269292a8eSEugenio Pérez vhost_iova_tree_remove(v->iova_tree, map); 5137a7f87e9SEugenio Pérez return r; 5142df4dd31SEugenio Pérez } 5152df4dd31SEugenio Pérez 5167a7f87e9SEugenio Pérez static int vhost_vdpa_net_cvq_start(NetClientState *nc) 5172df4dd31SEugenio Pérez { 51800ef422eSEugenio Pérez VhostVDPAState *s, *s0; 519c1a10086SEugenio Pérez struct vhost_vdpa *v; 520c1a10086SEugenio Pérez int64_t cvq_group; 521152128d6SEugenio Pérez int r; 522152128d6SEugenio Pérez Error *err = NULL; 5232df4dd31SEugenio Pérez 5247a7f87e9SEugenio Pérez assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA); 5257a7f87e9SEugenio Pérez 5267a7f87e9SEugenio Pérez s = DO_UPCAST(VhostVDPAState, nc, nc); 527c1a10086SEugenio Pérez v = &s->vhost_vdpa; 528c1a10086SEugenio Pérez 52969498430SEugenio Pérez s0 = vhost_vdpa_net_first_nc_vdpa(s); 53069498430SEugenio Pérez v->shadow_data = s0->vhost_vdpa.shadow_vqs_enabled; 531b40eba9cSEugenio Pérez v->shadow_vqs_enabled = s0->vhost_vdpa.shadow_vqs_enabled; 532c1a10086SEugenio Pérez s->vhost_vdpa.address_space_id = VHOST_VDPA_GUEST_PA_ASID; 533c1a10086SEugenio Pérez 53469498430SEugenio Pérez if (s->vhost_vdpa.shadow_data) { 535c1a10086SEugenio Pérez /* SVQ is already configured for all virtqueues */ 536c1a10086SEugenio Pérez goto out; 
537c1a10086SEugenio Pérez } 538c1a10086SEugenio Pérez 539c1a10086SEugenio Pérez /* 540c1a10086SEugenio Pérez * If we early return in these cases SVQ will not be enabled. The migration 541c1a10086SEugenio Pérez * will be blocked as long as vhost-vdpa backends will not offer _F_LOG. 542c1a10086SEugenio Pérez */ 543152128d6SEugenio Pérez if (!vhost_vdpa_net_valid_svq_features(v->dev->features, NULL)) { 544c1a10086SEugenio Pérez return 0; 545c1a10086SEugenio Pérez } 546c1a10086SEugenio Pérez 547152128d6SEugenio Pérez if (!s->cvq_isolated) { 548152128d6SEugenio Pérez return 0; 549152128d6SEugenio Pérez } 550152128d6SEugenio Pérez 551152128d6SEugenio Pérez cvq_group = vhost_vdpa_get_vring_group(v->device_fd, 552152128d6SEugenio Pérez v->dev->vq_index_end - 1, 553152128d6SEugenio Pérez &err); 554c1a10086SEugenio Pérez if (unlikely(cvq_group < 0)) { 555152128d6SEugenio Pérez error_report_err(err); 556c1a10086SEugenio Pérez return cvq_group; 557c1a10086SEugenio Pérez } 558c1a10086SEugenio Pérez 559c1a10086SEugenio Pérez r = vhost_vdpa_set_address_space_id(v, cvq_group, VHOST_VDPA_NET_CVQ_ASID); 560c1a10086SEugenio Pérez if (unlikely(r < 0)) { 561c1a10086SEugenio Pérez return r; 562c1a10086SEugenio Pérez } 563c1a10086SEugenio Pérez 564c1a10086SEugenio Pérez v->shadow_vqs_enabled = true; 565c1a10086SEugenio Pérez s->vhost_vdpa.address_space_id = VHOST_VDPA_NET_CVQ_ASID; 566c1a10086SEugenio Pérez 567c1a10086SEugenio Pérez out: 5687a7f87e9SEugenio Pérez if (!s->vhost_vdpa.shadow_vqs_enabled) { 5697a7f87e9SEugenio Pérez return 0; 5702df4dd31SEugenio Pérez } 5712df4dd31SEugenio Pérez 57200ef422eSEugenio Pérez if (s0->vhost_vdpa.iova_tree) { 57300ef422eSEugenio Pérez /* 57400ef422eSEugenio Pérez * SVQ is already configured for all virtqueues. 
Reuse IOVA tree for 57500ef422eSEugenio Pérez * simplicity, whether CVQ shares ASID with guest or not, because: 57600ef422eSEugenio Pérez * - Memory listener need access to guest's memory addresses allocated 57700ef422eSEugenio Pérez * in the IOVA tree. 57800ef422eSEugenio Pérez * - There should be plenty of IOVA address space for both ASID not to 57900ef422eSEugenio Pérez * worry about collisions between them. Guest's translations are 58000ef422eSEugenio Pérez * still validated with virtio virtqueue_pop so there is no risk for 58100ef422eSEugenio Pérez * the guest to access memory that it shouldn't. 58200ef422eSEugenio Pérez * 58300ef422eSEugenio Pérez * To allocate a iova tree per ASID is doable but it complicates the 58400ef422eSEugenio Pérez * code and it is not worth it for the moment. 58500ef422eSEugenio Pérez */ 58600ef422eSEugenio Pérez v->iova_tree = s0->vhost_vdpa.iova_tree; 58700ef422eSEugenio Pérez } else { 58800ef422eSEugenio Pérez v->iova_tree = vhost_iova_tree_new(v->iova_range.first, 58900ef422eSEugenio Pérez v->iova_range.last); 59000ef422eSEugenio Pérez } 59100ef422eSEugenio Pérez 5927a7f87e9SEugenio Pérez r = vhost_vdpa_cvq_map_buf(&s->vhost_vdpa, s->cvq_cmd_out_buffer, 5937a7f87e9SEugenio Pérez vhost_vdpa_net_cvq_cmd_page_len(), false); 5947a7f87e9SEugenio Pérez if (unlikely(r < 0)) { 5957a7f87e9SEugenio Pérez return r; 5967a7f87e9SEugenio Pérez } 5977a7f87e9SEugenio Pérez 59817fb889fSEugenio Pérez r = vhost_vdpa_cvq_map_buf(&s->vhost_vdpa, s->status, 5997a7f87e9SEugenio Pérez vhost_vdpa_net_cvq_cmd_page_len(), true); 6007a7f87e9SEugenio Pérez if (unlikely(r < 0)) { 6012df4dd31SEugenio Pérez vhost_vdpa_cvq_unmap_buf(&s->vhost_vdpa, s->cvq_cmd_out_buffer); 6022df4dd31SEugenio Pérez } 6032df4dd31SEugenio Pérez 6047a7f87e9SEugenio Pérez return r; 6057a7f87e9SEugenio Pérez } 6067a7f87e9SEugenio Pérez 6077a7f87e9SEugenio Pérez static void vhost_vdpa_net_cvq_stop(NetClientState *nc) 6087a7f87e9SEugenio Pérez { 6097a7f87e9SEugenio Pérez VhostVDPAState 
*s = DO_UPCAST(VhostVDPAState, nc, nc); 6107a7f87e9SEugenio Pérez 6117a7f87e9SEugenio Pérez assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA); 6127a7f87e9SEugenio Pérez 6137a7f87e9SEugenio Pérez if (s->vhost_vdpa.shadow_vqs_enabled) { 6147a7f87e9SEugenio Pérez vhost_vdpa_cvq_unmap_buf(&s->vhost_vdpa, s->cvq_cmd_out_buffer); 61517fb889fSEugenio Pérez vhost_vdpa_cvq_unmap_buf(&s->vhost_vdpa, s->status); 616c1a10086SEugenio Pérez } 61700ef422eSEugenio Pérez 61800ef422eSEugenio Pérez vhost_vdpa_net_client_stop(nc); 6192df4dd31SEugenio Pérez } 6202df4dd31SEugenio Pérez 6210e6bff0dSHawkins Jiawei static ssize_t vhost_vdpa_net_cvq_add(VhostVDPAState *s, 6220e6bff0dSHawkins Jiawei const struct iovec *out_sg, size_t out_num, 6230e6bff0dSHawkins Jiawei const struct iovec *in_sg, size_t in_num) 624be4278b6SEugenio Pérez { 625be4278b6SEugenio Pérez VhostShadowVirtqueue *svq = g_ptr_array_index(s->vhost_vdpa.shadow_vqs, 0); 626be4278b6SEugenio Pérez int r; 627be4278b6SEugenio Pérez 6280e6bff0dSHawkins Jiawei r = vhost_svq_add(svq, out_sg, out_num, in_sg, in_num, NULL); 629be4278b6SEugenio Pérez if (unlikely(r != 0)) { 630be4278b6SEugenio Pérez if (unlikely(r == -ENOSPC)) { 631be4278b6SEugenio Pérez qemu_log_mask(LOG_GUEST_ERROR, "%s: No space on device queue\n", 632be4278b6SEugenio Pérez __func__); 633be4278b6SEugenio Pérez } 634a864a321SHawkins Jiawei } 635a864a321SHawkins Jiawei 636be4278b6SEugenio Pérez return r; 637be4278b6SEugenio Pérez } 638be4278b6SEugenio Pérez 639be4278b6SEugenio Pérez /* 640a864a321SHawkins Jiawei * Convenience wrapper to poll SVQ for multiple control commands. 641a864a321SHawkins Jiawei * 642a864a321SHawkins Jiawei * Caller should hold the BQL when invoking this function, and should take 643a864a321SHawkins Jiawei * the answer before SVQ pulls by itself when BQL is released. 
644be4278b6SEugenio Pérez */ 645a864a321SHawkins Jiawei static ssize_t vhost_vdpa_net_svq_poll(VhostVDPAState *s, size_t cmds_in_flight) 646a864a321SHawkins Jiawei { 647a864a321SHawkins Jiawei VhostShadowVirtqueue *svq = g_ptr_array_index(s->vhost_vdpa.shadow_vqs, 0); 648a864a321SHawkins Jiawei return vhost_svq_poll(svq, cmds_in_flight); 649be4278b6SEugenio Pérez } 650be4278b6SEugenio Pérez 6511d7e2a8fSHawkins Jiawei static void vhost_vdpa_net_load_cursor_reset(VhostVDPAState *s, 6521d7e2a8fSHawkins Jiawei struct iovec *out_cursor, 6531d7e2a8fSHawkins Jiawei struct iovec *in_cursor) 6541d7e2a8fSHawkins Jiawei { 6551d7e2a8fSHawkins Jiawei /* reset the cursor of the output buffer for the device */ 6561d7e2a8fSHawkins Jiawei out_cursor->iov_base = s->cvq_cmd_out_buffer; 6571d7e2a8fSHawkins Jiawei out_cursor->iov_len = vhost_vdpa_net_cvq_cmd_page_len(); 6581d7e2a8fSHawkins Jiawei 6591d7e2a8fSHawkins Jiawei /* reset the cursor of the in buffer for the device */ 6601d7e2a8fSHawkins Jiawei in_cursor->iov_base = s->status; 6611d7e2a8fSHawkins Jiawei in_cursor->iov_len = vhost_vdpa_net_cvq_cmd_page_len(); 6621d7e2a8fSHawkins Jiawei } 6631d7e2a8fSHawkins Jiawei 664*acec5f68SHawkins Jiawei /* 665*acec5f68SHawkins Jiawei * Poll SVQ for multiple pending control commands and check the device's ack. 666*acec5f68SHawkins Jiawei * 667*acec5f68SHawkins Jiawei * Caller should hold the BQL when invoking this function. 
668*acec5f68SHawkins Jiawei * 669*acec5f68SHawkins Jiawei * @s: The VhostVDPAState 670*acec5f68SHawkins Jiawei * @len: The length of the pending status shadow buffer 671*acec5f68SHawkins Jiawei */ 672*acec5f68SHawkins Jiawei static ssize_t vhost_vdpa_net_svq_flush(VhostVDPAState *s, size_t len) 673*acec5f68SHawkins Jiawei { 674*acec5f68SHawkins Jiawei /* device uses a one-byte length ack for each control command */ 675*acec5f68SHawkins Jiawei ssize_t dev_written = vhost_vdpa_net_svq_poll(s, len); 676*acec5f68SHawkins Jiawei if (unlikely(dev_written != len)) { 677*acec5f68SHawkins Jiawei return -EIO; 678*acec5f68SHawkins Jiawei } 679*acec5f68SHawkins Jiawei 680*acec5f68SHawkins Jiawei /* check the device's ack */ 681*acec5f68SHawkins Jiawei for (int i = 0; i < len; ++i) { 682*acec5f68SHawkins Jiawei if (s->status[i] != VIRTIO_NET_OK) { 683*acec5f68SHawkins Jiawei return -EIO; 684*acec5f68SHawkins Jiawei } 685*acec5f68SHawkins Jiawei } 686*acec5f68SHawkins Jiawei return 0; 687*acec5f68SHawkins Jiawei } 688*acec5f68SHawkins Jiawei 6891d7e2a8fSHawkins Jiawei static ssize_t vhost_vdpa_net_load_cmd(VhostVDPAState *s, 6901d7e2a8fSHawkins Jiawei struct iovec *out_cursor, 6911d7e2a8fSHawkins Jiawei struct iovec *in_cursor, uint8_t class, 6922848c6aaSHawkins Jiawei uint8_t cmd, const struct iovec *data_sg, 6932848c6aaSHawkins Jiawei size_t data_num) 694f73c0c43SEugenio Pérez { 695f73c0c43SEugenio Pérez const struct virtio_net_ctrl_hdr ctrl = { 696f73c0c43SEugenio Pérez .class = class, 697f73c0c43SEugenio Pérez .cmd = cmd, 698f73c0c43SEugenio Pérez }; 699*acec5f68SHawkins Jiawei size_t data_size = iov_size(data_sg, data_num), cmd_size; 7001d7e2a8fSHawkins Jiawei struct iovec out, in; 701a864a321SHawkins Jiawei ssize_t r; 702*acec5f68SHawkins Jiawei unsigned dummy_cursor_iov_cnt; 703*acec5f68SHawkins Jiawei VhostShadowVirtqueue *svq = g_ptr_array_index(s->vhost_vdpa.shadow_vqs, 0); 704f73c0c43SEugenio Pérez 705f73c0c43SEugenio Pérez assert(data_size < 
vhost_vdpa_net_cvq_cmd_page_len() - sizeof(ctrl)); 706*acec5f68SHawkins Jiawei cmd_size = sizeof(ctrl) + data_size; 707*acec5f68SHawkins Jiawei if (vhost_svq_available_slots(svq) < 2 || 708*acec5f68SHawkins Jiawei iov_size(out_cursor, 1) < cmd_size) { 709*acec5f68SHawkins Jiawei /* 710*acec5f68SHawkins Jiawei * It is time to flush all pending control commands if SVQ is full 711*acec5f68SHawkins Jiawei * or control commands shadow buffers are full. 712*acec5f68SHawkins Jiawei * 713*acec5f68SHawkins Jiawei * We can poll here since we've had BQL from the time 714*acec5f68SHawkins Jiawei * we sent the descriptor. 715*acec5f68SHawkins Jiawei */ 716*acec5f68SHawkins Jiawei r = vhost_vdpa_net_svq_flush(s, in_cursor->iov_base - 717*acec5f68SHawkins Jiawei (void *)s->status); 718*acec5f68SHawkins Jiawei if (unlikely(r < 0)) { 719*acec5f68SHawkins Jiawei return r; 720*acec5f68SHawkins Jiawei } 721*acec5f68SHawkins Jiawei 722*acec5f68SHawkins Jiawei vhost_vdpa_net_load_cursor_reset(s, out_cursor, in_cursor); 723*acec5f68SHawkins Jiawei } 724f73c0c43SEugenio Pérez 7252848c6aaSHawkins Jiawei /* pack the CVQ command header */ 7261d7e2a8fSHawkins Jiawei iov_from_buf(out_cursor, 1, 0, &ctrl, sizeof(ctrl)); 7272848c6aaSHawkins Jiawei /* pack the CVQ command command-specific-data */ 7282848c6aaSHawkins Jiawei iov_to_buf(data_sg, data_num, 0, 7291d7e2a8fSHawkins Jiawei out_cursor->iov_base + sizeof(ctrl), data_size); 7301d7e2a8fSHawkins Jiawei 7311d7e2a8fSHawkins Jiawei /* extract the required buffer from the cursor for output */ 732*acec5f68SHawkins Jiawei iov_copy(&out, 1, out_cursor, 1, 0, cmd_size); 7331d7e2a8fSHawkins Jiawei /* extract the required buffer from the cursor for input */ 7341d7e2a8fSHawkins Jiawei iov_copy(&in, 1, in_cursor, 1, 0, sizeof(*s->status)); 7352848c6aaSHawkins Jiawei 736a864a321SHawkins Jiawei r = vhost_vdpa_net_cvq_add(s, &out, 1, &in, 1); 737a864a321SHawkins Jiawei if (unlikely(r < 0)) { 738a864a321SHawkins Jiawei return r; 739a864a321SHawkins Jiawei } 
740a864a321SHawkins Jiawei 741*acec5f68SHawkins Jiawei /* iterate the cursors */ 742*acec5f68SHawkins Jiawei dummy_cursor_iov_cnt = 1; 743*acec5f68SHawkins Jiawei iov_discard_front(&out_cursor, &dummy_cursor_iov_cnt, cmd_size); 744*acec5f68SHawkins Jiawei dummy_cursor_iov_cnt = 1; 745*acec5f68SHawkins Jiawei iov_discard_front(&in_cursor, &dummy_cursor_iov_cnt, sizeof(*s->status)); 746*acec5f68SHawkins Jiawei 747*acec5f68SHawkins Jiawei return 0; 748f73c0c43SEugenio Pérez } 749f73c0c43SEugenio Pérez 7501d7e2a8fSHawkins Jiawei static int vhost_vdpa_net_load_mac(VhostVDPAState *s, const VirtIONet *n, 7511d7e2a8fSHawkins Jiawei struct iovec *out_cursor, 7521d7e2a8fSHawkins Jiawei struct iovec *in_cursor) 753f73c0c43SEugenio Pérez { 75402d3bf09SHawkins Jiawei if (virtio_vdev_has_feature(&n->parent_obj, VIRTIO_NET_F_CTRL_MAC_ADDR)) { 7552848c6aaSHawkins Jiawei const struct iovec data = { 7562848c6aaSHawkins Jiawei .iov_base = (void *)n->mac, 7572848c6aaSHawkins Jiawei .iov_len = sizeof(n->mac), 7582848c6aaSHawkins Jiawei }; 759*acec5f68SHawkins Jiawei ssize_t r = vhost_vdpa_net_load_cmd(s, out_cursor, in_cursor, 7601d7e2a8fSHawkins Jiawei VIRTIO_NET_CTRL_MAC, 761f73c0c43SEugenio Pérez VIRTIO_NET_CTRL_MAC_ADDR_SET, 7622848c6aaSHawkins Jiawei &data, 1); 763*acec5f68SHawkins Jiawei if (unlikely(r < 0)) { 764*acec5f68SHawkins Jiawei return r; 765b479bc3cSHawkins Jiawei } 766f73c0c43SEugenio Pérez } 767f73c0c43SEugenio Pérez 7680ddcecb8SHawkins Jiawei /* 7690ddcecb8SHawkins Jiawei * According to VirtIO standard, "The device MUST have an 7700ddcecb8SHawkins Jiawei * empty MAC filtering table on reset.". 7710ddcecb8SHawkins Jiawei * 7720ddcecb8SHawkins Jiawei * Therefore, there is no need to send this CVQ command if the 7730ddcecb8SHawkins Jiawei * driver also sets an empty MAC filter table, which aligns with 7740ddcecb8SHawkins Jiawei * the device's defaults. 
7750ddcecb8SHawkins Jiawei * 7760ddcecb8SHawkins Jiawei * Note that the device's defaults can mismatch the driver's 7770ddcecb8SHawkins Jiawei * configuration only at live migration. 7780ddcecb8SHawkins Jiawei */ 7790ddcecb8SHawkins Jiawei if (!virtio_vdev_has_feature(&n->parent_obj, VIRTIO_NET_F_CTRL_RX) || 7800ddcecb8SHawkins Jiawei n->mac_table.in_use == 0) { 7810ddcecb8SHawkins Jiawei return 0; 7820ddcecb8SHawkins Jiawei } 7830ddcecb8SHawkins Jiawei 7840ddcecb8SHawkins Jiawei uint32_t uni_entries = n->mac_table.first_multi, 7850ddcecb8SHawkins Jiawei uni_macs_size = uni_entries * ETH_ALEN, 7860ddcecb8SHawkins Jiawei mul_entries = n->mac_table.in_use - uni_entries, 7870ddcecb8SHawkins Jiawei mul_macs_size = mul_entries * ETH_ALEN; 7880ddcecb8SHawkins Jiawei struct virtio_net_ctrl_mac uni = { 7890ddcecb8SHawkins Jiawei .entries = cpu_to_le32(uni_entries), 7900ddcecb8SHawkins Jiawei }; 7910ddcecb8SHawkins Jiawei struct virtio_net_ctrl_mac mul = { 7920ddcecb8SHawkins Jiawei .entries = cpu_to_le32(mul_entries), 7930ddcecb8SHawkins Jiawei }; 7940ddcecb8SHawkins Jiawei const struct iovec data[] = { 7950ddcecb8SHawkins Jiawei { 7960ddcecb8SHawkins Jiawei .iov_base = &uni, 7970ddcecb8SHawkins Jiawei .iov_len = sizeof(uni), 7980ddcecb8SHawkins Jiawei }, { 7990ddcecb8SHawkins Jiawei .iov_base = n->mac_table.macs, 8000ddcecb8SHawkins Jiawei .iov_len = uni_macs_size, 8010ddcecb8SHawkins Jiawei }, { 8020ddcecb8SHawkins Jiawei .iov_base = &mul, 8030ddcecb8SHawkins Jiawei .iov_len = sizeof(mul), 8040ddcecb8SHawkins Jiawei }, { 8050ddcecb8SHawkins Jiawei .iov_base = &n->mac_table.macs[uni_macs_size], 8060ddcecb8SHawkins Jiawei .iov_len = mul_macs_size, 8070ddcecb8SHawkins Jiawei }, 8080ddcecb8SHawkins Jiawei }; 809*acec5f68SHawkins Jiawei ssize_t r = vhost_vdpa_net_load_cmd(s, out_cursor, in_cursor, 8100ddcecb8SHawkins Jiawei VIRTIO_NET_CTRL_MAC, 8110ddcecb8SHawkins Jiawei VIRTIO_NET_CTRL_MAC_TABLE_SET, 8120ddcecb8SHawkins Jiawei data, ARRAY_SIZE(data)); 813*acec5f68SHawkins 
Jiawei if (unlikely(r < 0)) { 814*acec5f68SHawkins Jiawei return r; 8150ddcecb8SHawkins Jiawei } 8160ddcecb8SHawkins Jiawei 817f73c0c43SEugenio Pérez return 0; 818f73c0c43SEugenio Pérez } 819f73c0c43SEugenio Pérez 820f64c7cdaSEugenio Pérez static int vhost_vdpa_net_load_mq(VhostVDPAState *s, 8211d7e2a8fSHawkins Jiawei const VirtIONet *n, 8221d7e2a8fSHawkins Jiawei struct iovec *out_cursor, 8231d7e2a8fSHawkins Jiawei struct iovec *in_cursor) 824f64c7cdaSEugenio Pérez { 825f64c7cdaSEugenio Pérez struct virtio_net_ctrl_mq mq; 826*acec5f68SHawkins Jiawei ssize_t r; 827f64c7cdaSEugenio Pérez 82802d3bf09SHawkins Jiawei if (!virtio_vdev_has_feature(&n->parent_obj, VIRTIO_NET_F_MQ)) { 829f64c7cdaSEugenio Pérez return 0; 830f64c7cdaSEugenio Pérez } 831f64c7cdaSEugenio Pérez 832f64c7cdaSEugenio Pérez mq.virtqueue_pairs = cpu_to_le16(n->curr_queue_pairs); 8332848c6aaSHawkins Jiawei const struct iovec data = { 8342848c6aaSHawkins Jiawei .iov_base = &mq, 8352848c6aaSHawkins Jiawei .iov_len = sizeof(mq), 8362848c6aaSHawkins Jiawei }; 837*acec5f68SHawkins Jiawei r = vhost_vdpa_net_load_cmd(s, out_cursor, in_cursor, 8381d7e2a8fSHawkins Jiawei VIRTIO_NET_CTRL_MQ, 8392848c6aaSHawkins Jiawei VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET, 8402848c6aaSHawkins Jiawei &data, 1); 841*acec5f68SHawkins Jiawei if (unlikely(r < 0)) { 842*acec5f68SHawkins Jiawei return r; 843f45fd95eSHawkins Jiawei } 844f64c7cdaSEugenio Pérez 845f45fd95eSHawkins Jiawei return 0; 846f64c7cdaSEugenio Pérez } 847f64c7cdaSEugenio Pérez 8480b58d368SHawkins Jiawei static int vhost_vdpa_net_load_offloads(VhostVDPAState *s, 8491d7e2a8fSHawkins Jiawei const VirtIONet *n, 8501d7e2a8fSHawkins Jiawei struct iovec *out_cursor, 8511d7e2a8fSHawkins Jiawei struct iovec *in_cursor) 8520b58d368SHawkins Jiawei { 8530b58d368SHawkins Jiawei uint64_t offloads; 854*acec5f68SHawkins Jiawei ssize_t r; 8550b58d368SHawkins Jiawei 8560b58d368SHawkins Jiawei if (!virtio_vdev_has_feature(&n->parent_obj, 8570b58d368SHawkins Jiawei 
VIRTIO_NET_F_CTRL_GUEST_OFFLOADS)) { 8580b58d368SHawkins Jiawei return 0; 8590b58d368SHawkins Jiawei } 8600b58d368SHawkins Jiawei 8610b58d368SHawkins Jiawei if (n->curr_guest_offloads == virtio_net_supported_guest_offloads(n)) { 8620b58d368SHawkins Jiawei /* 8630b58d368SHawkins Jiawei * According to VirtIO standard, "Upon feature negotiation 8640b58d368SHawkins Jiawei * corresponding offload gets enabled to preserve 8650b58d368SHawkins Jiawei * backward compatibility.". 8660b58d368SHawkins Jiawei * 8670b58d368SHawkins Jiawei * Therefore, there is no need to send this CVQ command if the 8680b58d368SHawkins Jiawei * driver also enables all supported offloads, which aligns with 8690b58d368SHawkins Jiawei * the device's defaults. 8700b58d368SHawkins Jiawei * 8710b58d368SHawkins Jiawei * Note that the device's defaults can mismatch the driver's 8720b58d368SHawkins Jiawei * configuration only at live migration. 8730b58d368SHawkins Jiawei */ 8740b58d368SHawkins Jiawei return 0; 8750b58d368SHawkins Jiawei } 8760b58d368SHawkins Jiawei 8770b58d368SHawkins Jiawei offloads = cpu_to_le64(n->curr_guest_offloads); 8782848c6aaSHawkins Jiawei const struct iovec data = { 8792848c6aaSHawkins Jiawei .iov_base = &offloads, 8802848c6aaSHawkins Jiawei .iov_len = sizeof(offloads), 8812848c6aaSHawkins Jiawei }; 882*acec5f68SHawkins Jiawei r = vhost_vdpa_net_load_cmd(s, out_cursor, in_cursor, 8831d7e2a8fSHawkins Jiawei VIRTIO_NET_CTRL_GUEST_OFFLOADS, 8840b58d368SHawkins Jiawei VIRTIO_NET_CTRL_GUEST_OFFLOADS_SET, 8852848c6aaSHawkins Jiawei &data, 1); 886*acec5f68SHawkins Jiawei if (unlikely(r < 0)) { 887*acec5f68SHawkins Jiawei return r; 8886f348071SHawkins Jiawei } 8890b58d368SHawkins Jiawei 8906f348071SHawkins Jiawei return 0; 8910b58d368SHawkins Jiawei } 8920b58d368SHawkins Jiawei 893b12f907eSHawkins Jiawei static int vhost_vdpa_net_load_rx_mode(VhostVDPAState *s, 8941d7e2a8fSHawkins Jiawei struct iovec *out_cursor, 8951d7e2a8fSHawkins Jiawei struct iovec *in_cursor, 896b12f907eSHawkins 
Jiawei uint8_t cmd, 897b12f907eSHawkins Jiawei uint8_t on) 898b12f907eSHawkins Jiawei { 899b12f907eSHawkins Jiawei const struct iovec data = { 900b12f907eSHawkins Jiawei .iov_base = &on, 901b12f907eSHawkins Jiawei .iov_len = sizeof(on), 902b12f907eSHawkins Jiawei }; 903*acec5f68SHawkins Jiawei ssize_t r; 90424e59cfeSHawkins Jiawei 905*acec5f68SHawkins Jiawei r = vhost_vdpa_net_load_cmd(s, out_cursor, in_cursor, 906*acec5f68SHawkins Jiawei VIRTIO_NET_CTRL_RX, cmd, &data, 1); 907*acec5f68SHawkins Jiawei if (unlikely(r < 0)) { 908*acec5f68SHawkins Jiawei return r; 90924e59cfeSHawkins Jiawei } 91024e59cfeSHawkins Jiawei 91124e59cfeSHawkins Jiawei return 0; 912b12f907eSHawkins Jiawei } 913b12f907eSHawkins Jiawei 914b12f907eSHawkins Jiawei static int vhost_vdpa_net_load_rx(VhostVDPAState *s, 9151d7e2a8fSHawkins Jiawei const VirtIONet *n, 9161d7e2a8fSHawkins Jiawei struct iovec *out_cursor, 9171d7e2a8fSHawkins Jiawei struct iovec *in_cursor) 918b12f907eSHawkins Jiawei { 91924e59cfeSHawkins Jiawei ssize_t r; 920b12f907eSHawkins Jiawei 921b12f907eSHawkins Jiawei if (!virtio_vdev_has_feature(&n->parent_obj, VIRTIO_NET_F_CTRL_RX)) { 922b12f907eSHawkins Jiawei return 0; 923b12f907eSHawkins Jiawei } 924b12f907eSHawkins Jiawei 925b12f907eSHawkins Jiawei /* 926b12f907eSHawkins Jiawei * According to virtio_net_reset(), device turns promiscuous mode 927b12f907eSHawkins Jiawei * on by default. 928b12f907eSHawkins Jiawei * 9290a19d879SMichael Tokarev * Additionally, according to VirtIO standard, "Since there are 930b12f907eSHawkins Jiawei * no guarantees, it can use a hash filter or silently switch to 931b12f907eSHawkins Jiawei * allmulti or promiscuous mode if it is given too many addresses.". 932b12f907eSHawkins Jiawei * QEMU marks `n->mac_table.uni_overflow` if guest sets too many 933b12f907eSHawkins Jiawei * non-multicast MAC addresses, indicating that promiscuous mode 934b12f907eSHawkins Jiawei * should be enabled. 
935b12f907eSHawkins Jiawei * 936b12f907eSHawkins Jiawei * Therefore, QEMU should only send this CVQ command if the 937b12f907eSHawkins Jiawei * `n->mac_table.uni_overflow` is not marked and `n->promisc` is off, 938b12f907eSHawkins Jiawei * which sets promiscuous mode on, different from the device's defaults. 939b12f907eSHawkins Jiawei * 940b12f907eSHawkins Jiawei * Note that the device's defaults can mismatch the driver's 941b12f907eSHawkins Jiawei * configuration only at live migration. 942b12f907eSHawkins Jiawei */ 943b12f907eSHawkins Jiawei if (!n->mac_table.uni_overflow && !n->promisc) { 9441d7e2a8fSHawkins Jiawei r = vhost_vdpa_net_load_rx_mode(s, out_cursor, in_cursor, 9451d7e2a8fSHawkins Jiawei VIRTIO_NET_CTRL_RX_PROMISC, 0); 94624e59cfeSHawkins Jiawei if (unlikely(r < 0)) { 94724e59cfeSHawkins Jiawei return r; 948b12f907eSHawkins Jiawei } 949b12f907eSHawkins Jiawei } 950b12f907eSHawkins Jiawei 951b12f907eSHawkins Jiawei /* 952b12f907eSHawkins Jiawei * According to virtio_net_reset(), device turns all-multicast mode 953b12f907eSHawkins Jiawei * off by default. 954b12f907eSHawkins Jiawei * 955b12f907eSHawkins Jiawei * According to VirtIO standard, "Since there are no guarantees, 956b12f907eSHawkins Jiawei * it can use a hash filter or silently switch to allmulti or 957b12f907eSHawkins Jiawei * promiscuous mode if it is given too many addresses.". QEMU marks 958b12f907eSHawkins Jiawei * `n->mac_table.multi_overflow` if guest sets too many 959b12f907eSHawkins Jiawei * non-multicast MAC addresses. 960b12f907eSHawkins Jiawei * 961b12f907eSHawkins Jiawei * Therefore, QEMU should only send this CVQ command if the 962b12f907eSHawkins Jiawei * `n->mac_table.multi_overflow` is marked or `n->allmulti` is on, 963b12f907eSHawkins Jiawei * which sets all-multicast mode on, different from the device's defaults. 
964b12f907eSHawkins Jiawei * 965b12f907eSHawkins Jiawei * Note that the device's defaults can mismatch the driver's 966b12f907eSHawkins Jiawei * configuration only at live migration. 967b12f907eSHawkins Jiawei */ 968b12f907eSHawkins Jiawei if (n->mac_table.multi_overflow || n->allmulti) { 9691d7e2a8fSHawkins Jiawei r = vhost_vdpa_net_load_rx_mode(s, out_cursor, in_cursor, 9701d7e2a8fSHawkins Jiawei VIRTIO_NET_CTRL_RX_ALLMULTI, 1); 97124e59cfeSHawkins Jiawei if (unlikely(r < 0)) { 97224e59cfeSHawkins Jiawei return r; 973b12f907eSHawkins Jiawei } 974b12f907eSHawkins Jiawei } 975b12f907eSHawkins Jiawei 9764fd180c7SHawkins Jiawei if (!virtio_vdev_has_feature(&n->parent_obj, VIRTIO_NET_F_CTRL_RX_EXTRA)) { 9774fd180c7SHawkins Jiawei return 0; 9784fd180c7SHawkins Jiawei } 9794fd180c7SHawkins Jiawei 9804fd180c7SHawkins Jiawei /* 9814fd180c7SHawkins Jiawei * According to virtio_net_reset(), device turns all-unicast mode 9824fd180c7SHawkins Jiawei * off by default. 9834fd180c7SHawkins Jiawei * 9844fd180c7SHawkins Jiawei * Therefore, QEMU should only send this CVQ command if the driver 9854fd180c7SHawkins Jiawei * sets all-unicast mode on, different from the device's defaults. 9864fd180c7SHawkins Jiawei * 9874fd180c7SHawkins Jiawei * Note that the device's defaults can mismatch the driver's 9884fd180c7SHawkins Jiawei * configuration only at live migration. 9894fd180c7SHawkins Jiawei */ 9904fd180c7SHawkins Jiawei if (n->alluni) { 9911d7e2a8fSHawkins Jiawei r = vhost_vdpa_net_load_rx_mode(s, out_cursor, in_cursor, 9921d7e2a8fSHawkins Jiawei VIRTIO_NET_CTRL_RX_ALLUNI, 1); 99324e59cfeSHawkins Jiawei if (r < 0) { 99424e59cfeSHawkins Jiawei return r; 9954fd180c7SHawkins Jiawei } 9964fd180c7SHawkins Jiawei } 9974fd180c7SHawkins Jiawei 9984fd180c7SHawkins Jiawei /* 9994fd180c7SHawkins Jiawei * According to virtio_net_reset(), device turns non-multicast mode 10004fd180c7SHawkins Jiawei * off by default. 
10014fd180c7SHawkins Jiawei * 10024fd180c7SHawkins Jiawei * Therefore, QEMU should only send this CVQ command if the driver 10034fd180c7SHawkins Jiawei * sets non-multicast mode on, different from the device's defaults. 10044fd180c7SHawkins Jiawei * 10054fd180c7SHawkins Jiawei * Note that the device's defaults can mismatch the driver's 10064fd180c7SHawkins Jiawei * configuration only at live migration. 10074fd180c7SHawkins Jiawei */ 10084fd180c7SHawkins Jiawei if (n->nomulti) { 10091d7e2a8fSHawkins Jiawei r = vhost_vdpa_net_load_rx_mode(s, out_cursor, in_cursor, 10101d7e2a8fSHawkins Jiawei VIRTIO_NET_CTRL_RX_NOMULTI, 1); 101124e59cfeSHawkins Jiawei if (r < 0) { 101224e59cfeSHawkins Jiawei return r; 10134fd180c7SHawkins Jiawei } 10144fd180c7SHawkins Jiawei } 10154fd180c7SHawkins Jiawei 10164fd180c7SHawkins Jiawei /* 10174fd180c7SHawkins Jiawei * According to virtio_net_reset(), device turns non-unicast mode 10184fd180c7SHawkins Jiawei * off by default. 10194fd180c7SHawkins Jiawei * 10204fd180c7SHawkins Jiawei * Therefore, QEMU should only send this CVQ command if the driver 10214fd180c7SHawkins Jiawei * sets non-unicast mode on, different from the device's defaults. 10224fd180c7SHawkins Jiawei * 10234fd180c7SHawkins Jiawei * Note that the device's defaults can mismatch the driver's 10244fd180c7SHawkins Jiawei * configuration only at live migration. 10254fd180c7SHawkins Jiawei */ 10264fd180c7SHawkins Jiawei if (n->nouni) { 10271d7e2a8fSHawkins Jiawei r = vhost_vdpa_net_load_rx_mode(s, out_cursor, in_cursor, 10281d7e2a8fSHawkins Jiawei VIRTIO_NET_CTRL_RX_NOUNI, 1); 102924e59cfeSHawkins Jiawei if (r < 0) { 103024e59cfeSHawkins Jiawei return r; 10314fd180c7SHawkins Jiawei } 10324fd180c7SHawkins Jiawei } 10334fd180c7SHawkins Jiawei 10344fd180c7SHawkins Jiawei /* 10354fd180c7SHawkins Jiawei * According to virtio_net_reset(), device turns non-broadcast mode 10364fd180c7SHawkins Jiawei * off by default. 
10374fd180c7SHawkins Jiawei * 10384fd180c7SHawkins Jiawei * Therefore, QEMU should only send this CVQ command if the driver 10394fd180c7SHawkins Jiawei * sets non-broadcast mode on, different from the device's defaults. 10404fd180c7SHawkins Jiawei * 10414fd180c7SHawkins Jiawei * Note that the device's defaults can mismatch the driver's 10424fd180c7SHawkins Jiawei * configuration only at live migration. 10434fd180c7SHawkins Jiawei */ 10444fd180c7SHawkins Jiawei if (n->nobcast) { 10451d7e2a8fSHawkins Jiawei r = vhost_vdpa_net_load_rx_mode(s, out_cursor, in_cursor, 10461d7e2a8fSHawkins Jiawei VIRTIO_NET_CTRL_RX_NOBCAST, 1); 104724e59cfeSHawkins Jiawei if (r < 0) { 104824e59cfeSHawkins Jiawei return r; 10494fd180c7SHawkins Jiawei } 10504fd180c7SHawkins Jiawei } 10514fd180c7SHawkins Jiawei 1052b12f907eSHawkins Jiawei return 0; 1053b12f907eSHawkins Jiawei } 1054b12f907eSHawkins Jiawei 10558f7e9967SHawkins Jiawei static int vhost_vdpa_net_load_single_vlan(VhostVDPAState *s, 10568f7e9967SHawkins Jiawei const VirtIONet *n, 10571d7e2a8fSHawkins Jiawei struct iovec *out_cursor, 10581d7e2a8fSHawkins Jiawei struct iovec *in_cursor, 10598f7e9967SHawkins Jiawei uint16_t vid) 10608f7e9967SHawkins Jiawei { 10618f7e9967SHawkins Jiawei const struct iovec data = { 10628f7e9967SHawkins Jiawei .iov_base = &vid, 10638f7e9967SHawkins Jiawei .iov_len = sizeof(vid), 10648f7e9967SHawkins Jiawei }; 1065*acec5f68SHawkins Jiawei ssize_t r = vhost_vdpa_net_load_cmd(s, out_cursor, in_cursor, 10661d7e2a8fSHawkins Jiawei VIRTIO_NET_CTRL_VLAN, 10678f7e9967SHawkins Jiawei VIRTIO_NET_CTRL_VLAN_ADD, 10688f7e9967SHawkins Jiawei &data, 1); 1069*acec5f68SHawkins Jiawei if (unlikely(r < 0)) { 1070*acec5f68SHawkins Jiawei return r; 10718f7e9967SHawkins Jiawei } 10728f7e9967SHawkins Jiawei 10738f7e9967SHawkins Jiawei return 0; 10748f7e9967SHawkins Jiawei } 10758f7e9967SHawkins Jiawei 10768f7e9967SHawkins Jiawei static int vhost_vdpa_net_load_vlan(VhostVDPAState *s, 10771d7e2a8fSHawkins Jiawei const VirtIONet 
*n, 10781d7e2a8fSHawkins Jiawei struct iovec *out_cursor, 10791d7e2a8fSHawkins Jiawei struct iovec *in_cursor) 10808f7e9967SHawkins Jiawei { 10818f7e9967SHawkins Jiawei int r; 10828f7e9967SHawkins Jiawei 10838f7e9967SHawkins Jiawei if (!virtio_vdev_has_feature(&n->parent_obj, VIRTIO_NET_F_CTRL_VLAN)) { 10848f7e9967SHawkins Jiawei return 0; 10858f7e9967SHawkins Jiawei } 10868f7e9967SHawkins Jiawei 10878f7e9967SHawkins Jiawei for (int i = 0; i < MAX_VLAN >> 5; i++) { 10888f7e9967SHawkins Jiawei for (int j = 0; n->vlans[i] && j <= 0x1f; j++) { 10898f7e9967SHawkins Jiawei if (n->vlans[i] & (1U << j)) { 10901d7e2a8fSHawkins Jiawei r = vhost_vdpa_net_load_single_vlan(s, n, out_cursor, 10911d7e2a8fSHawkins Jiawei in_cursor, (i << 5) + j); 10928f7e9967SHawkins Jiawei if (unlikely(r != 0)) { 10938f7e9967SHawkins Jiawei return r; 10948f7e9967SHawkins Jiawei } 10958f7e9967SHawkins Jiawei } 10968f7e9967SHawkins Jiawei } 10978f7e9967SHawkins Jiawei } 10988f7e9967SHawkins Jiawei 10998f7e9967SHawkins Jiawei return 0; 11008f7e9967SHawkins Jiawei } 11018f7e9967SHawkins Jiawei 1102f3fada59SEugenio Pérez static int vhost_vdpa_net_cvq_load(NetClientState *nc) 1103dd036d8dSEugenio Pérez { 1104dd036d8dSEugenio Pérez VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc); 1105f73c0c43SEugenio Pérez struct vhost_vdpa *v = &s->vhost_vdpa; 1106dd036d8dSEugenio Pérez const VirtIONet *n; 1107f73c0c43SEugenio Pérez int r; 11081d7e2a8fSHawkins Jiawei struct iovec out_cursor, in_cursor; 1109dd036d8dSEugenio Pérez 1110dd036d8dSEugenio Pérez assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA); 1111dd036d8dSEugenio Pérez 11126c482547SEugenio Pérez vhost_vdpa_set_vring_ready(v, v->dev->vq_index); 1113dd036d8dSEugenio Pérez 11146c482547SEugenio Pérez if (v->shadow_vqs_enabled) { 1115dd036d8dSEugenio Pérez n = VIRTIO_NET(v->dev->vdev); 11161d7e2a8fSHawkins Jiawei vhost_vdpa_net_load_cursor_reset(s, &out_cursor, &in_cursor); 11171d7e2a8fSHawkins Jiawei r = vhost_vdpa_net_load_mac(s, n, &out_cursor, 
&in_cursor); 1118f73c0c43SEugenio Pérez if (unlikely(r < 0)) { 1119f73c0c43SEugenio Pérez return r; 1120dd036d8dSEugenio Pérez } 11211d7e2a8fSHawkins Jiawei r = vhost_vdpa_net_load_mq(s, n, &out_cursor, &in_cursor); 1122f64c7cdaSEugenio Pérez if (unlikely(r)) { 1123f64c7cdaSEugenio Pérez return r; 1124f64c7cdaSEugenio Pérez } 11251d7e2a8fSHawkins Jiawei r = vhost_vdpa_net_load_offloads(s, n, &out_cursor, &in_cursor); 11260b58d368SHawkins Jiawei if (unlikely(r)) { 11270b58d368SHawkins Jiawei return r; 11280b58d368SHawkins Jiawei } 11291d7e2a8fSHawkins Jiawei r = vhost_vdpa_net_load_rx(s, n, &out_cursor, &in_cursor); 1130b12f907eSHawkins Jiawei if (unlikely(r)) { 1131b12f907eSHawkins Jiawei return r; 1132b12f907eSHawkins Jiawei } 11331d7e2a8fSHawkins Jiawei r = vhost_vdpa_net_load_vlan(s, n, &out_cursor, &in_cursor); 11348f7e9967SHawkins Jiawei if (unlikely(r)) { 11358f7e9967SHawkins Jiawei return r; 11368f7e9967SHawkins Jiawei } 1137*acec5f68SHawkins Jiawei 1138*acec5f68SHawkins Jiawei /* 1139*acec5f68SHawkins Jiawei * We need to poll and check all pending device's used buffers. 1140*acec5f68SHawkins Jiawei * 1141*acec5f68SHawkins Jiawei * We can poll here since we've had BQL from the time 1142*acec5f68SHawkins Jiawei * we sent the descriptor. 
1143*acec5f68SHawkins Jiawei */ 1144*acec5f68SHawkins Jiawei r = vhost_vdpa_net_svq_flush(s, in_cursor.iov_base - (void *)s->status); 1145*acec5f68SHawkins Jiawei if (unlikely(r)) { 1146*acec5f68SHawkins Jiawei return r; 1147*acec5f68SHawkins Jiawei } 11486c482547SEugenio Pérez } 11496c482547SEugenio Pérez 11506c482547SEugenio Pérez for (int i = 0; i < v->dev->vq_index; ++i) { 11516c482547SEugenio Pérez vhost_vdpa_set_vring_ready(v, i); 11526c482547SEugenio Pérez } 1153dd036d8dSEugenio Pérez 1154dd036d8dSEugenio Pérez return 0; 1155dd036d8dSEugenio Pérez } 1156dd036d8dSEugenio Pérez 1157f8972b56SEugenio Pérez static NetClientInfo net_vhost_vdpa_cvq_info = { 1158f8972b56SEugenio Pérez .type = NET_CLIENT_DRIVER_VHOST_VDPA, 1159f8972b56SEugenio Pérez .size = sizeof(VhostVDPAState), 1160f8972b56SEugenio Pérez .receive = vhost_vdpa_receive, 11617a7f87e9SEugenio Pérez .start = vhost_vdpa_net_cvq_start, 1162f3fada59SEugenio Pérez .load = vhost_vdpa_net_cvq_load, 11637a7f87e9SEugenio Pérez .stop = vhost_vdpa_net_cvq_stop, 1164f8972b56SEugenio Pérez .cleanup = vhost_vdpa_cleanup, 1165f8972b56SEugenio Pérez .has_vnet_hdr = vhost_vdpa_has_vnet_hdr, 1166f8972b56SEugenio Pérez .has_ufo = vhost_vdpa_has_ufo, 1167f8972b56SEugenio Pérez .check_peer_type = vhost_vdpa_check_peer_type, 1168f8972b56SEugenio Pérez }; 1169f8972b56SEugenio Pérez 1170fee364e4SHawkins Jiawei /* 1171fee364e4SHawkins Jiawei * Forward the excessive VIRTIO_NET_CTRL_MAC_TABLE_SET CVQ command to 1172fee364e4SHawkins Jiawei * vdpa device. 
1173fee364e4SHawkins Jiawei * 1174fee364e4SHawkins Jiawei * Considering that QEMU cannot send the entire filter table to the 1175fee364e4SHawkins Jiawei * vdpa device, it should send the VIRTIO_NET_CTRL_RX_PROMISC CVQ 1176fee364e4SHawkins Jiawei * command to enable promiscuous mode to receive all packets, 1177fee364e4SHawkins Jiawei * according to VirtIO standard, "Since there are no guarantees, 1178fee364e4SHawkins Jiawei * it can use a hash filter or silently switch to allmulti or 1179fee364e4SHawkins Jiawei * promiscuous mode if it is given too many addresses.". 1180fee364e4SHawkins Jiawei * 1181fee364e4SHawkins Jiawei * Since QEMU ignores MAC addresses beyond `MAC_TABLE_ENTRIES` and 1182fee364e4SHawkins Jiawei * marks `n->mac_table.x_overflow` accordingly, it should have 1183fee364e4SHawkins Jiawei * the same effect on the device model to receive 1184fee364e4SHawkins Jiawei * (`MAC_TABLE_ENTRIES` + 1) or more non-multicast MAC addresses. 1185fee364e4SHawkins Jiawei * The same applies to multicast MAC addresses. 1186fee364e4SHawkins Jiawei * 1187fee364e4SHawkins Jiawei * Therefore, QEMU can provide the device model with a fake 1188fee364e4SHawkins Jiawei * VIRTIO_NET_CTRL_MAC_TABLE_SET command with (`MAC_TABLE_ENTRIES` + 1) 1189fee364e4SHawkins Jiawei * non-multicast MAC addresses and (`MAC_TABLE_ENTRIES` + 1) multicast 1190fee364e4SHawkins Jiawei * MAC addresses. This ensures that the device model marks 1191fee364e4SHawkins Jiawei * `n->mac_table.uni_overflow` and `n->mac_table.multi_overflow`, 1192fee364e4SHawkins Jiawei * allowing all packets to be received, which aligns with the 1193fee364e4SHawkins Jiawei * state of the vdpa device. 
 */
static int vhost_vdpa_net_excessive_mac_filter_cvq_add(VhostVDPAState *s,
                                                       VirtQueueElement *elem,
                                                       struct iovec *out,
                                                       const struct iovec *in)
{
    struct virtio_net_ctrl_mac mac_data, *mac_ptr;
    struct virtio_net_ctrl_hdr *hdr_ptr;
    uint32_t cursor;
    ssize_t r;
    uint8_t on = 1;

    /*
     * parse the non-multicast MAC address entries from CVQ command.
     *
     * `cursor` walks the guest's original command layout: ctrl header,
     * then the unicast virtio_net_ctrl_mac table, then the multicast one.
     */
    cursor = sizeof(*hdr_ptr);
    r = iov_to_buf(elem->out_sg, elem->out_num, cursor,
                   &mac_data, sizeof(mac_data));
    if (unlikely(r != sizeof(mac_data))) {
        /*
         * If the CVQ command is invalid, we should simulate the vdpa device
         * to reject the VIRTIO_NET_CTRL_MAC_TABLE_SET CVQ command
         */
        *s->status = VIRTIO_NET_ERR;
        return sizeof(*s->status);
    }
    /* skip over the unicast entry table to reach the multicast header */
    cursor += sizeof(mac_data) + le32_to_cpu(mac_data.entries) * ETH_ALEN;

    /* parse the multicast MAC address entries from CVQ command */
    r = iov_to_buf(elem->out_sg, elem->out_num, cursor,
                   &mac_data, sizeof(mac_data));
    if (r != sizeof(mac_data)) {
        /*
         * If the CVQ command is invalid, we should simulate the vdpa device
         * to reject the VIRTIO_NET_CTRL_MAC_TABLE_SET CVQ command
         */
        *s->status = VIRTIO_NET_ERR;
        return sizeof(*s->status);
    }
    cursor += sizeof(mac_data) + le32_to_cpu(mac_data.entries) * ETH_ALEN;

    /* validate the CVQ command: cursor must land exactly on the end */
    if (iov_size(elem->out_sg, elem->out_num) != cursor) {
        /*
         * If the CVQ command is invalid, we should simulate the vdpa device
         * to reject the VIRTIO_NET_CTRL_MAC_TABLE_SET CVQ command
         */
        *s->status = VIRTIO_NET_ERR;
        return sizeof(*s->status);
    }

    /*
     * According to VirtIO standard, "Since there are no guarantees,
     * it can use a hash filter or silently switch to allmulti or
     * promiscuous mode if it is given too many addresses.".
     *
     * Therefore, considering that QEMU is unable to send the entire
     * filter table to the vdpa device, it should send the
     * VIRTIO_NET_CTRL_RX_PROMISC CVQ command to enable promiscuous mode
     */
    hdr_ptr = out->iov_base;
    out->iov_len = sizeof(*hdr_ptr) + sizeof(on);

    hdr_ptr->class = VIRTIO_NET_CTRL_RX;
    hdr_ptr->cmd = VIRTIO_NET_CTRL_RX_PROMISC;
    iov_from_buf(out, 1, sizeof(*hdr_ptr), &on, sizeof(on));
    /* forward the replacement PROMISC command to the vdpa device */
    r = vhost_vdpa_net_cvq_add(s, out, 1, in, 1);
    if (unlikely(r < 0)) {
        return r;
    }

    /*
     * We can poll here since we've had BQL from the time
     * we sent the descriptor.
     */
    r = vhost_vdpa_net_svq_poll(s, 1);
    if (unlikely(r < sizeof(*s->status))) {
        return r;
    }
    if (*s->status != VIRTIO_NET_OK) {
        return sizeof(*s->status);
    }

    /*
     * QEMU should also send a fake VIRTIO_NET_CTRL_MAC_TABLE_SET CVQ
     * command to the device model, including (`MAC_TABLE_ENTRIES` + 1)
     * non-multicast MAC addresses and (`MAC_TABLE_ENTRIES` + 1)
     * multicast MAC addresses.
     *
     * By doing so, the device model can mark `n->mac_table.uni_overflow`
     * and `n->mac_table.multi_overflow`, enabling all packets to be
     * received, which aligns with the state of the vdpa device.
     */
    cursor = 0;
    uint32_t fake_uni_entries = MAC_TABLE_ENTRIES + 1,
             fake_mul_entries = MAC_TABLE_ENTRIES + 1,
             fake_cvq_size = sizeof(struct virtio_net_ctrl_hdr) +
                             sizeof(mac_data) + fake_uni_entries * ETH_ALEN +
                             sizeof(mac_data) + fake_mul_entries * ETH_ALEN;

    /* the fake command is rebuilt in place in the out buffer */
    assert(fake_cvq_size < vhost_vdpa_net_cvq_cmd_page_len());
    out->iov_len = fake_cvq_size;

    /* pack the header for fake CVQ command */
    hdr_ptr = out->iov_base + cursor;
    hdr_ptr->class = VIRTIO_NET_CTRL_MAC;
    hdr_ptr->cmd = VIRTIO_NET_CTRL_MAC_TABLE_SET;
    cursor += sizeof(*hdr_ptr);

    /*
     * Pack the non-multicast MAC addresses part for fake CVQ command.
     *
     * According to virtio_net_handle_mac(), QEMU doesn't verify the MAC
     * addresses provided in CVQ command. Therefore, only the entries
     * field need to be prepared in the CVQ command.
     */
    mac_ptr = out->iov_base + cursor;
    mac_ptr->entries = cpu_to_le32(fake_uni_entries);
    cursor += sizeof(*mac_ptr) + fake_uni_entries * ETH_ALEN;

    /*
     * Pack the multicast MAC addresses part for fake CVQ command.
     *
     * According to virtio_net_handle_mac(), QEMU doesn't verify the MAC
     * addresses provided in CVQ command. Therefore, only the entries
     * field need to be prepared in the CVQ command.
     */
    mac_ptr = out->iov_base + cursor;
    mac_ptr->entries = cpu_to_le32(fake_mul_entries);

    /*
     * Simulating QEMU poll a vdpa device used buffer
     * for VIRTIO_NET_CTRL_MAC_TABLE_SET CVQ command
     */
    return sizeof(*s->status);
}

/**
 * Validate and copy control virtqueue commands.
 *
 * Following QEMU guidelines, we offer a copy of the buffers to the device to
 * prevent TOCTOU bugs.
1334bd907ae4SEugenio Pérez */ 1335bd907ae4SEugenio Pérez static int vhost_vdpa_net_handle_ctrl_avail(VhostShadowVirtqueue *svq, 1336bd907ae4SEugenio Pérez VirtQueueElement *elem, 1337bd907ae4SEugenio Pérez void *opaque) 1338bd907ae4SEugenio Pérez { 13392df4dd31SEugenio Pérez VhostVDPAState *s = opaque; 1340be4278b6SEugenio Pérez size_t in_len; 134145c41018SHawkins Jiawei const struct virtio_net_ctrl_hdr *ctrl; 1342bd907ae4SEugenio Pérez virtio_net_ctrl_ack status = VIRTIO_NET_ERR; 13437a7f87e9SEugenio Pérez /* Out buffer sent to both the vdpa device and the device model */ 13447a7f87e9SEugenio Pérez struct iovec out = { 13457a7f87e9SEugenio Pérez .iov_base = s->cvq_cmd_out_buffer, 13467a7f87e9SEugenio Pérez }; 13472df4dd31SEugenio Pérez /* in buffer used for device model */ 13480e6bff0dSHawkins Jiawei const struct iovec model_in = { 13492df4dd31SEugenio Pérez .iov_base = &status, 13502df4dd31SEugenio Pérez .iov_len = sizeof(status), 13512df4dd31SEugenio Pérez }; 13520e6bff0dSHawkins Jiawei /* in buffer used for vdpa device */ 13530e6bff0dSHawkins Jiawei const struct iovec vdpa_in = { 13540e6bff0dSHawkins Jiawei .iov_base = s->status, 13550e6bff0dSHawkins Jiawei .iov_len = sizeof(*s->status), 13560e6bff0dSHawkins Jiawei }; 1357be4278b6SEugenio Pérez ssize_t dev_written = -EINVAL; 1358bd907ae4SEugenio Pérez 13597a7f87e9SEugenio Pérez out.iov_len = iov_to_buf(elem->out_sg, elem->out_num, 0, 13607a7f87e9SEugenio Pérez s->cvq_cmd_out_buffer, 1361fee364e4SHawkins Jiawei vhost_vdpa_net_cvq_cmd_page_len()); 136245c41018SHawkins Jiawei 136345c41018SHawkins Jiawei ctrl = s->cvq_cmd_out_buffer; 136445c41018SHawkins Jiawei if (ctrl->class == VIRTIO_NET_CTRL_ANNOUNCE) { 13653f9a3eebSEugenio Pérez /* 13663f9a3eebSEugenio Pérez * Guest announce capability is emulated by qemu, so don't forward to 13673f9a3eebSEugenio Pérez * the device. 
13683f9a3eebSEugenio Pérez */ 13693f9a3eebSEugenio Pérez dev_written = sizeof(status); 13703f9a3eebSEugenio Pérez *s->status = VIRTIO_NET_OK; 1371fee364e4SHawkins Jiawei } else if (unlikely(ctrl->class == VIRTIO_NET_CTRL_MAC && 1372fee364e4SHawkins Jiawei ctrl->cmd == VIRTIO_NET_CTRL_MAC_TABLE_SET && 1373fee364e4SHawkins Jiawei iov_size(elem->out_sg, elem->out_num) > out.iov_len)) { 1374fee364e4SHawkins Jiawei /* 1375fee364e4SHawkins Jiawei * Due to the size limitation of the out buffer sent to the vdpa device, 1376fee364e4SHawkins Jiawei * which is determined by vhost_vdpa_net_cvq_cmd_page_len(), excessive 1377fee364e4SHawkins Jiawei * MAC addresses set by the driver for the filter table can cause 1378fee364e4SHawkins Jiawei * truncation of the CVQ command in QEMU. As a result, the vdpa device 1379fee364e4SHawkins Jiawei * rejects the flawed CVQ command. 1380fee364e4SHawkins Jiawei * 1381fee364e4SHawkins Jiawei * Therefore, QEMU must handle this situation instead of sending 13820a19d879SMichael Tokarev * the CVQ command directly. 1383fee364e4SHawkins Jiawei */ 1384fee364e4SHawkins Jiawei dev_written = vhost_vdpa_net_excessive_mac_filter_cvq_add(s, elem, 1385327dedb8SHawkins Jiawei &out, &vdpa_in); 1386fee364e4SHawkins Jiawei if (unlikely(dev_written < 0)) { 1387fee364e4SHawkins Jiawei goto out; 1388fee364e4SHawkins Jiawei } 13893f9a3eebSEugenio Pérez } else { 1390a864a321SHawkins Jiawei ssize_t r; 1391a864a321SHawkins Jiawei r = vhost_vdpa_net_cvq_add(s, &out, 1, &vdpa_in, 1); 1392a864a321SHawkins Jiawei if (unlikely(r < 0)) { 1393a864a321SHawkins Jiawei dev_written = r; 1394bd907ae4SEugenio Pérez goto out; 1395bd907ae4SEugenio Pérez } 1396a864a321SHawkins Jiawei 1397a864a321SHawkins Jiawei /* 1398a864a321SHawkins Jiawei * We can poll here since we've had BQL from the time 1399a864a321SHawkins Jiawei * we sent the descriptor. 
1400a864a321SHawkins Jiawei */ 1401a864a321SHawkins Jiawei dev_written = vhost_vdpa_net_svq_poll(s, 1); 14023f9a3eebSEugenio Pérez } 1403bd907ae4SEugenio Pérez 1404bd907ae4SEugenio Pérez if (unlikely(dev_written < sizeof(status))) { 1405bd907ae4SEugenio Pérez error_report("Insufficient written data (%zu)", dev_written); 14062df4dd31SEugenio Pérez goto out; 14072df4dd31SEugenio Pérez } 14082df4dd31SEugenio Pérez 140917fb889fSEugenio Pérez if (*s->status != VIRTIO_NET_OK) { 1410d45243bcSEugenio Pérez goto out; 14112df4dd31SEugenio Pérez } 14122df4dd31SEugenio Pérez 14132df4dd31SEugenio Pérez status = VIRTIO_NET_ERR; 14140e6bff0dSHawkins Jiawei virtio_net_handle_ctrl_iov(svq->vdev, &model_in, 1, &out, 1); 14152df4dd31SEugenio Pérez if (status != VIRTIO_NET_OK) { 14162df4dd31SEugenio Pérez error_report("Bad CVQ processing in model"); 1417bd907ae4SEugenio Pérez } 1418bd907ae4SEugenio Pérez 1419bd907ae4SEugenio Pérez out: 1420bd907ae4SEugenio Pérez in_len = iov_from_buf(elem->in_sg, elem->in_num, 0, &status, 1421bd907ae4SEugenio Pérez sizeof(status)); 1422bd907ae4SEugenio Pérez if (unlikely(in_len < sizeof(status))) { 1423bd907ae4SEugenio Pérez error_report("Bad device CVQ written length"); 1424bd907ae4SEugenio Pérez } 1425bd907ae4SEugenio Pérez vhost_svq_push_elem(svq, elem, MIN(in_len, sizeof(status))); 1426031b1abaSHawkins Jiawei /* 1427031b1abaSHawkins Jiawei * `elem` belongs to vhost_vdpa_net_handle_ctrl_avail() only when 1428031b1abaSHawkins Jiawei * the function successfully forwards the CVQ command, indicated 1429031b1abaSHawkins Jiawei * by a non-negative value of `dev_written`. Otherwise, it still 1430031b1abaSHawkins Jiawei * belongs to SVQ. 1431031b1abaSHawkins Jiawei * This function should only free the `elem` when it owns. 1432031b1abaSHawkins Jiawei */ 1433031b1abaSHawkins Jiawei if (dev_written >= 0) { 1434bd907ae4SEugenio Pérez g_free(elem); 1435031b1abaSHawkins Jiawei } 1436be4278b6SEugenio Pérez return dev_written < 0 ? 
dev_written : 0; 1437bd907ae4SEugenio Pérez } 1438bd907ae4SEugenio Pérez 1439bd907ae4SEugenio Pérez static const VhostShadowVirtqueueOps vhost_vdpa_net_svq_ops = { 1440bd907ae4SEugenio Pérez .avail_handler = vhost_vdpa_net_handle_ctrl_avail, 1441bd907ae4SEugenio Pérez }; 1442bd907ae4SEugenio Pérez 1443152128d6SEugenio Pérez /** 1444152128d6SEugenio Pérez * Probe if CVQ is isolated 1445152128d6SEugenio Pérez * 1446152128d6SEugenio Pérez * @device_fd The vdpa device fd 1447152128d6SEugenio Pérez * @features Features offered by the device. 1448152128d6SEugenio Pérez * @cvq_index The control vq pair index 1449152128d6SEugenio Pérez * 1450152128d6SEugenio Pérez * Returns <0 in case of failure, 0 if false and 1 if true. 1451152128d6SEugenio Pérez */ 1452152128d6SEugenio Pérez static int vhost_vdpa_probe_cvq_isolation(int device_fd, uint64_t features, 1453152128d6SEugenio Pérez int cvq_index, Error **errp) 1454152128d6SEugenio Pérez { 1455152128d6SEugenio Pérez uint64_t backend_features; 1456152128d6SEugenio Pérez int64_t cvq_group; 1457152128d6SEugenio Pérez uint8_t status = VIRTIO_CONFIG_S_ACKNOWLEDGE | 1458845ec38aSEugenio Pérez VIRTIO_CONFIG_S_DRIVER; 1459152128d6SEugenio Pérez int r; 1460152128d6SEugenio Pérez 1461152128d6SEugenio Pérez ERRP_GUARD(); 1462152128d6SEugenio Pérez 1463152128d6SEugenio Pérez r = ioctl(device_fd, VHOST_GET_BACKEND_FEATURES, &backend_features); 1464152128d6SEugenio Pérez if (unlikely(r < 0)) { 1465152128d6SEugenio Pérez error_setg_errno(errp, errno, "Cannot get vdpa backend_features"); 1466152128d6SEugenio Pérez return r; 1467152128d6SEugenio Pérez } 1468152128d6SEugenio Pérez 1469152128d6SEugenio Pérez if (!(backend_features & BIT_ULL(VHOST_BACKEND_F_IOTLB_ASID))) { 1470152128d6SEugenio Pérez return 0; 1471152128d6SEugenio Pérez } 1472152128d6SEugenio Pérez 1473845ec38aSEugenio Pérez r = ioctl(device_fd, VHOST_VDPA_SET_STATUS, &status); 1474152128d6SEugenio Pérez if (unlikely(r)) { 1475845ec38aSEugenio Pérez error_setg_errno(errp, -r, 
"Cannot set device status"); 1476f1085882SEugenio Pérez goto out; 1477152128d6SEugenio Pérez } 1478152128d6SEugenio Pérez 1479845ec38aSEugenio Pérez r = ioctl(device_fd, VHOST_SET_FEATURES, &features); 1480845ec38aSEugenio Pérez if (unlikely(r)) { 1481845ec38aSEugenio Pérez error_setg_errno(errp, -r, "Cannot set features"); 1482845ec38aSEugenio Pérez goto out; 1483845ec38aSEugenio Pérez } 1484845ec38aSEugenio Pérez 1485845ec38aSEugenio Pérez status |= VIRTIO_CONFIG_S_FEATURES_OK; 1486152128d6SEugenio Pérez r = ioctl(device_fd, VHOST_VDPA_SET_STATUS, &status); 1487152128d6SEugenio Pérez if (unlikely(r)) { 1488845ec38aSEugenio Pérez error_setg_errno(errp, -r, "Cannot set device status"); 1489152128d6SEugenio Pérez goto out; 1490152128d6SEugenio Pérez } 1491152128d6SEugenio Pérez 1492152128d6SEugenio Pérez cvq_group = vhost_vdpa_get_vring_group(device_fd, cvq_index, errp); 1493152128d6SEugenio Pérez if (unlikely(cvq_group < 0)) { 1494152128d6SEugenio Pérez if (cvq_group != -ENOTSUP) { 1495152128d6SEugenio Pérez r = cvq_group; 1496152128d6SEugenio Pérez goto out; 1497152128d6SEugenio Pérez } 1498152128d6SEugenio Pérez 1499152128d6SEugenio Pérez /* 1500152128d6SEugenio Pérez * The kernel report VHOST_BACKEND_F_IOTLB_ASID if the vdpa frontend 1501152128d6SEugenio Pérez * support ASID even if the parent driver does not. The CVQ cannot be 1502152128d6SEugenio Pérez * isolated in this case. 
1503152128d6SEugenio Pérez */ 1504152128d6SEugenio Pérez error_free(*errp); 1505152128d6SEugenio Pérez *errp = NULL; 1506152128d6SEugenio Pérez r = 0; 1507152128d6SEugenio Pérez goto out; 1508152128d6SEugenio Pérez } 1509152128d6SEugenio Pérez 1510152128d6SEugenio Pérez for (int i = 0; i < cvq_index; ++i) { 1511152128d6SEugenio Pérez int64_t group = vhost_vdpa_get_vring_group(device_fd, i, errp); 1512152128d6SEugenio Pérez if (unlikely(group < 0)) { 1513152128d6SEugenio Pérez r = group; 1514152128d6SEugenio Pérez goto out; 1515152128d6SEugenio Pérez } 1516152128d6SEugenio Pérez 1517152128d6SEugenio Pérez if (group == (int64_t)cvq_group) { 1518152128d6SEugenio Pérez r = 0; 1519152128d6SEugenio Pérez goto out; 1520152128d6SEugenio Pérez } 1521152128d6SEugenio Pérez } 1522152128d6SEugenio Pérez 1523152128d6SEugenio Pérez r = 1; 1524152128d6SEugenio Pérez 1525152128d6SEugenio Pérez out: 1526152128d6SEugenio Pérez status = 0; 1527152128d6SEugenio Pérez ioctl(device_fd, VHOST_VDPA_SET_STATUS, &status); 1528152128d6SEugenio Pérez return r; 1529152128d6SEugenio Pérez } 1530152128d6SEugenio Pérez 1531654790b6SJason Wang static NetClientState *net_vhost_vdpa_init(NetClientState *peer, 1532654790b6SJason Wang const char *device, 1533654790b6SJason Wang const char *name, 153440237840SJason Wang int vdpa_device_fd, 153540237840SJason Wang int queue_pair_index, 153640237840SJason Wang int nvqs, 15371576dbb5SEugenio Pérez bool is_datapath, 15381576dbb5SEugenio Pérez bool svq, 15395c1ebd4cSEugenio Pérez struct vhost_vdpa_iova_range iova_range, 1540152128d6SEugenio Pérez uint64_t features, 1541152128d6SEugenio Pérez Error **errp) 15421e0a84eaSCindy Lu { 15431e0a84eaSCindy Lu NetClientState *nc = NULL; 15441e0a84eaSCindy Lu VhostVDPAState *s; 15451e0a84eaSCindy Lu int ret = 0; 15461e0a84eaSCindy Lu assert(name); 1547e77db790SStefan Hajnoczi int cvq_isolated = 0; 1548152128d6SEugenio Pérez 154940237840SJason Wang if (is_datapath) { 155040237840SJason Wang nc = 
qemu_new_net_client(&net_vhost_vdpa_info, peer, device, 155140237840SJason Wang name); 155240237840SJason Wang } else { 1553152128d6SEugenio Pérez cvq_isolated = vhost_vdpa_probe_cvq_isolation(vdpa_device_fd, features, 1554152128d6SEugenio Pérez queue_pair_index * 2, 1555152128d6SEugenio Pérez errp); 1556152128d6SEugenio Pérez if (unlikely(cvq_isolated < 0)) { 1557152128d6SEugenio Pérez return NULL; 1558152128d6SEugenio Pérez } 1559152128d6SEugenio Pérez 1560f8972b56SEugenio Pérez nc = qemu_new_net_control_client(&net_vhost_vdpa_cvq_info, peer, 156140237840SJason Wang device, name); 156240237840SJason Wang } 156353b85d95SLaurent Vivier qemu_set_info_str(nc, TYPE_VHOST_VDPA); 15641e0a84eaSCindy Lu s = DO_UPCAST(VhostVDPAState, nc, nc); 15657327813dSJason Wang 15661e0a84eaSCindy Lu s->vhost_vdpa.device_fd = vdpa_device_fd; 156740237840SJason Wang s->vhost_vdpa.index = queue_pair_index; 15687f211a28SEugenio Pérez s->always_svq = svq; 156969498430SEugenio Pérez s->migration_state.notify = vdpa_net_migration_state_notifier; 15701576dbb5SEugenio Pérez s->vhost_vdpa.shadow_vqs_enabled = svq; 1571a585fad2SEugenio Pérez s->vhost_vdpa.iova_range = iova_range; 15726188d78aSEugenio Pérez s->vhost_vdpa.shadow_data = svq; 15735c1ebd4cSEugenio Pérez if (queue_pair_index == 0) { 15745c1ebd4cSEugenio Pérez vhost_vdpa_net_valid_svq_features(features, 15755c1ebd4cSEugenio Pérez &s->vhost_vdpa.migration_blocker); 15765c1ebd4cSEugenio Pérez } else if (!is_datapath) { 1577babf8b87SEugenio Pérez s->cvq_cmd_out_buffer = mmap(NULL, vhost_vdpa_net_cvq_cmd_page_len(), 1578babf8b87SEugenio Pérez PROT_READ | PROT_WRITE, 1579babf8b87SEugenio Pérez MAP_SHARED | MAP_ANONYMOUS, -1, 0); 1580babf8b87SEugenio Pérez s->status = mmap(NULL, vhost_vdpa_net_cvq_cmd_page_len(), 1581babf8b87SEugenio Pérez PROT_READ | PROT_WRITE, MAP_SHARED | MAP_ANONYMOUS, 1582babf8b87SEugenio Pérez -1, 0); 15832df4dd31SEugenio Pérez 1584bd907ae4SEugenio Pérez s->vhost_vdpa.shadow_vq_ops = &vhost_vdpa_net_svq_ops; 
1585bd907ae4SEugenio Pérez s->vhost_vdpa.shadow_vq_ops_opaque = s; 1586152128d6SEugenio Pérez s->cvq_isolated = cvq_isolated; 15878bc0049eSEugenio Pérez } 158840237840SJason Wang ret = vhost_vdpa_add(nc, (void *)&s->vhost_vdpa, queue_pair_index, nvqs); 158974af5eecSJason Wang if (ret) { 159074af5eecSJason Wang qemu_del_net_client(nc); 1591654790b6SJason Wang return NULL; 159274af5eecSJason Wang } 1593654790b6SJason Wang return nc; 15941e0a84eaSCindy Lu } 15951e0a84eaSCindy Lu 15968170ab3fSEugenio Pérez static int vhost_vdpa_get_features(int fd, uint64_t *features, Error **errp) 15978170ab3fSEugenio Pérez { 15988170ab3fSEugenio Pérez int ret = ioctl(fd, VHOST_GET_FEATURES, features); 15998170ab3fSEugenio Pérez if (unlikely(ret < 0)) { 16008170ab3fSEugenio Pérez error_setg_errno(errp, errno, 16018170ab3fSEugenio Pérez "Fail to query features from vhost-vDPA device"); 16028170ab3fSEugenio Pérez } 16038170ab3fSEugenio Pérez return ret; 16048170ab3fSEugenio Pérez } 16058170ab3fSEugenio Pérez 16068170ab3fSEugenio Pérez static int vhost_vdpa_get_max_queue_pairs(int fd, uint64_t features, 16078170ab3fSEugenio Pérez int *has_cvq, Error **errp) 160840237840SJason Wang { 160940237840SJason Wang unsigned long config_size = offsetof(struct vhost_vdpa_config, buf); 1610cd523a41SStefano Garzarella g_autofree struct vhost_vdpa_config *config = NULL; 161140237840SJason Wang __virtio16 *max_queue_pairs; 161240237840SJason Wang int ret; 161340237840SJason Wang 161440237840SJason Wang if (features & (1 << VIRTIO_NET_F_CTRL_VQ)) { 161540237840SJason Wang *has_cvq = 1; 161640237840SJason Wang } else { 161740237840SJason Wang *has_cvq = 0; 161840237840SJason Wang } 161940237840SJason Wang 162040237840SJason Wang if (features & (1 << VIRTIO_NET_F_MQ)) { 162140237840SJason Wang config = g_malloc0(config_size + sizeof(*max_queue_pairs)); 162240237840SJason Wang config->off = offsetof(struct virtio_net_config, max_virtqueue_pairs); 162340237840SJason Wang config->len = 
sizeof(*max_queue_pairs); 162440237840SJason Wang 162540237840SJason Wang ret = ioctl(fd, VHOST_VDPA_GET_CONFIG, config); 162640237840SJason Wang if (ret) { 162740237840SJason Wang error_setg(errp, "Fail to get config from vhost-vDPA device"); 162840237840SJason Wang return -ret; 162940237840SJason Wang } 163040237840SJason Wang 163140237840SJason Wang max_queue_pairs = (__virtio16 *)&config->buf; 163240237840SJason Wang 163340237840SJason Wang return lduw_le_p(max_queue_pairs); 163440237840SJason Wang } 163540237840SJason Wang 163640237840SJason Wang return 1; 163740237840SJason Wang } 163840237840SJason Wang 16391e0a84eaSCindy Lu int net_init_vhost_vdpa(const Netdev *netdev, const char *name, 16401e0a84eaSCindy Lu NetClientState *peer, Error **errp) 16411e0a84eaSCindy Lu { 16421e0a84eaSCindy Lu const NetdevVhostVDPAOptions *opts; 16438170ab3fSEugenio Pérez uint64_t features; 1644654790b6SJason Wang int vdpa_device_fd; 1645eb3cb751SEugenio Pérez g_autofree NetClientState **ncs = NULL; 1646a585fad2SEugenio Pérez struct vhost_vdpa_iova_range iova_range; 1647eb3cb751SEugenio Pérez NetClientState *nc; 1648aed5da45SEugenio Pérez int queue_pairs, r, i = 0, has_cvq = 0; 16491e0a84eaSCindy Lu 16501e0a84eaSCindy Lu assert(netdev->type == NET_CLIENT_DRIVER_VHOST_VDPA); 16511e0a84eaSCindy Lu opts = &netdev->u.vhost_vdpa; 16527480874aSMarkus Armbruster if (!opts->vhostdev && !opts->vhostfd) { 16538801ccd0SSi-Wei Liu error_setg(errp, 16548801ccd0SSi-Wei Liu "vhost-vdpa: neither vhostdev= nor vhostfd= was specified"); 1655c8295404SEugenio Pérez return -1; 1656c8295404SEugenio Pérez } 16577327813dSJason Wang 16587480874aSMarkus Armbruster if (opts->vhostdev && opts->vhostfd) { 16598801ccd0SSi-Wei Liu error_setg(errp, 16608801ccd0SSi-Wei Liu "vhost-vdpa: vhostdev= and vhostfd= are mutually exclusive"); 16618801ccd0SSi-Wei Liu return -1; 16628801ccd0SSi-Wei Liu } 16638801ccd0SSi-Wei Liu 16647480874aSMarkus Armbruster if (opts->vhostdev) { 16650351152bSEugenio Pérez vdpa_device_fd 
= qemu_open(opts->vhostdev, O_RDWR, errp); 16667327813dSJason Wang if (vdpa_device_fd == -1) { 16677327813dSJason Wang return -errno; 16687327813dSJason Wang } 16695107fd3eSPeter Maydell } else { 16705107fd3eSPeter Maydell /* has_vhostfd */ 16718801ccd0SSi-Wei Liu vdpa_device_fd = monitor_fd_param(monitor_cur(), opts->vhostfd, errp); 16728801ccd0SSi-Wei Liu if (vdpa_device_fd == -1) { 16738801ccd0SSi-Wei Liu error_prepend(errp, "vhost-vdpa: unable to parse vhostfd: "); 16748801ccd0SSi-Wei Liu return -1; 16758801ccd0SSi-Wei Liu } 16768801ccd0SSi-Wei Liu } 16777327813dSJason Wang 16788170ab3fSEugenio Pérez r = vhost_vdpa_get_features(vdpa_device_fd, &features, errp); 16798170ab3fSEugenio Pérez if (unlikely(r < 0)) { 1680aed5da45SEugenio Pérez goto err; 16818170ab3fSEugenio Pérez } 16828170ab3fSEugenio Pérez 16838170ab3fSEugenio Pérez queue_pairs = vhost_vdpa_get_max_queue_pairs(vdpa_device_fd, features, 168440237840SJason Wang &has_cvq, errp); 168540237840SJason Wang if (queue_pairs < 0) { 16867327813dSJason Wang qemu_close(vdpa_device_fd); 168740237840SJason Wang return queue_pairs; 16887327813dSJason Wang } 16897327813dSJason Wang 1690bf7a2ad8SLongpeng r = vhost_vdpa_get_iova_range(vdpa_device_fd, &iova_range); 1691bf7a2ad8SLongpeng if (unlikely(r < 0)) { 1692bf7a2ad8SLongpeng error_setg(errp, "vhost-vdpa: get iova range failed: %s", 1693bf7a2ad8SLongpeng strerror(-r)); 1694bf7a2ad8SLongpeng goto err; 1695bf7a2ad8SLongpeng } 1696bf7a2ad8SLongpeng 169700ef422eSEugenio Pérez if (opts->x_svq && !vhost_vdpa_net_valid_svq_features(features, errp)) { 169800ef422eSEugenio Pérez goto err; 16991576dbb5SEugenio Pérez } 17001576dbb5SEugenio Pérez 170140237840SJason Wang ncs = g_malloc0(sizeof(*ncs) * queue_pairs); 170240237840SJason Wang 170340237840SJason Wang for (i = 0; i < queue_pairs; i++) { 170440237840SJason Wang ncs[i] = net_vhost_vdpa_init(peer, TYPE_VHOST_VDPA, name, 17051576dbb5SEugenio Pérez vdpa_device_fd, i, 2, true, opts->x_svq, 1706152128d6SEugenio Pérez 
iova_range, features, errp); 170740237840SJason Wang if (!ncs[i]) 170840237840SJason Wang goto err; 170940237840SJason Wang } 171040237840SJason Wang 171140237840SJason Wang if (has_cvq) { 171240237840SJason Wang nc = net_vhost_vdpa_init(peer, TYPE_VHOST_VDPA, name, 17131576dbb5SEugenio Pérez vdpa_device_fd, i, 1, false, 1714152128d6SEugenio Pérez opts->x_svq, iova_range, features, errp); 171540237840SJason Wang if (!nc) 171640237840SJason Wang goto err; 171740237840SJason Wang } 171840237840SJason Wang 1719654790b6SJason Wang return 0; 172040237840SJason Wang 172140237840SJason Wang err: 172240237840SJason Wang if (i) { 17239bd05507SSi-Wei Liu for (i--; i >= 0; i--) { 17249bd05507SSi-Wei Liu qemu_del_net_client(ncs[i]); 17259bd05507SSi-Wei Liu } 172640237840SJason Wang } 17271576dbb5SEugenio Pérez 172840237840SJason Wang qemu_close(vdpa_device_fd); 172940237840SJason Wang 173040237840SJason Wang return -1; 17311e0a84eaSCindy Lu } 1732