xref: /openbmc/qemu/net/vhost-vdpa.c (revision fb96d131)
/*
 * vhost-vdpa.c
 *
 * Copyright(c) 2017-2018 Intel Corporation.
 * Copyright(c) 2020 Red Hat, Inc.
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

#include "qemu/osdep.h"
#include "clients.h"
#include "hw/virtio/virtio-net.h"
#include "net/vhost_net.h"
#include "net/vhost-vdpa.h"
#include "hw/virtio/vhost-vdpa.h"
#include "qemu/config-file.h"
#include "qemu/error-report.h"
#include "qemu/log.h"
#include "qemu/memalign.h"
#include "qemu/option.h"
#include "qapi/error.h"
#include <linux/vhost.h>
#include <sys/ioctl.h>
#include <err.h>
#include "standard-headers/linux/virtio_net.h"
#include "monitor/monitor.h"
#include "hw/virtio/vhost.h"

/* TODO: add the multiqueue support here */
typedef struct VhostVDPAState {
    NetClientState nc;
    struct vhost_vdpa vhost_vdpa;
    VHostNetState *vhost_net;

    /* Control commands shadow buffers */
    void *cvq_cmd_out_buffer, *cvq_cmd_in_buffer;
    bool started;
} VhostVDPAState;

const int vdpa_feature_bits[] = {
    VIRTIO_F_NOTIFY_ON_EMPTY,
    VIRTIO_RING_F_INDIRECT_DESC,
    VIRTIO_RING_F_EVENT_IDX,
    VIRTIO_F_ANY_LAYOUT,
    VIRTIO_F_VERSION_1,
    VIRTIO_NET_F_CSUM,
    VIRTIO_NET_F_GUEST_CSUM,
    VIRTIO_NET_F_GSO,
    VIRTIO_NET_F_GUEST_TSO4,
    VIRTIO_NET_F_GUEST_TSO6,
    VIRTIO_NET_F_GUEST_ECN,
    VIRTIO_NET_F_GUEST_UFO,
    VIRTIO_NET_F_HOST_TSO4,
    VIRTIO_NET_F_HOST_TSO6,
    VIRTIO_NET_F_HOST_ECN,
    VIRTIO_NET_F_HOST_UFO,
    VIRTIO_NET_F_MRG_RXBUF,
    VIRTIO_NET_F_MTU,
    VIRTIO_NET_F_CTRL_RX,
    VIRTIO_NET_F_CTRL_RX_EXTRA,
    VIRTIO_NET_F_CTRL_VLAN,
    VIRTIO_NET_F_GUEST_ANNOUNCE,
    VIRTIO_NET_F_CTRL_MAC_ADDR,
    VIRTIO_NET_F_RSS,
    VIRTIO_NET_F_MQ,
    VIRTIO_NET_F_CTRL_VQ,
    VIRTIO_F_IOMMU_PLATFORM,
    VIRTIO_F_RING_PACKED,
    VIRTIO_NET_F_HASH_REPORT,
    VIRTIO_NET_F_STATUS,
    VHOST_INVALID_FEATURE_BIT
};

/** Supported device-specific feature bits with SVQ */
static const uint64_t vdpa_svq_device_features =
    BIT_ULL(VIRTIO_NET_F_CSUM) |
    BIT_ULL(VIRTIO_NET_F_GUEST_CSUM) |
    BIT_ULL(VIRTIO_NET_F_MTU) |
    BIT_ULL(VIRTIO_NET_F_MAC) |
    BIT_ULL(VIRTIO_NET_F_GUEST_TSO4) |
    BIT_ULL(VIRTIO_NET_F_GUEST_TSO6) |
    BIT_ULL(VIRTIO_NET_F_GUEST_ECN) |
    BIT_ULL(VIRTIO_NET_F_GUEST_UFO) |
    BIT_ULL(VIRTIO_NET_F_HOST_TSO4) |
    BIT_ULL(VIRTIO_NET_F_HOST_TSO6) |
    BIT_ULL(VIRTIO_NET_F_HOST_ECN) |
    BIT_ULL(VIRTIO_NET_F_HOST_UFO) |
    BIT_ULL(VIRTIO_NET_F_MRG_RXBUF) |
    BIT_ULL(VIRTIO_NET_F_STATUS) |
    BIT_ULL(VIRTIO_NET_F_CTRL_VQ) |
    BIT_ULL(VIRTIO_F_ANY_LAYOUT) |
    BIT_ULL(VIRTIO_NET_F_CTRL_MAC_ADDR) |
    BIT_ULL(VIRTIO_NET_F_RSC_EXT) |
    BIT_ULL(VIRTIO_NET_F_STANDBY);

VHostNetState *vhost_vdpa_get_vhost_net(NetClientState *nc)
{
    VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);
    assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);
    return s->vhost_net;
}

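/* Check that the backend really is a virtio-net vDPA device */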
static int vhost_vdpa_net_check_device_id(struct vhost_net *net)
{
    uint32_t device_id;
    int ret;
    struct vhost_dev *hdev;

    hdev = (struct vhost_dev *)&net->dev;
    ret = hdev->vhost_ops->vhost_get_device_id(hdev, &device_id);
    if (ret) {
        return ret;
    }
    if (device_id != VIRTIO_ID_NET) {
        return -ENOTSUP;
    }
    return 0;
}

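/*
 * Create the vhost_net backend for the given queues and verify that the
 * underlying device is a network device.
 */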
static int vhost_vdpa_add(NetClientState *ncs, void *be,
                          int queue_pair_index, int nvqs)
{
    VhostNetOptions options;
    struct vhost_net *net = NULL;
    VhostVDPAState *s;
    int ret;

    options.backend_type = VHOST_BACKEND_TYPE_VDPA;
    assert(ncs->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);
    s = DO_UPCAST(VhostVDPAState, nc, ncs);
    options.net_backend = ncs;
    options.opaque      = be;
    options.busyloop_timeout = 0;
    options.nvqs = nvqs;

    net = vhost_net_init(&options);
    if (!net) {
        error_report("failed to init vhost_net for queue");
        goto err_init;
    }
    s->vhost_net = net;
    ret = vhost_vdpa_net_check_device_id(net);
    if (ret) {
        goto err_check;
    }
    return 0;
err_check:
    vhost_net_cleanup(net);
    g_free(net);
    s->vhost_net = NULL;
err_init:
    return -1;
}

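/*
 * Tear down a net client: free the CVQ shadow buffers, drop the IOVA tree
 * when the last queue of the device goes away, and close the device fd.
 */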
static void vhost_vdpa_cleanup(NetClientState *nc)
{
    VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);

    qemu_vfree(s->cvq_cmd_out_buffer);
    qemu_vfree(s->cvq_cmd_in_buffer);
    if (s->vhost_net) {
        struct vhost_dev *dev = &s->vhost_net->dev;

        if (dev->vq_index + dev->nvqs == dev->vq_index_end) {
            g_clear_pointer(&s->vhost_vdpa.iova_tree, vhost_iova_tree_delete);
        }
        vhost_net_cleanup(s->vhost_net);
        g_free(s->vhost_net);
        s->vhost_net = NULL;
    }
    if (s->vhost_vdpa.device_fd >= 0) {
        qemu_close(s->vhost_vdpa.device_fd);
        s->vhost_vdpa.device_fd = -1;
    }
}

static bool vhost_vdpa_has_vnet_hdr(NetClientState *nc)
{
    assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);

    return true;
}

static bool vhost_vdpa_has_ufo(NetClientState *nc)
{
    assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);
    VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);
    uint64_t features = 0;
    features |= (1ULL << VIRTIO_NET_F_HOST_UFO);
    features = vhost_net_get_features(s->vhost_net, features);
    return !!(features & (1ULL << VIRTIO_NET_F_HOST_UFO));
}

static bool vhost_vdpa_check_peer_type(NetClientState *nc, ObjectClass *oc,
                                       Error **errp)
{
    const char *driver = object_class_get_name(oc);

    if (!g_str_has_prefix(driver, "virtio-net-")) {
        error_setg(errp, "vhost-vdpa requires frontend driver virtio-net-*");
        return false;
    }

    return true;
}

/** Dummy receive in case QEMU falls back to userland tap networking */
static ssize_t vhost_vdpa_receive(NetClientState *nc, const uint8_t *buf,
                                  size_t size)
{
    return 0;
}

static NetClientInfo net_vhost_vdpa_info = {
    .type = NET_CLIENT_DRIVER_VHOST_VDPA,
    .size = sizeof(VhostVDPAState),
    .receive = vhost_vdpa_receive,
    .cleanup = vhost_vdpa_cleanup,
    .has_vnet_hdr = vhost_vdpa_has_vnet_hdr,
    .has_ufo = vhost_vdpa_has_ufo,
    .check_peer_type = vhost_vdpa_check_peer_type,
};

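/* Unmap a CVQ shadow buffer previously mapped with vhost_vdpa_cvq_map_buf() */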
static void vhost_vdpa_cvq_unmap_buf(struct vhost_vdpa *v, void *addr)
{
    VhostIOVATree *tree = v->iova_tree;
    DMAMap needle = {
        /*
         * No need to specify size or to look for more translations since
         * this contiguous chunk was allocated by us.
         */
        .translated_addr = (hwaddr)(uintptr_t)addr,
    };
    const DMAMap *map = vhost_iova_tree_find_iova(tree, &needle);
    int r;

    if (unlikely(!map)) {
        error_report("Cannot locate expected map");
        return;
    }

    r = vhost_vdpa_dma_unmap(v, map->iova, map->size + 1);
    if (unlikely(r != 0)) {
        error_report("Device cannot unmap: %s(%d)", g_strerror(r), r);
    }

    vhost_iova_tree_remove(tree, *map);
}

static size_t vhost_vdpa_net_cvq_cmd_len(void)
{
    /*
     * MAC_TABLE_SET is the ctrl command that produces the longest out buffer.
     * The in buffer is always 1 byte, so it fits here.
     */
    return sizeof(struct virtio_net_ctrl_hdr) +
           2 * sizeof(struct virtio_net_ctrl_mac) +
           MAC_TABLE_ENTRIES * ETH_ALEN;
}

static size_t vhost_vdpa_net_cvq_cmd_page_len(void)
{
    return ROUND_UP(vhost_vdpa_net_cvq_cmd_len(), qemu_real_host_page_size());
}

/** Map a CVQ buffer into the device IOVA space. */
static int vhost_vdpa_cvq_map_buf(struct vhost_vdpa *v, void *buf, size_t size,
                                  bool write)
{
    DMAMap map = {};
    int r;

    map.translated_addr = (hwaddr)(uintptr_t)buf;
    map.size = size - 1;
    map.perm = write ? IOMMU_RW : IOMMU_RO;
    r = vhost_iova_tree_map_alloc(v->iova_tree, &map);
    if (unlikely(r != IOVA_OK)) {
        error_report("Cannot map injected element");
        return r;
    }

    r = vhost_vdpa_dma_map(v, map.iova, size, buf, !write);
    if (unlikely(r < 0)) {
        goto dma_map_err;
    }

    return 0;

dma_map_err:
    vhost_iova_tree_remove(v->iova_tree, map);
    return r;
}

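/*
 * NetClientInfo start callback: map the CVQ shadow buffers into the device
 * IOVA space when shadow virtqueues are enabled.
 */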
static int vhost_vdpa_net_cvq_start(NetClientState *nc)
{
    VhostVDPAState *s;
    int r;

    assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);

    s = DO_UPCAST(VhostVDPAState, nc, nc);
    if (!s->vhost_vdpa.shadow_vqs_enabled) {
        return 0;
    }

    r = vhost_vdpa_cvq_map_buf(&s->vhost_vdpa, s->cvq_cmd_out_buffer,
                               vhost_vdpa_net_cvq_cmd_page_len(), false);
    if (unlikely(r < 0)) {
        return r;
    }

    r = vhost_vdpa_cvq_map_buf(&s->vhost_vdpa, s->cvq_cmd_in_buffer,
                               vhost_vdpa_net_cvq_cmd_page_len(), true);
    if (unlikely(r < 0)) {
        vhost_vdpa_cvq_unmap_buf(&s->vhost_vdpa, s->cvq_cmd_out_buffer);
    }

    return r;
}

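/* NetClientInfo stop callback: unmap the CVQ shadow buffers */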
static void vhost_vdpa_net_cvq_stop(NetClientState *nc)
{
    VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);

    assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);

    if (s->vhost_vdpa.shadow_vqs_enabled) {
        vhost_vdpa_cvq_unmap_buf(&s->vhost_vdpa, s->cvq_cmd_out_buffer);
        vhost_vdpa_cvq_unmap_buf(&s->vhost_vdpa, s->cvq_cmd_in_buffer);
    }
}

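/*
 * Send a command to the device through the shadow control virtqueue and wait
 * for its completion. Returns the number of bytes the device wrote to the in
 * buffer, or a negative value on failure.
 */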
static ssize_t vhost_vdpa_net_cvq_add(VhostVDPAState *s, size_t out_len,
                                      size_t in_len)
{
    /* Buffers for the device */
    const struct iovec out = {
        .iov_base = s->cvq_cmd_out_buffer,
        .iov_len = out_len,
    };
    const struct iovec in = {
        .iov_base = s->cvq_cmd_in_buffer,
        .iov_len = in_len,
    };
    VhostShadowVirtqueue *svq = g_ptr_array_index(s->vhost_vdpa.shadow_vqs, 0);
    int r;

    r = vhost_svq_add(svq, &out, 1, &in, 1, NULL);
    if (unlikely(r != 0)) {
        if (unlikely(r == -ENOSPC)) {
            qemu_log_mask(LOG_GUEST_ERROR, "%s: No space on device queue\n",
                          __func__);
        }
        return r;
    }

    /*
     * We can poll here since we've kept the BQL from the time we sent the
     * descriptor. Also, we need to take the answer before SVQ pulls by
     * itself, when the BQL is released.
     */
    return vhost_svq_poll(svq);
}

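/*
 * NetClientInfo load callback: with shadow virtqueues enabled, restore the
 * device state (currently only the MAC address) through the CVQ.
 */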
static int vhost_vdpa_net_load(NetClientState *nc)
{
    VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);
    const struct vhost_vdpa *v = &s->vhost_vdpa;
    const VirtIONet *n;
    uint64_t features;

    assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);

    if (!v->shadow_vqs_enabled) {
        return 0;
    }

    n = VIRTIO_NET(v->dev->vdev);
    features = n->parent_obj.guest_features;
    if (features & BIT_ULL(VIRTIO_NET_F_CTRL_MAC_ADDR)) {
        const struct virtio_net_ctrl_hdr ctrl = {
            .class = VIRTIO_NET_CTRL_MAC,
            .cmd = VIRTIO_NET_CTRL_MAC_ADDR_SET,
        };
        char *cursor = s->cvq_cmd_out_buffer;
        ssize_t dev_written;

        memcpy(cursor, &ctrl, sizeof(ctrl));
        cursor += sizeof(ctrl);
        memcpy(cursor, n->mac, sizeof(n->mac));

        dev_written = vhost_vdpa_net_cvq_add(s, sizeof(ctrl) + sizeof(n->mac),
                                             sizeof(virtio_net_ctrl_ack));
        if (unlikely(dev_written < 0)) {
            return dev_written;
        }

        return *((virtio_net_ctrl_ack *)s->cvq_cmd_in_buffer) != VIRTIO_NET_OK;
    }

    return 0;
}

static NetClientInfo net_vhost_vdpa_cvq_info = {
    .type = NET_CLIENT_DRIVER_VHOST_VDPA,
    .size = sizeof(VhostVDPAState),
    .receive = vhost_vdpa_receive,
    .start = vhost_vdpa_net_cvq_start,
    .load = vhost_vdpa_net_load,
    .stop = vhost_vdpa_net_cvq_stop,
    .cleanup = vhost_vdpa_cleanup,
    .has_vnet_hdr = vhost_vdpa_has_vnet_hdr,
    .has_ufo = vhost_vdpa_has_ufo,
    .check_peer_type = vhost_vdpa_check_peer_type,
};

/**
 * Do not forward commands not supported by SVQ. Otherwise, the device could
 * accept it and QEMU would not know how to update the device model.
 */
static bool vhost_vdpa_net_cvq_validate_cmd(const void *out_buf, size_t len)
{
    struct virtio_net_ctrl_hdr ctrl;

    if (unlikely(len < sizeof(ctrl))) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: invalid length of out buffer %zu\n", __func__, len);
        return false;
    }

    memcpy(&ctrl, out_buf, sizeof(ctrl));
    switch (ctrl.class) {
    case VIRTIO_NET_CTRL_MAC:
        switch (ctrl.cmd) {
        case VIRTIO_NET_CTRL_MAC_ADDR_SET:
            return true;
        default:
            qemu_log_mask(LOG_GUEST_ERROR, "%s: invalid mac cmd %u\n",
                          __func__, ctrl.cmd);
        }
        break;
    default:
        qemu_log_mask(LOG_GUEST_ERROR, "%s: invalid control class %u\n",
                      __func__, ctrl.class);
    }

    return false;
}

/**
 * Validate and copy control virtqueue commands.
 *
 * Following QEMU guidelines, we offer a copy of the buffers to the device to
 * prevent TOCTOU bugs.
 */
static int vhost_vdpa_net_handle_ctrl_avail(VhostShadowVirtqueue *svq,
                                            VirtQueueElement *elem,
                                            void *opaque)
{
    VhostVDPAState *s = opaque;
    size_t in_len;
    virtio_net_ctrl_ack status = VIRTIO_NET_ERR;
    /* Out buffer sent to both the vdpa device and the device model */
    struct iovec out = {
        .iov_base = s->cvq_cmd_out_buffer,
    };
    /* In buffer used for the device model */
    const struct iovec in = {
        .iov_base = &status,
        .iov_len = sizeof(status),
    };
    ssize_t dev_written = -EINVAL;
    bool ok;

    out.iov_len = iov_to_buf(elem->out_sg, elem->out_num, 0,
                             s->cvq_cmd_out_buffer,
                             vhost_vdpa_net_cvq_cmd_len());
    ok = vhost_vdpa_net_cvq_validate_cmd(s->cvq_cmd_out_buffer, out.iov_len);
    if (unlikely(!ok)) {
        goto out;
    }

    dev_written = vhost_vdpa_net_cvq_add(s, out.iov_len, sizeof(status));
    if (unlikely(dev_written < 0)) {
        goto out;
    }

    if (unlikely(dev_written < sizeof(status))) {
        error_report("Insufficient written data (%zd)", dev_written);
        goto out;
    }

    memcpy(&status, s->cvq_cmd_in_buffer, sizeof(status));
    if (status != VIRTIO_NET_OK) {
        goto out;
    }

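    /*
     * Let the device model process the command too, so that QEMU's view of
     * the device (e.g. its MAC filter) stays in sync with the vdpa device.
     * virtio_net_handle_ctrl_iov() reports its status through the in iovec,
     * overwriting the value set here.
     */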
    status = VIRTIO_NET_ERR;
    virtio_net_handle_ctrl_iov(svq->vdev, &in, 1, &out, 1);
    if (status != VIRTIO_NET_OK) {
        error_report("Bad CVQ processing in model");
    }

out:
    in_len = iov_from_buf(elem->in_sg, elem->in_num, 0, &status,
                          sizeof(status));
    if (unlikely(in_len < sizeof(status))) {
        error_report("Bad device CVQ written length");
    }
    vhost_svq_push_elem(svq, elem, MIN(in_len, sizeof(status)));
    g_free(elem);
    return dev_written < 0 ? dev_written : 0;
}

static const VhostShadowVirtqueueOps vhost_vdpa_net_svq_ops = {
    .avail_handler = vhost_vdpa_net_handle_ctrl_avail,
};

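/*
 * Create a net client for one data queue pair or for the control virtqueue.
 * The control client also gets the page-aligned CVQ shadow buffers and the
 * SVQ ops used to intercept CVQ commands.
 */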
static NetClientState *net_vhost_vdpa_init(NetClientState *peer,
                                           const char *device,
                                           const char *name,
                                           int vdpa_device_fd,
                                           int queue_pair_index,
                                           int nvqs,
                                           bool is_datapath,
                                           bool svq,
                                           VhostIOVATree *iova_tree)
{
    NetClientState *nc = NULL;
    VhostVDPAState *s;
    int ret = 0;
    assert(name);
    if (is_datapath) {
        nc = qemu_new_net_client(&net_vhost_vdpa_info, peer, device,
                                 name);
    } else {
        nc = qemu_new_net_control_client(&net_vhost_vdpa_cvq_info, peer,
                                         device, name);
    }
    snprintf(nc->info_str, sizeof(nc->info_str), "%s", TYPE_VHOST_VDPA);
    s = DO_UPCAST(VhostVDPAState, nc, nc);

    s->vhost_vdpa.device_fd = vdpa_device_fd;
    s->vhost_vdpa.index = queue_pair_index;
    s->vhost_vdpa.shadow_vqs_enabled = svq;
    s->vhost_vdpa.iova_tree = iova_tree;
    if (!is_datapath) {
        s->cvq_cmd_out_buffer = qemu_memalign(qemu_real_host_page_size(),
                                            vhost_vdpa_net_cvq_cmd_page_len());
        memset(s->cvq_cmd_out_buffer, 0, vhost_vdpa_net_cvq_cmd_page_len());
        s->cvq_cmd_in_buffer = qemu_memalign(qemu_real_host_page_size(),
                                            vhost_vdpa_net_cvq_cmd_page_len());
        memset(s->cvq_cmd_in_buffer, 0, vhost_vdpa_net_cvq_cmd_page_len());

        s->vhost_vdpa.shadow_vq_ops = &vhost_vdpa_net_svq_ops;
        s->vhost_vdpa.shadow_vq_ops_opaque = s;
    }
    ret = vhost_vdpa_add(nc, (void *)&s->vhost_vdpa, queue_pair_index, nvqs);
    if (ret) {
        qemu_del_net_client(nc);
        return NULL;
    }
    return nc;
}

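/* Query the IOVA range the device supports */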
static int vhost_vdpa_get_iova_range(int fd,
                                     struct vhost_vdpa_iova_range *iova_range)
{
    int ret = ioctl(fd, VHOST_VDPA_GET_IOVA_RANGE, iova_range);

    return ret < 0 ? -errno : 0;
}

static int vhost_vdpa_get_features(int fd, uint64_t *features, Error **errp)
{
    int ret = ioctl(fd, VHOST_GET_FEATURES, features);
    if (unlikely(ret < 0)) {
        error_setg_errno(errp, errno,
                         "Failed to query features from vhost-vDPA device");
    }
    return ret;
}

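/*
 * Read max_virtqueue_pairs from the device config space, defaulting to 1 if
 * the device does not offer VIRTIO_NET_F_MQ, and report whether it offers a
 * control virtqueue.
 */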
static int vhost_vdpa_get_max_queue_pairs(int fd, uint64_t features,
                                          int *has_cvq, Error **errp)
{
    unsigned long config_size = offsetof(struct vhost_vdpa_config, buf);
    g_autofree struct vhost_vdpa_config *config = NULL;
    __virtio16 *max_queue_pairs;
    int ret;

    if (features & (1 << VIRTIO_NET_F_CTRL_VQ)) {
        *has_cvq = 1;
    } else {
        *has_cvq = 0;
    }

    if (features & (1 << VIRTIO_NET_F_MQ)) {
        config = g_malloc0(config_size + sizeof(*max_queue_pairs));
        config->off = offsetof(struct virtio_net_config, max_virtqueue_pairs);
        config->len = sizeof(*max_queue_pairs);

        ret = ioctl(fd, VHOST_VDPA_GET_CONFIG, config);
        if (ret) {
            error_setg(errp, "Failed to get config from vhost-vDPA device");
            return -errno;
        }

        max_queue_pairs = (__virtio16 *)&config->buf;

        return lduw_le_p(max_queue_pairs);
    }

    return 1;
}

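/* Entry point for the vhost-vdpa netdev backend (-netdev vhost-vdpa) */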
int net_init_vhost_vdpa(const Netdev *netdev, const char *name,
                        NetClientState *peer, Error **errp)
{
    const NetdevVhostVDPAOptions *opts;
    uint64_t features;
    int vdpa_device_fd;
    g_autofree NetClientState **ncs = NULL;
    g_autoptr(VhostIOVATree) iova_tree = NULL;
    NetClientState *nc;
    int queue_pairs, r, i = 0, has_cvq = 0;

    assert(netdev->type == NET_CLIENT_DRIVER_VHOST_VDPA);
    opts = &netdev->u.vhost_vdpa;
    if (!opts->vhostdev) {
        error_setg(errp, "vdpa character device not specified with vhostdev");
        return -1;
    }

    vdpa_device_fd = qemu_open(opts->vhostdev, O_RDWR, errp);
    if (vdpa_device_fd == -1) {
        return -errno;
    }

    r = vhost_vdpa_get_features(vdpa_device_fd, &features, errp);
    if (unlikely(r < 0)) {
        goto err;
    }

    queue_pairs = vhost_vdpa_get_max_queue_pairs(vdpa_device_fd, features,
                                                 &has_cvq, errp);
    if (queue_pairs < 0) {
        qemu_close(vdpa_device_fd);
        return queue_pairs;
    }

    if (opts->x_svq) {
        struct vhost_vdpa_iova_range iova_range;

        uint64_t invalid_dev_features =
            features & ~vdpa_svq_device_features &
            /* Transport features are all accepted at this point */
            ~MAKE_64BIT_MASK(VIRTIO_TRANSPORT_F_START,
                             VIRTIO_TRANSPORT_F_END - VIRTIO_TRANSPORT_F_START);

        if (invalid_dev_features) {
            error_setg(errp, "vdpa svq does not work with features 0x%" PRIx64,
                       invalid_dev_features);
            goto err_svq;
        }

        r = vhost_vdpa_get_iova_range(vdpa_device_fd, &iova_range);
        if (unlikely(r < 0)) {
            error_setg_errno(errp, -r,
                             "Failed to get iova range from vhost-vDPA device");
            goto err_svq;
        }
        iova_tree = vhost_iova_tree_new(iova_range.first, iova_range.last);
    }

    ncs = g_malloc0(sizeof(*ncs) * queue_pairs);

    for (i = 0; i < queue_pairs; i++) {
        ncs[i] = net_vhost_vdpa_init(peer, TYPE_VHOST_VDPA, name,
                                     vdpa_device_fd, i, 2, true, opts->x_svq,
                                     iova_tree);
        if (!ncs[i]) {
            goto err;
        }
    }

    if (has_cvq) {
        nc = net_vhost_vdpa_init(peer, TYPE_VHOST_VDPA, name,
                                 vdpa_device_fd, i, 1, false,
                                 opts->x_svq, iova_tree);
        if (!nc) {
            goto err;
        }
    }

    /* iova_tree ownership belongs to the last NetClientState */
    g_steal_pointer(&iova_tree);
    return 0;

err:
    if (i) {
        for (i--; i >= 0; i--) {
            qemu_del_net_client(ncs[i]);
        }
    }

err_svq:
    qemu_close(vdpa_device_fd);

    return -1;
}