/*
 * vhost-vdpa.c
 *
 * Copyright(c) 2017-2018 Intel Corporation.
 * Copyright(c) 2020 Red Hat, Inc.
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

#include "qemu/osdep.h"
#include "clients.h"
#include "hw/virtio/virtio-net.h"
#include "net/vhost_net.h"
#include "net/vhost-vdpa.h"
#include "hw/virtio/vhost-vdpa.h"
#include "qemu/config-file.h"
#include "qemu/error-report.h"
#include "qemu/log.h"
#include "qemu/memalign.h"
#include "qemu/option.h"
#include "qapi/error.h"
#include <linux/vhost.h>
#include <sys/ioctl.h>
#include <err.h>
#include "standard-headers/linux/virtio_net.h"
#include "monitor/monitor.h"
#include "migration/misc.h"
#include "hw/virtio/vhost.h"
#include "trace.h"

/* TODO: add multiqueue support here */
typedef struct VhostVDPAState {
    NetClientState nc;
    struct vhost_vdpa vhost_vdpa;
    NotifierWithReturn migration_state;
    VHostNetState *vhost_net;

    /* Control commands shadow buffers */
    void *cvq_cmd_out_buffer;
    virtio_net_ctrl_ack *status;

    /* The device always has SVQ enabled */
    bool always_svq;

    /* The device can isolate CVQ in its own ASID */
    bool cvq_isolated;

    bool started;
} VhostVDPAState;

/*
 * The array is sorted alphabetically in ascending order,
 * with the exception of VHOST_INVALID_FEATURE_BIT,
 * which should always be the last entry.
 */
const int vdpa_feature_bits[] = {
    VIRTIO_F_ANY_LAYOUT,
    VIRTIO_F_IN_ORDER,
    VIRTIO_F_IOMMU_PLATFORM,
    VIRTIO_F_NOTIFICATION_DATA,
    VIRTIO_F_NOTIFY_ON_EMPTY,
    VIRTIO_F_RING_PACKED,
    VIRTIO_F_RING_RESET,
    VIRTIO_F_VERSION_1,
    VIRTIO_NET_F_CSUM,
    VIRTIO_NET_F_CTRL_GUEST_OFFLOADS,
    VIRTIO_NET_F_CTRL_MAC_ADDR,
    VIRTIO_NET_F_CTRL_RX,
    VIRTIO_NET_F_CTRL_RX_EXTRA,
    VIRTIO_NET_F_CTRL_VLAN,
    VIRTIO_NET_F_CTRL_VQ,
    VIRTIO_NET_F_GSO,
    VIRTIO_NET_F_GUEST_CSUM,
    VIRTIO_NET_F_GUEST_ECN,
    VIRTIO_NET_F_GUEST_TSO4,
    VIRTIO_NET_F_GUEST_TSO6,
    VIRTIO_NET_F_GUEST_UFO,
    VIRTIO_NET_F_GUEST_USO4,
    VIRTIO_NET_F_GUEST_USO6,
    VIRTIO_NET_F_HASH_REPORT,
    VIRTIO_NET_F_HOST_ECN,
    VIRTIO_NET_F_HOST_TSO4,
    VIRTIO_NET_F_HOST_TSO6,
    VIRTIO_NET_F_HOST_UFO,
    VIRTIO_NET_F_HOST_USO,
    VIRTIO_NET_F_MQ,
    VIRTIO_NET_F_MRG_RXBUF,
    VIRTIO_NET_F_MTU,
    VIRTIO_NET_F_RSC_EXT,
    VIRTIO_NET_F_RSS,
    VIRTIO_NET_F_STATUS,
    VIRTIO_RING_F_EVENT_IDX,
    VIRTIO_RING_F_INDIRECT_DESC,

    /* VHOST_INVALID_FEATURE_BIT should always be the last entry */
    VHOST_INVALID_FEATURE_BIT
};

/** Supported device-specific feature bits with SVQ */
static const uint64_t vdpa_svq_device_features =
    BIT_ULL(VIRTIO_NET_F_CSUM) |
    BIT_ULL(VIRTIO_NET_F_GUEST_CSUM) |
    BIT_ULL(VIRTIO_NET_F_CTRL_GUEST_OFFLOADS) |
    BIT_ULL(VIRTIO_NET_F_MTU) |
    BIT_ULL(VIRTIO_NET_F_MAC) |
    BIT_ULL(VIRTIO_NET_F_GUEST_TSO4) |
    BIT_ULL(VIRTIO_NET_F_GUEST_TSO6) |
    BIT_ULL(VIRTIO_NET_F_GUEST_ECN) |
    BIT_ULL(VIRTIO_NET_F_GUEST_UFO) |
    BIT_ULL(VIRTIO_NET_F_HOST_TSO4) |
    BIT_ULL(VIRTIO_NET_F_HOST_TSO6) |
    BIT_ULL(VIRTIO_NET_F_HOST_ECN) |
    BIT_ULL(VIRTIO_NET_F_HOST_UFO) |
    BIT_ULL(VIRTIO_NET_F_MRG_RXBUF) |
    BIT_ULL(VIRTIO_NET_F_STATUS) |
    BIT_ULL(VIRTIO_NET_F_CTRL_VQ) |
    BIT_ULL(VIRTIO_NET_F_CTRL_RX) |
    BIT_ULL(VIRTIO_NET_F_CTRL_VLAN) |
    BIT_ULL(VIRTIO_NET_F_CTRL_RX_EXTRA) |
    BIT_ULL(VIRTIO_NET_F_MQ) |
    BIT_ULL(VIRTIO_F_ANY_LAYOUT) |
    BIT_ULL(VIRTIO_NET_F_CTRL_MAC_ADDR) |
    /* VHOST_F_LOG_ALL is exposed by SVQ */
    BIT_ULL(VHOST_F_LOG_ALL) |
    BIT_ULL(VIRTIO_NET_F_HASH_REPORT) |
    BIT_ULL(VIRTIO_NET_F_RSS) |
    BIT_ULL(VIRTIO_NET_F_RSC_EXT) |
    BIT_ULL(VIRTIO_NET_F_STANDBY) |
    BIT_ULL(VIRTIO_NET_F_SPEED_DUPLEX);

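/*
 * Address space id reserved for the shadow control virtqueue. When the
 * device can isolate CVQ in its own virtqueue group (cvq_isolated), the
 * CVQ is moved to this ASID so that the control buffers injected by QEMU
 * are kept apart from the guest memory mappings, which stay in
 * VHOST_VDPA_GUEST_PA_ASID.
 */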
#define VHOST_VDPA_NET_CVQ_ASID 1

VHostNetState *vhost_vdpa_get_vhost_net(NetClientState *nc)
{
    VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);
    assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);
    return s->vhost_net;
}

static size_t vhost_vdpa_net_cvq_cmd_len(void)
{
    /*
     * MAC_TABLE_SET is the ctrl command that produces the longest out buffer.
     * The in buffer is always 1 byte, so it should fit here.
     */
    return sizeof(struct virtio_net_ctrl_hdr) +
           2 * sizeof(struct virtio_net_ctrl_mac) +
           MAC_TABLE_ENTRIES * ETH_ALEN;
}

static size_t vhost_vdpa_net_cvq_cmd_page_len(void)
{
    return ROUND_UP(vhost_vdpa_net_cvq_cmd_len(), qemu_real_host_page_size());
}

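/*
 * Check that the device features offered by the backend are a subset of
 * what SVQ can shadow: transport features are validated by
 * vhost_svq_valid_features(), device-specific ones against
 * vdpa_svq_device_features.
 */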
static bool vhost_vdpa_net_valid_svq_features(uint64_t features, Error **errp)
{
    uint64_t invalid_dev_features =
        features & ~vdpa_svq_device_features &
        /* Transport features are all accepted at this point */
        ~MAKE_64BIT_MASK(VIRTIO_TRANSPORT_F_START,
                         VIRTIO_TRANSPORT_F_END - VIRTIO_TRANSPORT_F_START);

    if (invalid_dev_features) {
        error_setg(errp, "vdpa svq does not work with features 0x%" PRIx64,
                   invalid_dev_features);
        return false;
    }

    return vhost_svq_valid_features(features, errp);
}

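/* Verify that the vhost-vdpa backend is actually a network device */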
static int vhost_vdpa_net_check_device_id(struct vhost_net *net)
{
    uint32_t device_id;
    int ret;
    struct vhost_dev *hdev;

    hdev = (struct vhost_dev *)&net->dev;
    ret = hdev->vhost_ops->vhost_get_device_id(hdev, &device_id);
    if (ret) {
        /* Do not inspect device_id if the ioctl failed: it is uninitialized */
        return ret;
    }
    if (device_id != VIRTIO_ID_NET) {
        return -ENOTSUP;
    }
    return 0;
}

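/*
 * Create the vhost_net backend for a vdpa net client and sanity-check that
 * the backend exposes a network device. On failure, the partially
 * initialized vhost_net is cleaned up.
 */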
static int vhost_vdpa_add(NetClientState *ncs, void *be,
                          int queue_pair_index, int nvqs)
{
    VhostNetOptions options;
    struct vhost_net *net = NULL;
    VhostVDPAState *s;
    int ret;

    options.backend_type = VHOST_BACKEND_TYPE_VDPA;
    assert(ncs->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);
    s = DO_UPCAST(VhostVDPAState, nc, ncs);
    options.net_backend = ncs;
    options.opaque      = be;
    options.busyloop_timeout = 0;
    options.nvqs = nvqs;

    net = vhost_net_init(&options);
    if (!net) {
        error_report("failed to init vhost_net for queue");
        goto err_init;
    }
    s->vhost_net = net;
    ret = vhost_vdpa_net_check_device_id(net);
    if (ret) {
        goto err_check;
    }
    return 0;
err_check:
    vhost_net_cleanup(net);
    g_free(net);
err_init:
    return -1;
}

static void vhost_vdpa_cleanup(NetClientState *nc)
{
    VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);

    munmap(s->cvq_cmd_out_buffer, vhost_vdpa_net_cvq_cmd_page_len());
    munmap(s->status, vhost_vdpa_net_cvq_cmd_page_len());
    if (s->vhost_net) {
        vhost_net_cleanup(s->vhost_net);
        g_free(s->vhost_net);
        s->vhost_net = NULL;
    }
    if (s->vhost_vdpa.index != 0) {
        return;
    }
    qemu_close(s->vhost_vdpa.shared->device_fd);
    g_free(s->vhost_vdpa.shared);
}

/** Dummy SetSteeringEBPF to support RSS for vhost-vdpa backend */
static bool vhost_vdpa_set_steering_ebpf(NetClientState *nc, int prog_fd)
{
    return true;
}

static bool vhost_vdpa_has_vnet_hdr(NetClientState *nc)
{
    assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);

    return true;
}

static bool vhost_vdpa_has_ufo(NetClientState *nc)
{
    assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);
    VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);
    uint64_t features = 0;
    features |= (1ULL << VIRTIO_NET_F_HOST_UFO);
    features = vhost_net_get_features(s->vhost_net, features);
    return !!(features & (1ULL << VIRTIO_NET_F_HOST_UFO));
}

/*
 * FIXME: vhost_vdpa doesn't have an API to "set h/w endianness". But it's
 * reasonable to assume that h/w is LE by default, because LE is what
 * virtio 1.0 and later ask for. So, this function just says "yes, the h/w is
 * LE". Otherwise, on a BE machine, higher-level code would mistakenly think
 * the h/w is BE and can't support VDPA for a virtio 1.0 client.
 */
static int vhost_vdpa_set_vnet_le(NetClientState *nc, bool enable)
{
    return 0;
}

static bool vhost_vdpa_check_peer_type(NetClientState *nc, ObjectClass *oc,
                                       Error **errp)
{
    const char *driver = object_class_get_name(oc);

    if (!g_str_has_prefix(driver, "virtio-net-")) {
        error_setg(errp, "vhost-vdpa requires frontend driver virtio-net-*");
        return false;
    }

    return true;
}

/** Dummy receive in case qemu falls back to userland tap networking */
static ssize_t vhost_vdpa_receive(NetClientState *nc, const uint8_t *buf,
                                  size_t size)
{
    return size;
}


/** From any vdpa net client, get the netclient of the i-th queue pair */
static VhostVDPAState *vhost_vdpa_net_get_nc_vdpa(VhostVDPAState *s, int i)
{
    NICState *nic = qemu_get_nic(s->nc.peer);
    NetClientState *nc_i = qemu_get_peer(nic->ncs, i);

    return DO_UPCAST(VhostVDPAState, nc, nc_i);
}

static VhostVDPAState *vhost_vdpa_net_first_nc_vdpa(VhostVDPAState *s)
{
    return vhost_vdpa_net_get_nc_vdpa(s, 0);
}

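/*
 * Toggle SVQ mode on the data virtqueues around live migration: restart
 * vhost-net so the virtqueues are shadowed (and dirty memory is logged via
 * SVQ's VHOST_F_LOG_ALL emulation) while migration is in flight, and
 * switched back to passthrough when it finishes or fails.
 */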
static void vhost_vdpa_net_log_global_enable(VhostVDPAState *s, bool enable)
{
    struct vhost_vdpa *v = &s->vhost_vdpa;
    VirtIONet *n;
    VirtIODevice *vdev;
    int data_queue_pairs, cvq, r;

    /* We are only called on the first data vq and only if x-svq is not set */
    if (s->vhost_vdpa.shadow_vqs_enabled == enable) {
        return;
    }

    vdev = v->dev->vdev;
    n = VIRTIO_NET(vdev);
    if (!n->vhost_started) {
        return;
    }

    data_queue_pairs = n->multiqueue ? n->max_queue_pairs : 1;
    cvq = virtio_vdev_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ) ?
                                  n->max_ncs - n->max_queue_pairs : 0;
    v->shared->svq_switching = enable ?
        SVQ_TSTATE_ENABLING : SVQ_TSTATE_DISABLING;
    /*
     * TODO: vhost_net_stop does suspend, get_base and reset. We can be smarter
     * in the future and resume the device if read-only operations between
     * suspend and reset go wrong.
     */
    vhost_net_stop(vdev, n->nic->ncs, data_queue_pairs, cvq);

    /*
     * Start will check migration setup_or_active to decide whether to
     * configure SVQ.
     */
    r = vhost_net_start(vdev, n->nic->ncs, data_queue_pairs, cvq);
    if (unlikely(r < 0)) {
        error_report("unable to start vhost net: %s(%d)", g_strerror(-r), -r);
    }
    v->shared->svq_switching = SVQ_TSTATE_DONE;
}

static int vdpa_net_migration_state_notifier(NotifierWithReturn *notifier,
                                             MigrationEvent *e, Error **errp)
{
    VhostVDPAState *s = container_of(notifier, VhostVDPAState, migration_state);

    if (e->type == MIG_EVENT_PRECOPY_SETUP) {
        vhost_vdpa_net_log_global_enable(s, true);
    } else if (e->type == MIG_EVENT_PRECOPY_FAILED) {
        vhost_vdpa_net_log_global_enable(s, false);
    }
    return 0;
}

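/*
 * First-queue-only setup: register the migration notifier and, if SVQ is
 * enabled from the start, allocate the IOVA tree shared by all queues.
 */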
static void vhost_vdpa_net_data_start_first(VhostVDPAState *s)
{
    struct vhost_vdpa *v = &s->vhost_vdpa;

    migration_add_notifier(&s->migration_state,
                           vdpa_net_migration_state_notifier);
    if (v->shadow_vqs_enabled) {
        v->shared->iova_tree = vhost_iova_tree_new(v->shared->iova_range.first,
                                                   v->shared->iova_range.last);
    }
}

static int vhost_vdpa_net_data_start(NetClientState *nc)
{
    VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);
    struct vhost_vdpa *v = &s->vhost_vdpa;

    assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);

    if (s->always_svq || migration_is_running()) {
        v->shadow_vqs_enabled = true;
    } else {
        v->shadow_vqs_enabled = false;
    }

    if (v->index == 0) {
        v->shared->shadow_data = v->shadow_vqs_enabled;
        vhost_vdpa_net_data_start_first(s);
    }

    return 0;
}

static int vhost_vdpa_net_data_load(NetClientState *nc)
{
    VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);
    struct vhost_vdpa *v = &s->vhost_vdpa;
    bool has_cvq = v->dev->vq_index_end % 2;

    if (has_cvq) {
        return 0;
    }

    for (int i = 0; i < v->dev->nvqs; ++i) {
        int ret = vhost_vdpa_set_vring_ready(v, i + v->dev->vq_index);
        if (ret < 0) {
            return ret;
        }
    }
    return 0;
}

static void vhost_vdpa_net_client_stop(NetClientState *nc)
{
    VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);
    struct vhost_dev *dev;

    assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);

    if (s->vhost_vdpa.index == 0) {
        migration_remove_notifier(&s->migration_state);
    }

    dev = s->vhost_vdpa.dev;
    if (dev->vq_index + dev->nvqs == dev->vq_index_end) {
        g_clear_pointer(&s->vhost_vdpa.shared->iova_tree,
                        vhost_iova_tree_delete);
    }
}

static NetClientInfo net_vhost_vdpa_info = {
    .type = NET_CLIENT_DRIVER_VHOST_VDPA,
    .size = sizeof(VhostVDPAState),
    .receive = vhost_vdpa_receive,
    .start = vhost_vdpa_net_data_start,
    .load = vhost_vdpa_net_data_load,
    .stop = vhost_vdpa_net_client_stop,
    .cleanup = vhost_vdpa_cleanup,
    .has_vnet_hdr = vhost_vdpa_has_vnet_hdr,
    .has_ufo = vhost_vdpa_has_ufo,
    .set_vnet_le = vhost_vdpa_set_vnet_le,
    .check_peer_type = vhost_vdpa_check_peer_type,
    .set_steering_ebpf = vhost_vdpa_set_steering_ebpf,
};

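/*
 * Query the virtqueue group a given vq index belongs to. Returns the group
 * number on success, or a negative errno (with @errp set) on failure.
 */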
static int64_t vhost_vdpa_get_vring_group(int device_fd, unsigned vq_index,
                                          Error **errp)
{
    struct vhost_vring_state state = {
        .index = vq_index,
    };
    int r = ioctl(device_fd, VHOST_VDPA_GET_VRING_GROUP, &state);

    if (unlikely(r < 0)) {
        r = -errno;
        error_setg_errno(errp, errno, "Cannot get VQ %u group", vq_index);
        return r;
    }

    return state.num;
}

static int vhost_vdpa_set_address_space_id(struct vhost_vdpa *v,
                                           unsigned vq_group,
                                           unsigned asid_num)
{
    struct vhost_vring_state asid = {
        .index = vq_group,
        .num = asid_num,
    };
    int r;

    trace_vhost_vdpa_set_address_space_id(v, vq_group, asid_num);

    r = ioctl(v->shared->device_fd, VHOST_VDPA_SET_GROUP_ASID, &asid);
    if (unlikely(r < 0)) {
        error_report("Can't set vq group %u asid %u, errno=%d (%s)",
                     asid.index, asid.num, errno, g_strerror(errno));
    }
    return r;
}

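/* Unmap a shadow CVQ buffer previously mapped by vhost_vdpa_cvq_map_buf() */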
static void vhost_vdpa_cvq_unmap_buf(struct vhost_vdpa *v, void *addr)
{
    VhostIOVATree *tree = v->shared->iova_tree;
    DMAMap needle = {
        /*
         * No need to specify size or to look for more translations since
         * this contiguous chunk was allocated by us.
         */
        .translated_addr = (hwaddr)(uintptr_t)addr,
    };
    const DMAMap *map = vhost_iova_tree_find_iova(tree, &needle);
    int r;

    if (unlikely(!map)) {
        error_report("Cannot locate expected map");
        return;
    }

    r = vhost_vdpa_dma_unmap(v->shared, v->address_space_id, map->iova,
                             map->size + 1);
    if (unlikely(r != 0)) {
        error_report("Device cannot unmap: %s(%d)", g_strerror(r), r);
    }

    vhost_iova_tree_remove(tree, *map);
}

/** Map CVQ buffer. */
static int vhost_vdpa_cvq_map_buf(struct vhost_vdpa *v, void *buf, size_t size,
                                  bool write)
{
    DMAMap map = {};
    int r;

    map.translated_addr = (hwaddr)(uintptr_t)buf;
    map.size = size - 1;
    map.perm = write ? IOMMU_RW : IOMMU_RO;
    r = vhost_iova_tree_map_alloc(v->shared->iova_tree, &map);
    if (unlikely(r != IOVA_OK)) {
        error_report("Cannot map injected element");
        return r;
    }

    r = vhost_vdpa_dma_map(v->shared, v->address_space_id, map.iova,
                           vhost_vdpa_net_cvq_cmd_page_len(), buf, !write);
    if (unlikely(r < 0)) {
        goto dma_map_err;
    }

    return 0;

dma_map_err:
    vhost_iova_tree_remove(v->shared->iova_tree, map);
    return r;
}

static int vhost_vdpa_net_cvq_start(NetClientState *nc)
{
    VhostVDPAState *s, *s0;
    struct vhost_vdpa *v;
    int64_t cvq_group;
    int r;
    Error *err = NULL;

    assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);

    s = DO_UPCAST(VhostVDPAState, nc, nc);
    v = &s->vhost_vdpa;

    s0 = vhost_vdpa_net_first_nc_vdpa(s);
    v->shadow_vqs_enabled = s0->vhost_vdpa.shadow_vqs_enabled;
    s->vhost_vdpa.address_space_id = VHOST_VDPA_GUEST_PA_ASID;

    if (v->shared->shadow_data) {
        /* SVQ is already configured for all virtqueues */
        goto out;
    }

    /*
     * If we return early in these cases, SVQ will not be enabled. Migration
     * will be blocked as long as the vhost-vdpa backend does not offer _F_LOG.
     */
    if (!vhost_vdpa_net_valid_svq_features(v->dev->features, NULL)) {
        return 0;
    }

    if (!s->cvq_isolated) {
        return 0;
    }

    cvq_group = vhost_vdpa_get_vring_group(v->shared->device_fd,
                                           v->dev->vq_index_end - 1,
                                           &err);
    if (unlikely(cvq_group < 0)) {
        error_report_err(err);
        return cvq_group;
    }

    r = vhost_vdpa_set_address_space_id(v, cvq_group, VHOST_VDPA_NET_CVQ_ASID);
    if (unlikely(r < 0)) {
        return r;
    }

    v->shadow_vqs_enabled = true;
    s->vhost_vdpa.address_space_id = VHOST_VDPA_NET_CVQ_ASID;

out:
    if (!s->vhost_vdpa.shadow_vqs_enabled) {
        return 0;
    }

    /*
     * If another vhost_vdpa already has an iova_tree, reuse it for simplicity,
     * whether CVQ shares ASID with the guest or not, because:
     * - The memory listener needs access to guest's memory addresses allocated
     *   in the IOVA tree.
     * - There should be plenty of IOVA address space for both ASIDs not to
     *   worry about collisions between them.  Guest's translations are still
     *   validated with virtio virtqueue_pop so there is no risk for the guest
     *   to access memory that it shouldn't.
     *
     * To allocate an IOVA tree per ASID is doable but it complicates the code
     * and it is not worth it for the moment.
     */
    if (!v->shared->iova_tree) {
        v->shared->iova_tree = vhost_iova_tree_new(v->shared->iova_range.first,
                                                   v->shared->iova_range.last);
    }

    r = vhost_vdpa_cvq_map_buf(&s->vhost_vdpa, s->cvq_cmd_out_buffer,
                               vhost_vdpa_net_cvq_cmd_page_len(), false);
    if (unlikely(r < 0)) {
        return r;
    }

    r = vhost_vdpa_cvq_map_buf(&s->vhost_vdpa, s->status,
                               vhost_vdpa_net_cvq_cmd_page_len(), true);
    if (unlikely(r < 0)) {
        vhost_vdpa_cvq_unmap_buf(&s->vhost_vdpa, s->cvq_cmd_out_buffer);
    }

    return r;
}

static void vhost_vdpa_net_cvq_stop(NetClientState *nc)
{
    VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);

    assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);

    if (s->vhost_vdpa.shadow_vqs_enabled) {
        vhost_vdpa_cvq_unmap_buf(&s->vhost_vdpa, s->cvq_cmd_out_buffer);
        vhost_vdpa_cvq_unmap_buf(&s->vhost_vdpa, s->status);
    }

    vhost_vdpa_net_client_stop(nc);
}

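/*
 * Add one control command to the shadow CVQ. @out_sg carries the command
 * and its data; @in_sg receives the one-byte device ack. The command is
 * not waited for here; callers collect acks via vhost_vdpa_net_svq_poll()
 * or vhost_vdpa_net_svq_flush().
 */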
static ssize_t vhost_vdpa_net_cvq_add(VhostVDPAState *s,
                                    const struct iovec *out_sg, size_t out_num,
                                    const struct iovec *in_sg, size_t in_num)
{
    VhostShadowVirtqueue *svq = g_ptr_array_index(s->vhost_vdpa.shadow_vqs, 0);
    int r;

    r = vhost_svq_add(svq, out_sg, out_num, in_sg, in_num, NULL);
    if (unlikely(r != 0)) {
        if (unlikely(r == -ENOSPC)) {
            qemu_log_mask(LOG_GUEST_ERROR, "%s: No space on device queue\n",
                          __func__);
        }
    }

    return r;
}

/*
 * Convenience wrapper to poll SVQ for multiple control commands.
 *
 * The caller must hold the BQL when invoking this function, and must collect
 * the answer before the BQL is released, since SVQ will poll the used ring
 * by itself once the BQL is dropped.
 */
static ssize_t vhost_vdpa_net_svq_poll(VhostVDPAState *s, size_t cmds_in_flight)
{
    VhostShadowVirtqueue *svq = g_ptr_array_index(s->vhost_vdpa.shadow_vqs, 0);
    return vhost_svq_poll(svq, cmds_in_flight);
}

static void vhost_vdpa_net_load_cursor_reset(VhostVDPAState *s,
                                             struct iovec *out_cursor,
                                             struct iovec *in_cursor)
{
    /* reset the cursor of the output buffer for the device */
    out_cursor->iov_base = s->cvq_cmd_out_buffer;
    out_cursor->iov_len = vhost_vdpa_net_cvq_cmd_page_len();

    /* reset the cursor of the in buffer for the device */
    in_cursor->iov_base = s->status;
    in_cursor->iov_len = vhost_vdpa_net_cvq_cmd_page_len();
}

/*
 * Poll SVQ for multiple pending control commands and check the device's ack.
 *
 * Caller should hold the BQL when invoking this function.
 *
 * @s: The VhostVDPAState
 * @len: The length of the pending status shadow buffer
 */
static ssize_t vhost_vdpa_net_svq_flush(VhostVDPAState *s, size_t len)
{
    /* device uses a one-byte length ack for each control command */
    ssize_t dev_written = vhost_vdpa_net_svq_poll(s, len);
    if (unlikely(dev_written != len)) {
        return -EIO;
    }

    /* check the device's ack */
    for (int i = 0; i < len; ++i) {
        if (s->status[i] != VIRTIO_NET_OK) {
            return -EIO;
        }
    }
    return 0;
}

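/*
 * Pack one control command at the current cursor position of the shadow
 * buffers and add it to the shadow CVQ. Commands are batched: the out
 * cursor advances by the full command size and the in cursor by one status
 * byte per command, and the pending batch is flushed first whenever the
 * SVQ or the shadow buffers run out of room.
 */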
static ssize_t vhost_vdpa_net_load_cmd(VhostVDPAState *s,
                                       struct iovec *out_cursor,
                                       struct iovec *in_cursor, uint8_t class,
                                       uint8_t cmd, const struct iovec *data_sg,
                                       size_t data_num)
{
    const struct virtio_net_ctrl_hdr ctrl = {
        .class = class,
        .cmd = cmd,
    };
    size_t data_size = iov_size(data_sg, data_num), cmd_size;
    struct iovec out, in;
    ssize_t r;
    unsigned dummy_cursor_iov_cnt;
    VhostShadowVirtqueue *svq = g_ptr_array_index(s->vhost_vdpa.shadow_vqs, 0);

    assert(data_size < vhost_vdpa_net_cvq_cmd_page_len() - sizeof(ctrl));
    cmd_size = sizeof(ctrl) + data_size;
    trace_vhost_vdpa_net_load_cmd(s, class, cmd, data_num, data_size);
    if (vhost_svq_available_slots(svq) < 2 ||
        iov_size(out_cursor, 1) < cmd_size) {
        /*
         * It is time to flush all pending control commands if SVQ is full
         * or control commands shadow buffers are full.
         *
         * We can poll here since we've had BQL from the time
         * we sent the descriptor.
         */
        r = vhost_vdpa_net_svq_flush(s, in_cursor->iov_base -
                                     (void *)s->status);
        if (unlikely(r < 0)) {
            return r;
        }

        vhost_vdpa_net_load_cursor_reset(s, out_cursor, in_cursor);
    }

    /* pack the CVQ command header */
    iov_from_buf(out_cursor, 1, 0, &ctrl, sizeof(ctrl));
    /* pack the CVQ command-specific data */
    iov_to_buf(data_sg, data_num, 0,
               out_cursor->iov_base + sizeof(ctrl), data_size);

    /* extract the required buffer from the cursor for output */
    iov_copy(&out, 1, out_cursor, 1, 0, cmd_size);
    /* extract the required buffer from the cursor for input */
    iov_copy(&in, 1, in_cursor, 1, 0, sizeof(*s->status));

    r = vhost_vdpa_net_cvq_add(s, &out, 1, &in, 1);
    if (unlikely(r < 0)) {
        trace_vhost_vdpa_net_load_cmd_retval(s, class, cmd, r);
        return r;
    }

    /* iterate the cursors */
    dummy_cursor_iov_cnt = 1;
    iov_discard_front(&out_cursor, &dummy_cursor_iov_cnt, cmd_size);
    dummy_cursor_iov_cnt = 1;
    iov_discard_front(&in_cursor, &dummy_cursor_iov_cnt, sizeof(*s->status));

    return 0;
}

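/*
 * Restore the device's MAC address and MAC filter table at the migration
 * destination by replaying the corresponding CVQ commands, skipping the
 * ones whose driver configuration already matches the device's defaults.
 */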
static int vhost_vdpa_net_load_mac(VhostVDPAState *s, const VirtIONet *n,
                                   struct iovec *out_cursor,
                                   struct iovec *in_cursor)
{
    if (virtio_vdev_has_feature(&n->parent_obj, VIRTIO_NET_F_CTRL_MAC_ADDR)) {
        const struct iovec data = {
            .iov_base = (void *)n->mac,
            .iov_len = sizeof(n->mac),
        };
        ssize_t r = vhost_vdpa_net_load_cmd(s, out_cursor, in_cursor,
                                            VIRTIO_NET_CTRL_MAC,
                                            VIRTIO_NET_CTRL_MAC_ADDR_SET,
                                            &data, 1);
        if (unlikely(r < 0)) {
            return r;
        }
    }

    /*
     * According to the VirtIO standard, "The device MUST have an
     * empty MAC filtering table on reset."
     *
     * Therefore, there is no need to send this CVQ command if the
     * driver also sets an empty MAC filter table, which aligns with
     * the device's defaults.
     *
     * Note that the device's defaults can mismatch the driver's
     * configuration only at live migration.
     */
    if (!virtio_vdev_has_feature(&n->parent_obj, VIRTIO_NET_F_CTRL_RX) ||
        n->mac_table.in_use == 0) {
        return 0;
    }

    uint32_t uni_entries = n->mac_table.first_multi,
             uni_macs_size = uni_entries * ETH_ALEN,
             mul_entries = n->mac_table.in_use - uni_entries,
             mul_macs_size = mul_entries * ETH_ALEN;
    struct virtio_net_ctrl_mac uni = {
        .entries = cpu_to_le32(uni_entries),
    };
    struct virtio_net_ctrl_mac mul = {
        .entries = cpu_to_le32(mul_entries),
    };
    const struct iovec data[] = {
        {
            .iov_base = &uni,
            .iov_len = sizeof(uni),
        }, {
            .iov_base = n->mac_table.macs,
            .iov_len = uni_macs_size,
        }, {
            .iov_base = &mul,
            .iov_len = sizeof(mul),
        }, {
            .iov_base = &n->mac_table.macs[uni_macs_size],
            .iov_len = mul_macs_size,
        },
    };
    ssize_t r = vhost_vdpa_net_load_cmd(s, out_cursor, in_cursor,
                                        VIRTIO_NET_CTRL_MAC,
                                        VIRTIO_NET_CTRL_MAC_TABLE_SET,
                                        data, ARRAY_SIZE(data));
    if (unlikely(r < 0)) {
        return r;
    }

    return 0;
}

static int vhost_vdpa_net_load_rss(VhostVDPAState *s, const VirtIONet *n,
                                   struct iovec *out_cursor,
                                   struct iovec *in_cursor, bool do_rss)
{
    struct virtio_net_rss_config cfg = {};
    ssize_t r;
    g_autofree uint16_t *table = NULL;

    /*
     * According to the VirtIO standard, "Initially the device has all hash
     * types disabled and reports only VIRTIO_NET_HASH_REPORT_NONE.".
     *
     * Therefore, there is no need to send this CVQ command if the
     * driver disables all hash types, which aligns with
     * the device's defaults.
     *
     * Note that the device's defaults can mismatch the driver's
     * configuration only at live migration.
     */
    if (!n->rss_data.enabled ||
        n->rss_data.hash_types == VIRTIO_NET_HASH_REPORT_NONE) {
        return 0;
    }

    table = g_malloc_n(n->rss_data.indirections_len,
                       sizeof(n->rss_data.indirections_table[0]));
    cfg.hash_types = cpu_to_le32(n->rss_data.hash_types);

    if (do_rss) {
        /*
         * According to the VirtIO standard, "Number of entries in
         * indirection_table is (indirection_table_mask + 1)".
         */
        cfg.indirection_table_mask = cpu_to_le16(n->rss_data.indirections_len -
                                                 1);
        cfg.unclassified_queue = cpu_to_le16(n->rss_data.default_queue);
        for (int i = 0; i < n->rss_data.indirections_len; ++i) {
            table[i] = cpu_to_le16(n->rss_data.indirections_table[i]);
        }
        cfg.max_tx_vq = cpu_to_le16(n->curr_queue_pairs);
    } else {
        /*
         * According to the VirtIO standard, "Field reserved MUST contain
         * zeroes. It is defined to make the structure to match the layout
         * of virtio_net_rss_config structure, defined in 5.1.6.5.7.".
         *
         * Therefore, we need to zero the fields in
         * struct virtio_net_rss_config, which corresponds to the
         * `reserved` field in struct virtio_net_hash_config.
         *
         * Note that all other fields are zeroed at their definitions,
         * except for the `indirection_table` field, where the actual data
         * is stored in the `table` variable to ensure compatibility
         * with the RSS case. Therefore, we need to zero the `table`
         * variable here.
         */
        table[0] = 0;
    }

    /*
     * Since virtio_net_handle_rss() currently does not restore the hash key
     * length parsed from the CVQ command sent from the guest into
     * n->rss_data and uses the maximum key length in other code, we also
     * employ the maximum key length here.
     */
    cfg.hash_key_length = sizeof(n->rss_data.key);

    const struct iovec data[] = {
        {
            .iov_base = &cfg,
            .iov_len = offsetof(struct virtio_net_rss_config,
                                indirection_table),
        }, {
            .iov_base = table,
            .iov_len = n->rss_data.indirections_len *
                       sizeof(n->rss_data.indirections_table[0]),
        }, {
            .iov_base = &cfg.max_tx_vq,
            .iov_len = offsetof(struct virtio_net_rss_config, hash_key_data) -
                       offsetof(struct virtio_net_rss_config, max_tx_vq),
        }, {
            .iov_base = (void *)n->rss_data.key,
            .iov_len = sizeof(n->rss_data.key),
        }
    };

    r = vhost_vdpa_net_load_cmd(s, out_cursor, in_cursor,
                                VIRTIO_NET_CTRL_MQ,
                                do_rss ? VIRTIO_NET_CTRL_MQ_RSS_CONFIG :
                                VIRTIO_NET_CTRL_MQ_HASH_CONFIG,
                                data, ARRAY_SIZE(data));
    if (unlikely(r < 0)) {
        return r;
    }

    return 0;
}

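/*
 * Restore the multiqueue configuration: set the number of active queue
 * pairs, then reload the RSS or hash-report state that depends on it.
 */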
static int vhost_vdpa_net_load_mq(VhostVDPAState *s,
                                  const VirtIONet *n,
                                  struct iovec *out_cursor,
                                  struct iovec *in_cursor)
{
    struct virtio_net_ctrl_mq mq;
    ssize_t r;

    if (!virtio_vdev_has_feature(&n->parent_obj, VIRTIO_NET_F_MQ)) {
        return 0;
    }

    trace_vhost_vdpa_net_load_mq(s, n->curr_queue_pairs);

    mq.virtqueue_pairs = cpu_to_le16(n->curr_queue_pairs);
    const struct iovec data = {
        .iov_base = &mq,
        .iov_len = sizeof(mq),
    };
    r = vhost_vdpa_net_load_cmd(s, out_cursor, in_cursor,
                                VIRTIO_NET_CTRL_MQ,
                                VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET,
                                &data, 1);
    if (unlikely(r < 0)) {
        return r;
    }

    if (virtio_vdev_has_feature(&n->parent_obj, VIRTIO_NET_F_RSS)) {
        /* load the receive-side scaling state */
        r = vhost_vdpa_net_load_rss(s, n, out_cursor, in_cursor, true);
        if (unlikely(r < 0)) {
            return r;
        }
    } else if (virtio_vdev_has_feature(&n->parent_obj,
                                       VIRTIO_NET_F_HASH_REPORT)) {
        /* load the hash calculation state */
        r = vhost_vdpa_net_load_rss(s, n, out_cursor, in_cursor, false);
        if (unlikely(r < 0)) {
            return r;
        }
    }

    return 0;
}

static int vhost_vdpa_net_load_offloads(VhostVDPAState *s,
                                        const VirtIONet *n,
                                        struct iovec *out_cursor,
                                        struct iovec *in_cursor)
{
    uint64_t offloads;
    ssize_t r;

    if (!virtio_vdev_has_feature(&n->parent_obj,
                                 VIRTIO_NET_F_CTRL_GUEST_OFFLOADS)) {
        return 0;
    }

    if (n->curr_guest_offloads == virtio_net_supported_guest_offloads(n)) {
        /*
         * According to the VirtIO standard, "Upon feature negotiation
         * corresponding offload gets enabled to preserve
         * backward compatibility.".
         *
         * Therefore, there is no need to send this CVQ command if the
         * driver also enables all supported offloads, which aligns with
         * the device's defaults.
         *
         * Note that the device's defaults can mismatch the driver's
         * configuration only at live migration.
         */
        return 0;
    }

    offloads = cpu_to_le64(n->curr_guest_offloads);
    const struct iovec data = {
        .iov_base = &offloads,
        .iov_len = sizeof(offloads),
    };
    r = vhost_vdpa_net_load_cmd(s, out_cursor, in_cursor,
                                VIRTIO_NET_CTRL_GUEST_OFFLOADS,
                                VIRTIO_NET_CTRL_GUEST_OFFLOADS_SET,
                                &data, 1);
    if (unlikely(r < 0)) {
        return r;
    }

    return 0;
}

static int vhost_vdpa_net_load_rx_mode(VhostVDPAState *s,
                                       struct iovec *out_cursor,
                                       struct iovec *in_cursor,
                                       uint8_t cmd,
                                       uint8_t on)
{
    const struct iovec data = {
        .iov_base = &on,
        .iov_len = sizeof(on),
    };
    ssize_t r;

    r = vhost_vdpa_net_load_cmd(s, out_cursor, in_cursor,
                                VIRTIO_NET_CTRL_RX, cmd, &data, 1);
    if (unlikely(r < 0)) {
        return r;
    }

    return 0;
}

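/*
 * Restore the receive mode flags (promiscuous, all-multicast, and the
 * VIRTIO_NET_F_CTRL_RX_EXTRA modes), sending a command only when the
 * driver's configuration differs from the device's post-reset defaults.
 */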
static int vhost_vdpa_net_load_rx(VhostVDPAState *s,
                                  const VirtIONet *n,
                                  struct iovec *out_cursor,
                                  struct iovec *in_cursor)
{
    ssize_t r;

    if (!virtio_vdev_has_feature(&n->parent_obj, VIRTIO_NET_F_CTRL_RX)) {
        return 0;
    }

    /*
     * According to virtio_net_reset(), device turns promiscuous mode
     * on by default.
     *
     * Additionally, according to the VirtIO standard, "Since there are
     * no guarantees, it can use a hash filter or silently switch to
     * allmulti or promiscuous mode if it is given too many addresses.".
     * QEMU marks `n->mac_table.uni_overflow` if guest sets too many
     * non-multicast MAC addresses, indicating that promiscuous mode
     * should be enabled.
     *
     * Therefore, QEMU should only send this CVQ command if the
     * `n->mac_table.uni_overflow` is not marked and `n->promisc` is off,
     * which sets promiscuous mode off, different from the device's defaults.
     *
     * Note that the device's defaults can mismatch the driver's
     * configuration only at live migration.
     */
    if (!n->mac_table.uni_overflow && !n->promisc) {
        r = vhost_vdpa_net_load_rx_mode(s, out_cursor, in_cursor,
                                        VIRTIO_NET_CTRL_RX_PROMISC, 0);
        if (unlikely(r < 0)) {
            return r;
        }
    }

    /*
     * According to virtio_net_reset(), device turns all-multicast mode
     * off by default.
     *
     * According to the VirtIO standard, "Since there are no guarantees,
     * it can use a hash filter or silently switch to allmulti or
     * promiscuous mode if it is given too many addresses.". QEMU marks
     * `n->mac_table.multi_overflow` if guest sets too many
     * multicast MAC addresses.
     *
     * Therefore, QEMU should only send this CVQ command if the
     * `n->mac_table.multi_overflow` is marked or `n->allmulti` is on,
     * which sets all-multicast mode on, different from the device's defaults.
     *
     * Note that the device's defaults can mismatch the driver's
     * configuration only at live migration.
     */
    if (n->mac_table.multi_overflow || n->allmulti) {
        r = vhost_vdpa_net_load_rx_mode(s, out_cursor, in_cursor,
                                        VIRTIO_NET_CTRL_RX_ALLMULTI, 1);
        if (unlikely(r < 0)) {
            return r;
        }
    }

    if (!virtio_vdev_has_feature(&n->parent_obj, VIRTIO_NET_F_CTRL_RX_EXTRA)) {
        return 0;
    }

    /*
     * According to virtio_net_reset(), device turns all-unicast mode
     * off by default.
     *
     * Therefore, QEMU should only send this CVQ command if the driver
     * sets all-unicast mode on, different from the device's defaults.
     *
     * Note that the device's defaults can mismatch the driver's
     * configuration only at live migration.
     */
    if (n->alluni) {
        r = vhost_vdpa_net_load_rx_mode(s, out_cursor, in_cursor,
                                        VIRTIO_NET_CTRL_RX_ALLUNI, 1);
        if (unlikely(r < 0)) {
            return r;
        }
    }

    /*
     * According to virtio_net_reset(), device turns non-multicast mode
     * off by default.
     *
     * Therefore, QEMU should only send this CVQ command if the driver
     * sets non-multicast mode on, different from the device's defaults.
     *
     * Note that the device's defaults can mismatch the driver's
     * configuration only at live migration.
     */
    if (n->nomulti) {
        r = vhost_vdpa_net_load_rx_mode(s, out_cursor, in_cursor,
                                        VIRTIO_NET_CTRL_RX_NOMULTI, 1);
        if (unlikely(r < 0)) {
            return r;
        }
    }

    /*
     * According to virtio_net_reset(), device turns non-unicast mode
     * off by default.
     *
     * Therefore, QEMU should only send this CVQ command if the driver
     * sets non-unicast mode on, different from the device's defaults.
     *
     * Note that the device's defaults can mismatch the driver's
     * configuration only at live migration.
     */
    if (n->nouni) {
        r = vhost_vdpa_net_load_rx_mode(s, out_cursor, in_cursor,
                                        VIRTIO_NET_CTRL_RX_NOUNI, 1);
        if (unlikely(r < 0)) {
            return r;
        }
    }

    /*
     * According to virtio_net_reset(), device turns non-broadcast mode
     * off by default.
     *
     * Therefore, QEMU should only send this CVQ command if the driver
     * sets non-broadcast mode on, different from the device's defaults.
     *
     * Note that the device's defaults can mismatch the driver's
     * configuration only at live migration.
     */
    if (n->nobcast) {
        r = vhost_vdpa_net_load_rx_mode(s, out_cursor, in_cursor,
                                        VIRTIO_NET_CTRL_RX_NOBCAST, 1);
        if (unlikely(r < 0)) {
            return r;
        }
    }

    return 0;
}

static int vhost_vdpa_net_load_single_vlan(VhostVDPAState *s,
                                           const VirtIONet *n,
                                           struct iovec *out_cursor,
                                           struct iovec *in_cursor,
                                           uint16_t vid)
{
    const struct iovec data = {
        .iov_base = &vid,
        .iov_len = sizeof(vid),
    };
    ssize_t r = vhost_vdpa_net_load_cmd(s, out_cursor, in_cursor,
                                        VIRTIO_NET_CTRL_VLAN,
                                        VIRTIO_NET_CTRL_VLAN_ADD,
                                        &data, 1);
    if (unlikely(r < 0)) {
        return r;
    }

    return 0;
}

static int vhost_vdpa_net_load_vlan(VhostVDPAState *s,
                                    const VirtIONet *n,
                                    struct iovec *out_cursor,
                                    struct iovec *in_cursor)
{
    int r;

    if (!virtio_vdev_has_feature(&n->parent_obj, VIRTIO_NET_F_CTRL_VLAN)) {
        return 0;
    }

    for (int i = 0; i < MAX_VLAN >> 5; i++) {
        for (int j = 0; n->vlans[i] && j <= 0x1f; j++) {
            if (n->vlans[i] & (1U << j)) {
                r = vhost_vdpa_net_load_single_vlan(s, n, out_cursor,
                                                    in_cursor, (i << 5) + j);
                if (unlikely(r != 0)) {
                    return r;
                }
            }
        }
    }

    return 0;
}

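/*
 * NetClientInfo.load callback for the CVQ client: make the control vring
 * ready and, when SVQ is in use, replay the whole device state (MAC, MQ,
 * offloads, rx mode and VLAN filters) through the shadow CVQ before the
 * remaining data vrings are enabled.
 */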
static int vhost_vdpa_net_cvq_load(NetClientState *nc)
{
    VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);
    struct vhost_vdpa *v = &s->vhost_vdpa;
    const VirtIONet *n;
    int r;
    struct iovec out_cursor, in_cursor;

    assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);

    r = vhost_vdpa_set_vring_ready(v, v->dev->vq_index);
    if (unlikely(r < 0)) {
        return r;
    }

    if (v->shadow_vqs_enabled) {
        n = VIRTIO_NET(v->dev->vdev);
        vhost_vdpa_net_load_cursor_reset(s, &out_cursor, &in_cursor);
        r = vhost_vdpa_net_load_mac(s, n, &out_cursor, &in_cursor);
        if (unlikely(r < 0)) {
            return r;
        }
        r = vhost_vdpa_net_load_mq(s, n, &out_cursor, &in_cursor);
        if (unlikely(r)) {
            return r;
        }
        r = vhost_vdpa_net_load_offloads(s, n, &out_cursor, &in_cursor);
        if (unlikely(r)) {
            return r;
        }
        r = vhost_vdpa_net_load_rx(s, n, &out_cursor, &in_cursor);
        if (unlikely(r)) {
            return r;
        }
        r = vhost_vdpa_net_load_vlan(s, n, &out_cursor, &in_cursor);
        if (unlikely(r)) {
            return r;
        }

        /*
         * We need to poll and check all of the device's pending used buffers.
         *
         * We can poll here since we've had BQL from the time
         * we sent the descriptor.
         */
        r = vhost_vdpa_net_svq_flush(s, in_cursor.iov_base - (void *)s->status);
        if (unlikely(r)) {
            return r;
        }
    }

    for (int i = 0; i < v->dev->vq_index; ++i) {
        r = vhost_vdpa_set_vring_ready(v, i);
        if (unlikely(r < 0)) {
            return r;
        }
    }

    return 0;
}

static NetClientInfo net_vhost_vdpa_cvq_info = {
    .type = NET_CLIENT_DRIVER_VHOST_VDPA,
    .size = sizeof(VhostVDPAState),
    .receive = vhost_vdpa_receive,
    .start = vhost_vdpa_net_cvq_start,
    .load = vhost_vdpa_net_cvq_load,
    .stop = vhost_vdpa_net_cvq_stop,
    .cleanup = vhost_vdpa_cleanup,
    .has_vnet_hdr = vhost_vdpa_has_vnet_hdr,
    .has_ufo = vhost_vdpa_has_ufo,
    .check_peer_type = vhost_vdpa_check_peer_type,
    .set_steering_ebpf = vhost_vdpa_set_steering_ebpf,
};

/*
 * Forward the oversized VIRTIO_NET_CTRL_MAC_TABLE_SET CVQ command to the
 * vdpa device.
 *
 * Considering that QEMU cannot send the entire filter table to the
 * vdpa device, it should send the VIRTIO_NET_CTRL_RX_PROMISC CVQ
 * command to enable promiscuous mode to receive all packets,
 * according to the VirtIO standard, "Since there are no guarantees,
 * it can use a hash filter or silently switch to allmulti or
 * promiscuous mode if it is given too many addresses.".
 *
 * Since QEMU ignores MAC addresses beyond `MAC_TABLE_ENTRIES` and
 * marks `n->mac_table.x_overflow` accordingly, receiving
 * (`MAC_TABLE_ENTRIES` + 1) or more non-multicast MAC addresses has
 * the same effect on the device model. The same applies to multicast
 * MAC addresses.
 *
 * Therefore, QEMU can provide the device model with a fake
 * VIRTIO_NET_CTRL_MAC_TABLE_SET command with (`MAC_TABLE_ENTRIES` + 1)
 * non-multicast MAC addresses and (`MAC_TABLE_ENTRIES` + 1) multicast
 * MAC addresses. This ensures that the device model marks
 * `n->mac_table.uni_overflow` and `n->mac_table.multi_overflow`,
 * allowing all packets to be received, which aligns with the
 * state of the vdpa device.
 */
vhost_vdpa_net_excessive_mac_filter_cvq_add(VhostVDPAState * s,VirtQueueElement * elem,struct iovec * out,const struct iovec * in)1341  static int vhost_vdpa_net_excessive_mac_filter_cvq_add(VhostVDPAState *s,
1342                                                         VirtQueueElement *elem,
1343                                                         struct iovec *out,
1344                                                         const struct iovec *in)
1345  {
1346      struct virtio_net_ctrl_mac mac_data, *mac_ptr;
1347      struct virtio_net_ctrl_hdr *hdr_ptr;
1348      uint32_t cursor;
1349      ssize_t r;
1350      uint8_t on = 1;
1351  
1352      /* parse the non-multicast MAC address entries from CVQ command */
1353      cursor = sizeof(*hdr_ptr);
1354      r = iov_to_buf(elem->out_sg, elem->out_num, cursor,
1355                     &mac_data, sizeof(mac_data));
1356      if (unlikely(r != sizeof(mac_data))) {
1357          /*
1358           * If the CVQ command is invalid, we should simulate the vdpa device
1359           * to reject the VIRTIO_NET_CTRL_MAC_TABLE_SET CVQ command
1360           */
1361          *s->status = VIRTIO_NET_ERR;
1362          return sizeof(*s->status);
1363      }
1364      cursor += sizeof(mac_data) + le32_to_cpu(mac_data.entries) * ETH_ALEN;
1365  
1366      /* parse the multicast MAC address entries from CVQ command */
1367      r = iov_to_buf(elem->out_sg, elem->out_num, cursor,
1368                     &mac_data, sizeof(mac_data));
1369      if (r != sizeof(mac_data)) {
1370          /*
1371           * If the CVQ command is invalid, we should simulate the vdpa device
1372           * to reject the VIRTIO_NET_CTRL_MAC_TABLE_SET CVQ command
1373           */
1374          *s->status = VIRTIO_NET_ERR;
1375          return sizeof(*s->status);
1376      }
1377      cursor += sizeof(mac_data) + le32_to_cpu(mac_data.entries) * ETH_ALEN;
1378  
1379      /* validate the CVQ command */
1380      if (iov_size(elem->out_sg, elem->out_num) != cursor) {
1381          /*
1382           * If the CVQ command is invalid, we should simulate the vdpa device
1383           * to reject the VIRTIO_NET_CTRL_MAC_TABLE_SET CVQ command
1384           */
1385          *s->status = VIRTIO_NET_ERR;
1386          return sizeof(*s->status);
1387      }
1388  
1389      /*
1390       * According to VirtIO standard, "Since there are no guarantees,
1391       * it can use a hash filter or silently switch to allmulti or
1392       * promiscuous mode if it is given too many addresses.".
1393       *
1394       * Therefore, considering that QEMU is unable to send the entire
1395       * filter table to the vdpa device, it should send the
1396       * VIRTIO_NET_CTRL_RX_PROMISC CVQ command to enable promiscuous mode
1397       */
1398      hdr_ptr = out->iov_base;
1399      out->iov_len = sizeof(*hdr_ptr) + sizeof(on);
1400  
1401      hdr_ptr->class = VIRTIO_NET_CTRL_RX;
1402      hdr_ptr->cmd = VIRTIO_NET_CTRL_RX_PROMISC;
1403      iov_from_buf(out, 1, sizeof(*hdr_ptr), &on, sizeof(on));
1404      r = vhost_vdpa_net_cvq_add(s, out, 1, in, 1);
1405      if (unlikely(r < 0)) {
1406          return r;
1407      }
1408  
1409      /*
1410       * We can poll here since we've had BQL from the time
1411       * we sent the descriptor.
1412       */
1413      r = vhost_vdpa_net_svq_poll(s, 1);
1414      if (unlikely(r < sizeof(*s->status))) {
1415          return r;
1416      }
1417      if (*s->status != VIRTIO_NET_OK) {
1418          return sizeof(*s->status);
1419      }
1420  
1421      /*
1422       * QEMU should also send a fake VIRTIO_NET_CTRL_MAC_TABLE_SET CVQ
1423       * command to the device model, including (`MAC_TABLE_ENTRIES` + 1)
1424       * non-multicast MAC addresses and (`MAC_TABLE_ENTRIES` + 1)
1425       * multicast MAC addresses.
1426       *
1427       * By doing so, the device model can mark `n->mac_table.uni_overflow`
1428       * and `n->mac_table.multi_overflow`, enabling all packets to be
1429       * received, which aligns with the state of the vdpa device.
1430       */
1431      cursor = 0;
1432      uint32_t fake_uni_entries = MAC_TABLE_ENTRIES + 1,
1433               fake_mul_entries = MAC_TABLE_ENTRIES + 1,
1434               fake_cvq_size = sizeof(struct virtio_net_ctrl_hdr) +
1435                               sizeof(mac_data) + fake_uni_entries * ETH_ALEN +
1436                               sizeof(mac_data) + fake_mul_entries * ETH_ALEN;
1437  
1438      assert(fake_cvq_size < vhost_vdpa_net_cvq_cmd_page_len());
1439      out->iov_len = fake_cvq_size;
1440  
1441      /* pack the header for fake CVQ command */
1442      hdr_ptr = out->iov_base + cursor;
1443      hdr_ptr->class = VIRTIO_NET_CTRL_MAC;
1444      hdr_ptr->cmd = VIRTIO_NET_CTRL_MAC_TABLE_SET;
1445      cursor += sizeof(*hdr_ptr);
1446  
1447      /*
1448       * Pack the non-multicast MAC addresses part for fake CVQ command.
1449       *
1450       * According to virtio_net_handle_mac(), QEMU doesn't verify the MAC
1451       * addresses provided in the CVQ command. Therefore, only the
1452       * entries field needs to be prepared in the CVQ command.
1453       */
1454      mac_ptr = out->iov_base + cursor;
1455      mac_ptr->entries = cpu_to_le32(fake_uni_entries);
1456      cursor += sizeof(*mac_ptr) + fake_uni_entries * ETH_ALEN;
1457  
1458      /*
1459       * Pack the multicast MAC addresses part for fake CVQ command.
1460       *
1461       * According to virtio_net_handle_mac(), QEMU doesn't verify the MAC
1462       * addresses provided in the CVQ command. Therefore, only the
1463       * entries field needs to be prepared in the CVQ command.
1464       */
1465      mac_ptr = out->iov_base + cursor;
1466      mac_ptr->entries = cpu_to_le32(fake_mul_entries);
1467  
1468      /*
1469       * Simulate QEMU polling a used buffer from the vdpa device
1470       * for the VIRTIO_NET_CTRL_MAC_TABLE_SET CVQ command
1471       */
1472      return sizeof(*s->status);
1473  }
1474  
1475  /**
1476   * Validate and copy control virtqueue commands.
1477   *
1478   * Following QEMU guidelines, we offer a copy of the buffers to the device to
1479   * prevent TOCTOU bugs: the guest could otherwise rewrite a buffer between
   * validation and the moment the device consumes it.
1480   */
1481  static int vhost_vdpa_net_handle_ctrl_avail(VhostShadowVirtqueue *svq,
1482                                              VirtQueueElement *elem,
1483                                              void *opaque)
1484  {
1485      VhostVDPAState *s = opaque;
1486      size_t in_len;
1487      const struct virtio_net_ctrl_hdr *ctrl;
1488      virtio_net_ctrl_ack status = VIRTIO_NET_ERR;
1489      /* Out buffer sent to both the vdpa device and the device model */
1490      struct iovec out = {
1491          .iov_base = s->cvq_cmd_out_buffer,
1492      };
1493      /* in buffer used for device model */
1494      const struct iovec model_in = {
1495          .iov_base = &status,
1496          .iov_len = sizeof(status),
1497      };
1498      /* in buffer used for vdpa device */
1499      const struct iovec vdpa_in = {
1500          .iov_base = s->status,
1501          .iov_len = sizeof(*s->status),
1502      };
1503      ssize_t dev_written = -EINVAL;
1504  
1505      out.iov_len = iov_to_buf(elem->out_sg, elem->out_num, 0,
1506                               s->cvq_cmd_out_buffer,
1507                               vhost_vdpa_net_cvq_cmd_page_len());
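      /*
       * The copy above is capped at vhost_vdpa_net_cvq_cmd_page_len() bytes,
       * so a larger guest command is truncated here; the MAC filter special
       * case below detects this by comparing out.iov_len against the full
       * size of the guest buffer.
       */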
1508  
1509      ctrl = s->cvq_cmd_out_buffer;
1510      if (ctrl->class == VIRTIO_NET_CTRL_ANNOUNCE) {
1511          /*
1512           * Guest announce capability is emulated by QEMU, so don't forward it to
1513           * the device.
1514           */
1515          dev_written = sizeof(status);
1516          *s->status = VIRTIO_NET_OK;
1517      } else if (unlikely(ctrl->class == VIRTIO_NET_CTRL_MAC &&
1518                          ctrl->cmd == VIRTIO_NET_CTRL_MAC_TABLE_SET &&
1519                          iov_size(elem->out_sg, elem->out_num) > out.iov_len)) {
1520          /*
1521           * Due to the size limitation of the out buffer sent to the vdpa device,
1522           * which is determined by vhost_vdpa_net_cvq_cmd_page_len(), excessive
1523           * MAC addresses set by the driver for the filter table can cause
1524           * truncation of the CVQ command in QEMU. As a result, the vdpa device
1525           * rejects the flawed CVQ command.
1526           *
1527           * Therefore, QEMU must handle this situation instead of sending
1528           * the CVQ command directly.
1529           */
1530          dev_written = vhost_vdpa_net_excessive_mac_filter_cvq_add(s, elem,
1531                                                              &out, &vdpa_in);
1532          if (unlikely(dev_written < 0)) {
1533              goto out;
1534          }
1535      } else {
1536          ssize_t r;
1537          r = vhost_vdpa_net_cvq_add(s, &out, 1, &vdpa_in, 1);
1538          if (unlikely(r < 0)) {
1539              dev_written = r;
1540              goto out;
1541          }
1542  
1543          /*
1544           * We can poll here since we've held the BQL from the time
1545           * we sent the descriptor.
1546           */
1547          dev_written = vhost_vdpa_net_svq_poll(s, 1);
1548      }
1549  
1550      if (unlikely(dev_written < sizeof(status))) {
1551          error_report("Insufficient written data (%zu)", dev_written);
1552          goto out;
1553      }
1554  
1555      if (*s->status != VIRTIO_NET_OK) {
1556          goto out;
1557      }
1558  
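      /*
       * Forward the command to the device model too, so its internal state
       * stays in sync; the model writes its ack into `status` through
       * model_in.
       */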
1559      status = VIRTIO_NET_ERR;
1560      virtio_net_handle_ctrl_iov(svq->vdev, &model_in, 1, &out, 1);
1561      if (status != VIRTIO_NET_OK) {
1562          error_report("Bad CVQ processing in model");
1563      }
1564  
1565  out:
1566      in_len = iov_from_buf(elem->in_sg, elem->in_num, 0, &status,
1567                            sizeof(status));
1568      if (unlikely(in_len < sizeof(status))) {
1569          error_report("Bad device CVQ written length");
1570      }
1571      vhost_svq_push_elem(svq, elem, MIN(in_len, sizeof(status)));
1572      /*
1573       * `elem` belongs to vhost_vdpa_net_handle_ctrl_avail() only when
1574       * the function successfully forwards the CVQ command, indicated
1575       * by a non-negative value of `dev_written`. Otherwise, it still
1576       * belongs to SVQ.
1577       * This function should only free `elem` when it owns it.
1578       */
1579      if (dev_written >= 0) {
1580          g_free(elem);
1581      }
1582      return dev_written < 0 ? dev_written : 0;
1583  }
1584  
1585  static const VhostShadowVirtqueueOps vhost_vdpa_net_svq_ops = {
1586      .avail_handler = vhost_vdpa_net_handle_ctrl_avail,
1587  };
1588  
1589  /**
1590   * Probe if CVQ is isolated
1591   *
1592   * @device_fd         The vdpa device fd
1593   * @features          Features offered by the device.
1594   * @cvq_index         The control vq pair index
1595   *
1596   * Returns <0 on failure, 0 if CVQ is not isolated, and 1 if it is.
1597   */
1598  static int vhost_vdpa_probe_cvq_isolation(int device_fd, uint64_t features,
1599                                            int cvq_index, Error **errp)
1600  {
1601      ERRP_GUARD();
1602      uint64_t backend_features;
1603      int64_t cvq_group;
1604      uint8_t status = VIRTIO_CONFIG_S_ACKNOWLEDGE |
1605                       VIRTIO_CONFIG_S_DRIVER;
1606      int r;
1607  
1608      r = ioctl(device_fd, VHOST_GET_BACKEND_FEATURES, &backend_features);
1609      if (unlikely(r < 0)) {
1610          error_setg_errno(errp, errno, "Cannot get vdpa backend_features");
1611          return r;
1612      }
1613  
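      /*
       * Without VHOST_BACKEND_F_IOTLB_ASID the device cannot place CVQ in a
       * separate address space, so it cannot be isolated.
       */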
1614      if (!(backend_features & BIT_ULL(VHOST_BACKEND_F_IOTLB_ASID))) {
1615          return 0;
1616      }
1617  
1618      r = ioctl(device_fd, VHOST_VDPA_SET_STATUS, &status);
1619      if (unlikely(r)) {
1620          error_setg_errno(errp, errno, "Cannot set device status");
1621          goto out;
1622      }
1623  
1624      r = ioctl(device_fd, VHOST_SET_FEATURES, &features);
1625      if (unlikely(r)) {
1626          error_setg_errno(errp, errno, "Cannot set features");
1627          goto out;
1628      }
1629  
1630      status |= VIRTIO_CONFIG_S_FEATURES_OK;
1631      r = ioctl(device_fd, VHOST_VDPA_SET_STATUS, &status);
1632      if (unlikely(r)) {
1633          error_setg_errno(errp, errno, "Cannot set device status");
1634          goto out;
1635      }
1636  
1637      cvq_group = vhost_vdpa_get_vring_group(device_fd, cvq_index, errp);
1638      if (unlikely(cvq_group < 0)) {
1639          if (cvq_group != -ENOTSUP) {
1640              r = cvq_group;
1641              goto out;
1642          }
1643  
1644          /*
1645           * The kernel reports VHOST_BACKEND_F_IOTLB_ASID if the vdpa frontend
1646           * supports ASID even if the parent driver does not.  The CVQ cannot be
1647           * isolated in this case.
1648           */
1649          error_free(*errp);
1650          *errp = NULL;
1651          r = 0;
1652          goto out;
1653      }
1654  
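      /* CVQ is isolated only if no data virtqueue shares its group */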
1655      for (int i = 0; i < cvq_index; ++i) {
1656          int64_t group = vhost_vdpa_get_vring_group(device_fd, i, errp);
1657          if (unlikely(group < 0)) {
1658              r = group;
1659              goto out;
1660          }
1661  
1662          if (group == cvq_group) {
1663              r = 0;
1664              goto out;
1665          }
1666      }
1667  
1668      r = 1;
1669  
1670  out:
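      /* reset the device so the probe leaves no state behind */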
1671      status = 0;
1672      ioctl(device_fd, VHOST_VDPA_SET_STATUS, &status);
1673      return r;
1674  }
1675  
1676  static NetClientState *net_vhost_vdpa_init(NetClientState *peer,
1677                                         const char *device,
1678                                         const char *name,
1679                                         int vdpa_device_fd,
1680                                         int queue_pair_index,
1681                                         int nvqs,
1682                                         bool is_datapath,
1683                                         bool svq,
1684                                         struct vhost_vdpa_iova_range iova_range,
1685                                         uint64_t features,
1686                                         VhostVDPAShared *shared,
1687                                         Error **errp)
1688  {
1689      NetClientState *nc = NULL;
1690      VhostVDPAState *s;
1691      int ret = 0;
1692      int cvq_isolated = 0;
1693      assert(name);
1694  
1695      if (is_datapath) {
1696          nc = qemu_new_net_client(&net_vhost_vdpa_info, peer, device,
1697                                   name);
1698      } else {
1699          cvq_isolated = vhost_vdpa_probe_cvq_isolation(vdpa_device_fd, features,
1700                                                        queue_pair_index * 2,
1701                                                        errp);
1702          if (unlikely(cvq_isolated < 0)) {
1703              return NULL;
1704          }
1705  
1706          nc = qemu_new_net_control_client(&net_vhost_vdpa_cvq_info, peer,
1707                                           device, name);
1708      }
1709      qemu_set_info_str(nc, TYPE_VHOST_VDPA);
1710      s = DO_UPCAST(VhostVDPAState, nc, nc);
1711  
1712      s->vhost_vdpa.index = queue_pair_index;
1713      s->always_svq = svq;
1714      s->migration_state.notify = NULL;
1715      s->vhost_vdpa.shadow_vqs_enabled = svq;
1716      if (queue_pair_index == 0) {
1717          vhost_vdpa_net_valid_svq_features(features,
1718                                            &s->vhost_vdpa.migration_blocker);
1719          s->vhost_vdpa.shared = g_new0(VhostVDPAShared, 1);
1720          s->vhost_vdpa.shared->device_fd = vdpa_device_fd;
1721          s->vhost_vdpa.shared->iova_range = iova_range;
1722          s->vhost_vdpa.shared->shadow_data = svq;
1723      } else if (!is_datapath) {
1724          s->cvq_cmd_out_buffer = mmap(NULL, vhost_vdpa_net_cvq_cmd_page_len(),
1725                                       PROT_READ | PROT_WRITE,
1726                                       MAP_SHARED | MAP_ANONYMOUS, -1, 0);
1727          s->status = mmap(NULL, vhost_vdpa_net_cvq_cmd_page_len(),
1728                           PROT_READ | PROT_WRITE, MAP_SHARED | MAP_ANONYMOUS,
1729                           -1, 0);
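              /*
               * Page-sized shadow buffers for the CVQ command and its status
               * reply; the shadow CVQ code maps them into the device's
               * address space before use.
               */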
1730  
1731          s->vhost_vdpa.shadow_vq_ops = &vhost_vdpa_net_svq_ops;
1732          s->vhost_vdpa.shadow_vq_ops_opaque = s;
1733          s->cvq_isolated = cvq_isolated;
1734      }
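      /* queue pairs other than 0 reuse the shared state created by pair 0 */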
1735      if (queue_pair_index != 0) {
1736          s->vhost_vdpa.shared = shared;
1737      }
1738  
1739      ret = vhost_vdpa_add(nc, (void *)&s->vhost_vdpa, queue_pair_index, nvqs);
1740      if (ret) {
1741          qemu_del_net_client(nc);
1742          return NULL;
1743      }
1744  
1745      return nc;
1746  }
1747  
1748  static int vhost_vdpa_get_features(int fd, uint64_t *features, Error **errp)
1749  {
1750      int ret = ioctl(fd, VHOST_GET_FEATURES, features);
1751      if (unlikely(ret < 0)) {
1752          error_setg_errno(errp, errno,
1753                           "Fail to query features from vhost-vDPA device");
1754      }
1755      return ret;
1756  }
1757  
1758  static int vhost_vdpa_get_max_queue_pairs(int fd, uint64_t features,
1759                                            int *has_cvq, Error **errp)
1760  {
1761      unsigned long config_size = offsetof(struct vhost_vdpa_config, buf);
1762      g_autofree struct vhost_vdpa_config *config = NULL;
1763      __virtio16 *max_queue_pairs;
1764      int ret;
1765  
1766      if (features & BIT_ULL(VIRTIO_NET_F_CTRL_VQ)) {
1767          *has_cvq = 1;
1768      } else {
1769          *has_cvq = 0;
1770      }
1771  
1772      if (features & BIT_ULL(VIRTIO_NET_F_MQ)) {
1773          config = g_malloc0(config_size + sizeof(*max_queue_pairs));
1774          config->off = offsetof(struct virtio_net_config, max_virtqueue_pairs);
1775          config->len = sizeof(*max_queue_pairs);
1776  
1777          ret = ioctl(fd, VHOST_VDPA_GET_CONFIG, config);
1778          if (ret) {
1779              error_setg(errp, "Fail to get config from vhost-vDPA device");
1780              return -ret;
1781          }
1782  
1783          max_queue_pairs = (__virtio16 *)&config->buf;
1784  
1785          return lduw_le_p(max_queue_pairs);
1786      }
1787  
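      /* without VIRTIO_NET_F_MQ the device has exactly one queue pair */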
1788      return 1;
1789  }
1790  
1791  int net_init_vhost_vdpa(const Netdev *netdev, const char *name,
1792                          NetClientState *peer, Error **errp)
1793  {
1794      ERRP_GUARD();
1795      const NetdevVhostVDPAOptions *opts;
1796      uint64_t features;
1797      int vdpa_device_fd;
1798      g_autofree NetClientState **ncs = NULL;
1799      struct vhost_vdpa_iova_range iova_range;
1800      NetClientState *nc;
1801      int queue_pairs, r, i = 0, has_cvq = 0;
1802  
1803      assert(netdev->type == NET_CLIENT_DRIVER_VHOST_VDPA);
1804      opts = &netdev->u.vhost_vdpa;
1805      if (!opts->vhostdev && !opts->vhostfd) {
1806          error_setg(errp,
1807                     "vhost-vdpa: neither vhostdev= nor vhostfd= was specified");
1808          return -1;
1809      }
1810  
1811      if (opts->vhostdev && opts->vhostfd) {
1812          error_setg(errp,
1813                     "vhost-vdpa: vhostdev= and vhostfd= are mutually exclusive");
1814          return -1;
1815      }
1816  
1817      if (opts->vhostdev) {
1818          vdpa_device_fd = qemu_open(opts->vhostdev, O_RDWR, errp);
1819          if (vdpa_device_fd == -1) {
1820              return -errno;
1821          }
1822      } else {
1823          /* has_vhostfd */
1824          vdpa_device_fd = monitor_fd_param(monitor_cur(), opts->vhostfd, errp);
1825          if (vdpa_device_fd == -1) {
1826              error_prepend(errp, "vhost-vdpa: unable to parse vhostfd: ");
1827              return -1;
1828          }
1829      }
1830  
1831      r = vhost_vdpa_get_features(vdpa_device_fd, &features, errp);
1832      if (unlikely(r < 0)) {
1833          goto err;
1834      }
1835  
1836      queue_pairs = vhost_vdpa_get_max_queue_pairs(vdpa_device_fd, features,
1837                                                   &has_cvq, errp);
1838      if (queue_pairs < 0) {
1839          qemu_close(vdpa_device_fd);
1840          return queue_pairs;
1841      }
1842  
1843      r = vhost_vdpa_get_iova_range(vdpa_device_fd, &iova_range);
1844      if (unlikely(r < 0)) {
1845          error_setg(errp, "vhost-vdpa: get iova range failed: %s",
1846                     strerror(-r));
1847          goto err;
1848      }
1849  
1850      if (opts->x_svq && !vhost_vdpa_net_valid_svq_features(features, errp)) {
1851          goto err;
1852      }
1853  
1854      ncs = g_malloc0(sizeof(*ncs) * queue_pairs);
1855  
1856      for (i = 0; i < queue_pairs; i++) {
1857          VhostVDPAShared *shared = NULL;
1858  
1859          if (i) {
1860              shared = DO_UPCAST(VhostVDPAState, nc, ncs[0])->vhost_vdpa.shared;
1861          }
1862          ncs[i] = net_vhost_vdpa_init(peer, TYPE_VHOST_VDPA, name,
1863                                       vdpa_device_fd, i, 2, true, opts->x_svq,
1864                                       iova_range, features, shared, errp);
1865          if (!ncs[i]) {
1866              goto err;
              }
1867      }
1868  
1869      if (has_cvq) {
1870          VhostVDPAState *s0 = DO_UPCAST(VhostVDPAState, nc, ncs[0]);
1871          VhostVDPAShared *shared = s0->vhost_vdpa.shared;
1872  
1873          nc = net_vhost_vdpa_init(peer, TYPE_VHOST_VDPA, name,
1874                                   vdpa_device_fd, i, 1, false,
1875                                   opts->x_svq, iova_range, features, shared,
1876                                   errp);
1877          if (!nc) {
1878              goto err;
              }
1879      }
1880  
1881      return 0;
1882  
1883  err:
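      /* tear down the datapath clients created so far, in reverse order */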
1884      if (i) {
1885          for (i--; i >= 0; i--) {
1886              qemu_del_net_client(ncs[i]);
1887          }
1888      }
1889  
1890      qemu_close(vdpa_device_fd);
1891  
1892      return -1;
1893  }
1894