xref: /openbmc/qemu/hw/virtio/vhost-vdpa.c (revision 3df4c288)
1 /*
2  * vhost-vdpa
3  *
4  *  Copyright(c) 2017-2018 Intel Corporation.
5  *  Copyright(c) 2020 Red Hat, Inc.
6  *
7  * This work is licensed under the terms of the GNU GPL, version 2 or later.
8  * See the COPYING file in the top-level directory.
9  *
10  */
11 
12 #include "qemu/osdep.h"
13 #include <linux/vhost.h>
14 #include <linux/vfio.h>
15 #include <sys/eventfd.h>
16 #include <sys/ioctl.h>
17 #include "exec/target_page.h"
18 #include "hw/virtio/vhost.h"
19 #include "hw/virtio/vhost-backend.h"
20 #include "hw/virtio/virtio-net.h"
21 #include "hw/virtio/vhost-shadow-virtqueue.h"
22 #include "hw/virtio/vhost-vdpa.h"
23 #include "exec/address-spaces.h"
24 #include "migration/blocker.h"
25 #include "qemu/cutils.h"
26 #include "qemu/main-loop.h"
27 #include "trace.h"
28 #include "qapi/error.h"
29 
30 /*
31  * Return one past the end of the section, aligned down to the page
32  * boundary. Be careful with uint64_t conversions!
33  */
34 static Int128 vhost_vdpa_section_end(const MemoryRegionSection *section,
35                                      int page_mask)
36 {
37     Int128 llend = int128_make64(section->offset_within_address_space);
38     llend = int128_add(llend, section->size);
39     llend = int128_and(llend, int128_exts64(page_mask));
40 
41     return llend;
42 }
43 
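/*
 * Return true if the memory listener should ignore this section: only RAM
 * and IOMMU regions that fall within the device's usable IOVA range are
 * mapped.
 */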
44 static bool vhost_vdpa_listener_skipped_section(MemoryRegionSection *section,
45                                                 uint64_t iova_min,
46                                                 uint64_t iova_max,
47                                                 int page_mask)
48 {
49     Int128 llend;
50     bool is_ram = memory_region_is_ram(section->mr);
51     bool is_iommu = memory_region_is_iommu(section->mr);
52     bool is_protected = memory_region_is_protected(section->mr);
53 
54     /* vhost-vDPA doesn't allow MMIO to be mapped */
55     bool is_ram_device = memory_region_is_ram_device(section->mr);
56 
57     if ((!is_ram && !is_iommu) || is_protected || is_ram_device) {
58         trace_vhost_vdpa_skipped_memory_section(is_ram, is_iommu, is_protected,
59                                                 is_ram_device, iova_min,
60                                                 iova_max, page_mask);
61         return true;
62     }
63 
64     if (section->offset_within_address_space < iova_min) {
65         error_report("RAM section out of device range (min=0x%" PRIx64
66                      ", addr=0x%" HWADDR_PRIx ")",
67                      iova_min, section->offset_within_address_space);
68         return true;
69     }
70     /*
71      * When a vIOMMU is in use, the section can be larger than iova_max while
72      * the memory that is actually mapped is smaller, so defer the check to
73      * vhost_vdpa_iommu_map_notify(). That function uses the actual size that
74      * is mapped to the kernel.
75      */
76 
77     if (!is_iommu) {
78         llend = vhost_vdpa_section_end(section, page_mask);
79         if (int128_gt(llend, int128_make64(iova_max))) {
80             error_report("RAM section out of device range (max=0x%" PRIx64
81                          ", end addr=0x%" PRIx64 ")",
82                          iova_max, int128_get64(llend));
83             return true;
84         }
85     }
86 
87     return false;
88 }
89 
90 /*
91  * The caller must set asid = 0 if the device does not support asid.
92  * This is not an ABI break since it is set to 0 by the initializer anyway.
93  */
94 int vhost_vdpa_dma_map(VhostVDPAShared *s, uint32_t asid, hwaddr iova,
95                        hwaddr size, void *vaddr, bool readonly)
96 {
97     struct vhost_msg_v2 msg = {};
98     int fd = s->device_fd;
99     int ret = 0;
100 
101     msg.type = VHOST_IOTLB_MSG_V2;
102     msg.asid = asid;
103     msg.iotlb.iova = iova;
104     msg.iotlb.size = size;
105     msg.iotlb.uaddr = (uint64_t)(uintptr_t)vaddr;
106     msg.iotlb.perm = readonly ? VHOST_ACCESS_RO : VHOST_ACCESS_RW;
107     msg.iotlb.type = VHOST_IOTLB_UPDATE;
108 
109     trace_vhost_vdpa_dma_map(s, fd, msg.type, msg.asid, msg.iotlb.iova,
110                              msg.iotlb.size, msg.iotlb.uaddr, msg.iotlb.perm,
111                              msg.iotlb.type);
112 
113     if (write(fd, &msg, sizeof(msg)) != sizeof(msg)) {
114         error_report("failed to write, fd=%d, errno=%d (%s)",
115             fd, errno, strerror(errno));
116         return -EIO;
117     }
118 
119     return ret;
120 }
121 
122 /*
123  * The caller must set asid = 0 if the device does not support asid.
124  * This is not an ABI break since it is set to 0 by the initializer anyway.
125  */
126 int vhost_vdpa_dma_unmap(VhostVDPAShared *s, uint32_t asid, hwaddr iova,
127                          hwaddr size)
128 {
129     struct vhost_msg_v2 msg = {};
130     int fd = s->device_fd;
131     int ret = 0;
132 
133     msg.type = VHOST_IOTLB_MSG_V2;
134     msg.asid = asid;
135     msg.iotlb.iova = iova;
136     msg.iotlb.size = size;
137     msg.iotlb.type = VHOST_IOTLB_INVALIDATE;
138 
139     trace_vhost_vdpa_dma_unmap(s, fd, msg.type, msg.asid, msg.iotlb.iova,
140                                msg.iotlb.size, msg.iotlb.type);
141 
142     if (write(fd, &msg, sizeof(msg)) != sizeof(msg)) {
143         error_report("failed to write, fd=%d, errno=%d (%s)",
144             fd, errno, strerror(errno));
145         return -EIO;
146     }
147 
148     return ret;
149 }
150 
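/* Ask the device to start batching IOTLB updates until the batch-end message */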
151 static void vhost_vdpa_listener_begin_batch(VhostVDPAShared *s)
152 {
153     int fd = s->device_fd;
154     struct vhost_msg_v2 msg = {
155         .type = VHOST_IOTLB_MSG_V2,
156         .iotlb.type = VHOST_IOTLB_BATCH_BEGIN,
157     };
158 
159     trace_vhost_vdpa_listener_begin_batch(s, fd, msg.type, msg.iotlb.type);
160     if (write(fd, &msg, sizeof(msg)) != sizeof(msg)) {
161         error_report("failed to write, fd=%d, errno=%d (%s)",
162                      fd, errno, strerror(errno));
163     }
164 }
165 
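/*
 * Send the batch-begin message only once per batch, and only if the backend
 * advertises VHOST_BACKEND_F_IOTLB_BATCH.
 */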
166 static void vhost_vdpa_iotlb_batch_begin_once(VhostVDPAShared *s)
167 {
168     if (s->backend_cap & (0x1ULL << VHOST_BACKEND_F_IOTLB_BATCH) &&
169         !s->iotlb_batch_begin_sent) {
170         vhost_vdpa_listener_begin_batch(s);
171     }
172 
173     s->iotlb_batch_begin_sent = true;
174 }
175 
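/*
 * Memory listener commit callback: close the current IOTLB batch, if any,
 * by sending VHOST_IOTLB_BATCH_END to the device.
 */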
176 static void vhost_vdpa_listener_commit(MemoryListener *listener)
177 {
178     VhostVDPAShared *s = container_of(listener, VhostVDPAShared, listener);
179     struct vhost_msg_v2 msg = {};
180     int fd = s->device_fd;
181 
182     if (!(s->backend_cap & (0x1ULL << VHOST_BACKEND_F_IOTLB_BATCH))) {
183         return;
184     }
185 
186     if (!s->iotlb_batch_begin_sent) {
187         return;
188     }
189 
190     msg.type = VHOST_IOTLB_MSG_V2;
191     msg.iotlb.type = VHOST_IOTLB_BATCH_END;
192 
193     trace_vhost_vdpa_listener_commit(s, fd, msg.type, msg.iotlb.type);
194     if (write(fd, &msg, sizeof(msg)) != sizeof(msg)) {
195         error_report("failed to write, fd=%d, errno=%d (%s)",
196                      fd, errno, strerror(errno));
197     }
198 
199     s->iotlb_batch_begin_sent = false;
200 }
201 
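/*
 * vIOMMU notifier: forward guest IOTLB map/unmap events to the vhost-vdpa
 * device. Map events are translated to a host virtual address before being
 * sent to the kernel.
 */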
202 static void vhost_vdpa_iommu_map_notify(IOMMUNotifier *n, IOMMUTLBEntry *iotlb)
203 {
204     struct vdpa_iommu *iommu = container_of(n, struct vdpa_iommu, n);
205 
206     hwaddr iova = iotlb->iova + iommu->iommu_offset;
207     VhostVDPAShared *s = iommu->dev_shared;
208     void *vaddr;
209     int ret;
210     Int128 llend;
211 
212     if (iotlb->target_as != &address_space_memory) {
213         error_report("Wrong target AS \"%s\", only system memory is allowed",
214                      iotlb->target_as->name ? iotlb->target_as->name : "none");
215         return;
216     }
217     RCU_READ_LOCK_GUARD();
218     /* check if RAM section out of device range */
219     llend = int128_add(int128_makes64(iotlb->addr_mask), int128_makes64(iova));
220     if (int128_gt(llend, int128_make64(s->iova_range.last))) {
221         error_report("RAM section out of device range (max=0x%" PRIx64
222                      ", end addr=0x%" PRIx64 ")",
223                      s->iova_range.last, int128_get64(llend));
224         return;
225     }
226 
227     if ((iotlb->perm & IOMMU_RW) != IOMMU_NONE) {
228         bool read_only;
229 
230         if (!memory_get_xlat_addr(iotlb, &vaddr, NULL, &read_only, NULL)) {
231             return;
232         }
233         ret = vhost_vdpa_dma_map(s, VHOST_VDPA_GUEST_PA_ASID, iova,
234                                  iotlb->addr_mask + 1, vaddr, read_only);
235         if (ret) {
236             error_report("vhost_vdpa_dma_map(%p, 0x%" HWADDR_PRIx ", "
237                          "0x%" HWADDR_PRIx ", %p) = %d (%m)",
238                          s, iova, iotlb->addr_mask + 1, vaddr, ret);
239         }
240     } else {
241         ret = vhost_vdpa_dma_unmap(s, VHOST_VDPA_GUEST_PA_ASID, iova,
242                                    iotlb->addr_mask + 1);
243         if (ret) {
244             error_report("vhost_vdpa_dma_unmap(%p, 0x%" HWADDR_PRIx ", "
245                          "0x%" HWADDR_PRIx ") = %d (%m)",
246                          s, iova, iotlb->addr_mask + 1, ret);
247         }
248     }
249 }
250 
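/*
 * Register an IOMMU notifier for the section so that guest IOTLB updates are
 * replayed into the device, and remember it in s->iommu_list.
 */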
251 static void vhost_vdpa_iommu_region_add(MemoryListener *listener,
252                                         MemoryRegionSection *section)
253 {
254     VhostVDPAShared *s = container_of(listener, VhostVDPAShared, listener);
255 
256     struct vdpa_iommu *iommu;
257     Int128 end;
258     int iommu_idx;
259     IOMMUMemoryRegion *iommu_mr;
260     int ret;
261 
262     iommu_mr = IOMMU_MEMORY_REGION(section->mr);
263 
264     iommu = g_malloc0(sizeof(*iommu));
265     end = int128_add(int128_make64(section->offset_within_region),
266                      section->size);
267     end = int128_sub(end, int128_one());
268     iommu_idx = memory_region_iommu_attrs_to_index(iommu_mr,
269                                                    MEMTXATTRS_UNSPECIFIED);
270     iommu->iommu_mr = iommu_mr;
271     iommu_notifier_init(&iommu->n, vhost_vdpa_iommu_map_notify,
272                         IOMMU_NOTIFIER_IOTLB_EVENTS,
273                         section->offset_within_region,
274                         int128_get64(end),
275                         iommu_idx);
276     iommu->iommu_offset = section->offset_within_address_space -
277                           section->offset_within_region;
278     iommu->dev_shared = s;
279 
280     ret = memory_region_register_iommu_notifier(section->mr, &iommu->n, NULL);
281     if (ret) {
282         g_free(iommu);
283         return;
284     }
285 
286     QLIST_INSERT_HEAD(&s->iommu_list, iommu, iommu_next);
287     memory_region_iommu_replay(iommu->iommu_mr, &iommu->n);
288 
289     return;
290 }
291 
292 static void vhost_vdpa_iommu_region_del(MemoryListener *listener,
293                                         MemoryRegionSection *section)
294 {
295     VhostVDPAShared *s = container_of(listener, VhostVDPAShared, listener);
296 
297     struct vdpa_iommu *iommu;
298 
299     QLIST_FOREACH(iommu, &s->iommu_list, iommu_next)
300     {
301         if (MEMORY_REGION(iommu->iommu_mr) == section->mr &&
302             iommu->n.start == section->offset_within_region) {
303             memory_region_unregister_iommu_notifier(section->mr, &iommu->n);
304             QLIST_REMOVE(iommu, iommu_next);
305             g_free(iommu);
306             break;
307         }
308     }
309 }
310 
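/*
 * Memory listener region_add callback: map the new RAM section into the
 * device IOTLB. When the shadow virtqueue data path is in use
 * (s->shadow_data), an IOVA is allocated from the IOVA tree instead of
 * reusing the guest physical address.
 */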
311 static void vhost_vdpa_listener_region_add(MemoryListener *listener,
312                                            MemoryRegionSection *section)
313 {
314     DMAMap mem_region = {};
315     VhostVDPAShared *s = container_of(listener, VhostVDPAShared, listener);
316     hwaddr iova;
317     Int128 llend, llsize;
318     void *vaddr;
319     int ret;
320     int page_size = qemu_target_page_size();
321     int page_mask = -page_size;
322 
323     if (vhost_vdpa_listener_skipped_section(section, s->iova_range.first,
324                                             s->iova_range.last, page_mask)) {
325         return;
326     }
327     if (memory_region_is_iommu(section->mr)) {
328         vhost_vdpa_iommu_region_add(listener, section);
329         return;
330     }
331 
332     if (unlikely((section->offset_within_address_space & ~page_mask) !=
333                  (section->offset_within_region & ~page_mask))) {
334         trace_vhost_vdpa_listener_region_add_unaligned(s, section->mr->name,
335                        section->offset_within_address_space & ~page_mask,
336                        section->offset_within_region & ~page_mask);
337         return;
338     }
339 
340     iova = ROUND_UP(section->offset_within_address_space, page_size);
341     llend = vhost_vdpa_section_end(section, page_mask);
342     if (int128_ge(int128_make64(iova), llend)) {
343         return;
344     }
345 
346     memory_region_ref(section->mr);
347 
348     /* Here we assume that memory_region_is_ram(section->mr)==true */
349 
350     vaddr = memory_region_get_ram_ptr(section->mr) +
351             section->offset_within_region +
352             (iova - section->offset_within_address_space);
353 
354     trace_vhost_vdpa_listener_region_add(s, iova, int128_get64(llend),
355                                          vaddr, section->readonly);
356 
357     llsize = int128_sub(llend, int128_make64(iova));
358     if (s->shadow_data) {
359         int r;
360 
361         mem_region.translated_addr = (hwaddr)(uintptr_t)vaddr;
362         mem_region.size = int128_get64(llsize) - 1;
363         mem_region.perm = IOMMU_ACCESS_FLAG(true, section->readonly);
364 
365         r = vhost_iova_tree_map_alloc(s->iova_tree, &mem_region);
366         if (unlikely(r != IOVA_OK)) {
367             error_report("Can't allocate a mapping (%d)", r);
368             goto fail;
369         }
370 
371         iova = mem_region.iova;
372     }
373 
374     vhost_vdpa_iotlb_batch_begin_once(s);
375     ret = vhost_vdpa_dma_map(s, VHOST_VDPA_GUEST_PA_ASID, iova,
376                              int128_get64(llsize), vaddr, section->readonly);
377     if (ret) {
378         error_report("vhost-vdpa: DMA map failed (%d)", ret);
379         goto fail_map;
380     }
381 
382     return;
383 
384 fail_map:
385     if (s->shadow_data) {
386         vhost_iova_tree_remove(s->iova_tree, mem_region);
387     }
388 
389 fail:
390     /*
391      * There is no good way to recover from a failed DMA mapping at
392      * runtime: report the error, as the device is unlikely to keep
393      * working correctly from this point on.
394      */
395     error_report("vhost-vdpa: DMA mapping failed, unable to continue");
396     return;
397 
398 }
399 
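/*
 * Memory listener region_del callback: remove the section's mapping from the
 * device IOTLB, splitting the unmap in two if the size would not fit in the
 * 64-bit ioctl argument.
 */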
400 static void vhost_vdpa_listener_region_del(MemoryListener *listener,
401                                            MemoryRegionSection *section)
402 {
403     VhostVDPAShared *s = container_of(listener, VhostVDPAShared, listener);
404     hwaddr iova;
405     Int128 llend, llsize;
406     int ret;
407     int page_size = qemu_target_page_size();
408     int page_mask = -page_size;
409 
410     if (vhost_vdpa_listener_skipped_section(section, s->iova_range.first,
411                                             s->iova_range.last, page_mask)) {
412         return;
413     }
414     if (memory_region_is_iommu(section->mr)) {
415         vhost_vdpa_iommu_region_del(listener, section);
416     }
417 
418     if (unlikely((section->offset_within_address_space & ~page_mask) !=
419                  (section->offset_within_region & ~page_mask))) {
420         trace_vhost_vdpa_listener_region_del_unaligned(s, section->mr->name,
421                        section->offset_within_address_space & ~page_mask,
422                        section->offset_within_region & ~page_mask);
423         return;
424     }
425 
426     iova = ROUND_UP(section->offset_within_address_space, page_size);
427     llend = vhost_vdpa_section_end(section, page_mask);
428 
429     trace_vhost_vdpa_listener_region_del(s, iova,
430         int128_get64(int128_sub(llend, int128_one())));
431 
432     if (int128_ge(int128_make64(iova), llend)) {
433         return;
434     }
435 
436     llsize = int128_sub(llend, int128_make64(iova));
437 
438     if (s->shadow_data) {
439         const DMAMap *result;
440         const void *vaddr = memory_region_get_ram_ptr(section->mr) +
441             section->offset_within_region +
442             (iova - section->offset_within_address_space);
443         DMAMap mem_region = {
444             .translated_addr = (hwaddr)(uintptr_t)vaddr,
445             .size = int128_get64(llsize) - 1,
446         };
447 
448         result = vhost_iova_tree_find_iova(s->iova_tree, &mem_region);
449         if (!result) {
450             /* The memory listener map wasn't mapped */
451             return;
452         }
453         iova = result->iova;
454         vhost_iova_tree_remove(s->iova_tree, *result);
455     }
456     vhost_vdpa_iotlb_batch_begin_once(s);
457     /*
458      * The unmap ioctl doesn't accept a full 64-bit span, so split it in two.
459      */
460     if (int128_eq(llsize, int128_2_64())) {
461         llsize = int128_rshift(llsize, 1);
462         ret = vhost_vdpa_dma_unmap(s, VHOST_VDPA_GUEST_PA_ASID, iova,
463                                    int128_get64(llsize));
464 
465         if (ret) {
466             error_report("vhost_vdpa_dma_unmap(%p, 0x%" HWADDR_PRIx ", "
467                          "0x%" HWADDR_PRIx ") = %d (%m)",
468                          s, iova, int128_get64(llsize), ret);
469         }
470         iova += int128_get64(llsize);
471     }
472     ret = vhost_vdpa_dma_unmap(s, VHOST_VDPA_GUEST_PA_ASID, iova,
473                                int128_get64(llsize));
474 
475     if (ret) {
476         error_report("vhost_vdpa_dma_unmap(%p, 0x%" HWADDR_PRIx ", "
477                      "0x%" HWADDR_PRIx ") = %d (%m)",
478                      s, iova, int128_get64(llsize), ret);
479     }
480 
481     memory_region_unref(section->mr);
482 }
483 /*
484  * The IOTLB API used by vhost-vdpa requires incremental updates of the
485  * mapping, so we cannot use the generic vhost memory listener, which
486  * depends on addnop().
487  */
488 static const MemoryListener vhost_vdpa_memory_listener = {
489     .name = "vhost-vdpa",
490     .commit = vhost_vdpa_listener_commit,
491     .region_add = vhost_vdpa_listener_region_add,
492     .region_del = vhost_vdpa_listener_region_del,
493 };
494 
495 static int vhost_vdpa_call(struct vhost_dev *dev, unsigned long int request,
496                              void *arg)
497 {
498     struct vhost_vdpa *v = dev->opaque;
499     int fd = v->shared->device_fd;
500     int ret;
501 
502     assert(dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_VDPA);
503 
504     ret = ioctl(fd, request, arg);
505     return ret < 0 ? -errno : ret;
506 }
507 
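/*
 * OR the given bits into the device status and verify that the device
 * actually accepted them.
 */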
508 static int vhost_vdpa_add_status(struct vhost_dev *dev, uint8_t status)
509 {
510     uint8_t s;
511     int ret;
512 
513     trace_vhost_vdpa_add_status(dev, status);
514     ret = vhost_vdpa_call(dev, VHOST_VDPA_GET_STATUS, &s);
515     if (ret < 0) {
516         return ret;
517     }
518     if ((s & status) == status) {
519         /* Don't set bits already set */
520         return 0;
521     }
522 
523     s |= status;
524 
525     ret = vhost_vdpa_call(dev, VHOST_VDPA_SET_STATUS, &s);
526     if (ret < 0) {
527         return ret;
528     }
529 
530     ret = vhost_vdpa_call(dev, VHOST_VDPA_GET_STATUS, &s);
531     if (ret < 0) {
532         return ret;
533     }
534 
535     if (!(s & status)) {
536         return -EIO;
537     }
538 
539     return 0;
540 }
541 
542 int vhost_vdpa_get_iova_range(int fd, struct vhost_vdpa_iova_range *iova_range)
543 {
544     int ret = ioctl(fd, VHOST_VDPA_GET_IOVA_RANGE, iova_range);
545 
546     return ret < 0 ? -errno : 0;
547 }
548 
549 /*
550  * This function is meant for requests that only need to be applied
551  * once. Such requests typically occur at the beginning of operation,
552  * before the queues are set up. It should not be used for requests
553  * that must wait until all queues are set, which would need to check
554  * dev->vq_index_end instead.
555  */
556 static bool vhost_vdpa_first_dev(struct vhost_dev *dev)
557 {
558     struct vhost_vdpa *v = dev->opaque;
559 
560     return v->index == 0;
561 }
562 
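/* Return true if this vhost_dev holds the last virtqueues of the device */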
563 static bool vhost_vdpa_last_dev(struct vhost_dev *dev)
564 {
565     return dev->vq_index + dev->nvqs == dev->vq_index_end;
566 }
567 
568 static int vhost_vdpa_get_dev_features(struct vhost_dev *dev,
569                                        uint64_t *features)
570 {
571     int ret;
572 
573     ret = vhost_vdpa_call(dev, VHOST_GET_FEATURES, features);
574     trace_vhost_vdpa_get_features(dev, *features);
575     return ret;
576 }
577 
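/* Allocate one shadow virtqueue per vhost virtqueue of this device */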
578 static void vhost_vdpa_init_svq(struct vhost_dev *hdev, struct vhost_vdpa *v)
579 {
580     g_autoptr(GPtrArray) shadow_vqs = NULL;
581 
582     shadow_vqs = g_ptr_array_new_full(hdev->nvqs, vhost_svq_free);
583     for (unsigned n = 0; n < hdev->nvqs; ++n) {
584         VhostShadowVirtqueue *svq;
585 
586         svq = vhost_svq_new(v->shadow_vq_ops, v->shadow_vq_ops_opaque);
587         g_ptr_array_add(shadow_vqs, svq);
588     }
589 
590     v->shadow_vqs = g_steal_pointer(&shadow_vqs);
591 }
592 
593 static int vhost_vdpa_init(struct vhost_dev *dev, void *opaque, Error **errp)
594 {
595     struct vhost_vdpa *v = opaque;
596     assert(dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_VDPA);
597     trace_vhost_vdpa_init(dev, v->shared, opaque);
598     int ret;
599 
600     v->dev = dev;
601     dev->opaque = opaque;
602     v->shared->listener = vhost_vdpa_memory_listener;
603     vhost_vdpa_init_svq(dev, v);
604 
605     error_propagate(&dev->migration_blocker, v->migration_blocker);
606     if (!vhost_vdpa_first_dev(dev)) {
607         return 0;
608     }
609 
610     /*
611      * If v->shadow_vqs_enabled is set at initialization, the device has
612      * been started with x-svq=on, so don't block migration.
613      */
614     if (dev->migration_blocker == NULL && !v->shadow_vqs_enabled) {
615         /* We don't have dev->features yet */
616         uint64_t features;
617         ret = vhost_vdpa_get_dev_features(dev, &features);
618         if (unlikely(ret)) {
619             error_setg_errno(errp, -ret, "Could not get device features");
620             return ret;
621         }
622         vhost_svq_valid_features(features, &dev->migration_blocker);
623     }
624 
625     /*
626      * Similar to VFIO, we end up pinning all guest memory and have to
627      * disable discarding of RAM.
628      */
629     ret = ram_block_discard_disable(true);
630     if (ret) {
631         error_report("Cannot disable discarding of RAM");
632         return ret;
633     }
634 
635     vhost_vdpa_add_status(dev, VIRTIO_CONFIG_S_ACKNOWLEDGE |
636                                VIRTIO_CONFIG_S_DRIVER);
637 
638     return 0;
639 }
640 
641 static void vhost_vdpa_host_notifier_uninit(struct vhost_dev *dev,
642                                             int queue_index)
643 {
644     size_t page_size = qemu_real_host_page_size();
645     struct vhost_vdpa *v = dev->opaque;
646     VirtIODevice *vdev = dev->vdev;
647     VhostVDPAHostNotifier *n;
648 
649     n = &v->notifier[queue_index];
650 
651     if (n->addr) {
652         virtio_queue_set_host_notifier_mr(vdev, queue_index, &n->mr, false);
653         object_unparent(OBJECT(&n->mr));
654         munmap(n->addr, page_size);
655         n->addr = NULL;
656     }
657 }
658 
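/*
 * mmap the doorbell page of the given queue from the vhost-vdpa device and
 * expose it to the guest as a host notifier memory region, so that guest
 * virtqueue notifications are written directly to the device.
 */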
659 static int vhost_vdpa_host_notifier_init(struct vhost_dev *dev, int queue_index)
660 {
661     size_t page_size = qemu_real_host_page_size();
662     struct vhost_vdpa *v = dev->opaque;
663     VirtIODevice *vdev = dev->vdev;
664     VhostVDPAHostNotifier *n;
665     int fd = v->shared->device_fd;
666     void *addr;
667     char *name;
668 
669     vhost_vdpa_host_notifier_uninit(dev, queue_index);
670 
671     n = &v->notifier[queue_index];
672 
673     addr = mmap(NULL, page_size, PROT_WRITE, MAP_SHARED, fd,
674                 queue_index * page_size);
675     if (addr == MAP_FAILED) {
676         goto err;
677     }
678 
679     name = g_strdup_printf("vhost-vdpa/host-notifier@%p mmaps[%d]",
680                            v, queue_index);
681     memory_region_init_ram_device_ptr(&n->mr, OBJECT(vdev), name,
682                                       page_size, addr);
683     g_free(name);
684 
685     if (virtio_queue_set_host_notifier_mr(vdev, queue_index, &n->mr, true)) {
686         object_unparent(OBJECT(&n->mr));
687         munmap(addr, page_size);
688         goto err;
689     }
690     n->addr = addr;
691 
692     return 0;
693 
694 err:
695     return -1;
696 }
697 
698 static void vhost_vdpa_host_notifiers_uninit(struct vhost_dev *dev, int n)
699 {
700     int i;
701 
702     /*
703      * Pack all the changes to the memory regions in a single
704      * transaction to avoid repeated updates of the address space
705      * topology.
706      */
707     memory_region_transaction_begin();
708 
709     for (i = dev->vq_index; i < dev->vq_index + n; i++) {
710         vhost_vdpa_host_notifier_uninit(dev, i);
711     }
712 
713     memory_region_transaction_commit();
714 }
715 
716 static void vhost_vdpa_host_notifiers_init(struct vhost_dev *dev)
717 {
718     struct vhost_vdpa *v = dev->opaque;
719     int i;
720 
721     if (v->shadow_vqs_enabled) {
722         /* FIXME SVQ is not compatible with host notifiers mr */
723         return;
724     }
725 
726     /*
727      * Pack all the changes to the memory regions in a single
728      * transaction to avoid repeated updates of the address space
729      * topology.
730      */
731     memory_region_transaction_begin();
732 
733     for (i = dev->vq_index; i < dev->vq_index + dev->nvqs; i++) {
734         if (vhost_vdpa_host_notifier_init(dev, i)) {
735             vhost_vdpa_host_notifiers_uninit(dev, i - dev->vq_index);
736             break;
737         }
738     }
739 
740     memory_region_transaction_commit();
741 }
742 
743 static void vhost_vdpa_svq_cleanup(struct vhost_dev *dev)
744 {
745     struct vhost_vdpa *v = dev->opaque;
746     size_t idx;
747 
748     for (idx = 0; idx < v->shadow_vqs->len; ++idx) {
749         vhost_svq_stop(g_ptr_array_index(v->shadow_vqs, idx));
750     }
751     g_ptr_array_free(v->shadow_vqs, true);
752 }
753 
754 static int vhost_vdpa_cleanup(struct vhost_dev *dev)
755 {
756     struct vhost_vdpa *v;
757     assert(dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_VDPA);
758     v = dev->opaque;
759     trace_vhost_vdpa_cleanup(dev, v);
760     if (vhost_vdpa_first_dev(dev)) {
761         ram_block_discard_disable(false);
762         memory_listener_unregister(&v->shared->listener);
763     }
764 
765     vhost_vdpa_host_notifiers_uninit(dev, dev->nvqs);
766     vhost_vdpa_svq_cleanup(dev);
767 
768     dev->opaque = NULL;
769 
770     return 0;
771 }
772 
773 static int vhost_vdpa_memslots_limit(struct vhost_dev *dev)
774 {
775     trace_vhost_vdpa_memslots_limit(dev, INT_MAX);
776     return INT_MAX;
777 }
778 
779 static int vhost_vdpa_set_mem_table(struct vhost_dev *dev,
780                                     struct vhost_memory *mem)
781 {
782     if (!vhost_vdpa_first_dev(dev)) {
783         return 0;
784     }
785 
786     trace_vhost_vdpa_set_mem_table(dev, mem->nregions, mem->padding);
787     if (trace_event_get_state_backends(TRACE_VHOST_VDPA_SET_MEM_TABLE) &&
788         trace_event_get_state_backends(TRACE_VHOST_VDPA_DUMP_REGIONS)) {
789         int i;
790         for (i = 0; i < mem->nregions; i++) {
791             trace_vhost_vdpa_dump_regions(dev, i,
792                                           mem->regions[i].guest_phys_addr,
793                                           mem->regions[i].memory_size,
794                                           mem->regions[i].userspace_addr,
795                                           mem->regions[i].flags_padding);
796         }
797     }
798     if (mem->padding) {
799         return -EINVAL;
800     }
801 
802     return 0;
803 }
804 
805 static int vhost_vdpa_set_features(struct vhost_dev *dev,
806                                    uint64_t features)
807 {
808     struct vhost_vdpa *v = dev->opaque;
809     int ret;
810 
811     if (!vhost_vdpa_first_dev(dev)) {
812         return 0;
813     }
814 
815     if (v->shadow_vqs_enabled) {
816         if ((v->acked_features ^ features) == BIT_ULL(VHOST_F_LOG_ALL)) {
817             /*
818              * QEMU is just trying to enable or disable logging. SVQ handles
819              * this separately, so there is no need to forward it.
820              */
821             v->acked_features = features;
822             return 0;
823         }
824 
825         v->acked_features = features;
826 
827         /* We must not ack _F_LOG if SVQ is enabled */
828         features &= ~BIT_ULL(VHOST_F_LOG_ALL);
829     }
830 
831     trace_vhost_vdpa_set_features(dev, features);
832     ret = vhost_vdpa_call(dev, VHOST_SET_FEATURES, &features);
833     if (ret) {
834         return ret;
835     }
836 
837     return vhost_vdpa_add_status(dev, VIRTIO_CONFIG_S_FEATURES_OK);
838 }
839 
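/*
 * Negotiate the vhost backend features this code can use (IOTLB v2 messages,
 * batching, ASID and suspend) and cache the result.
 */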
840 static int vhost_vdpa_set_backend_cap(struct vhost_dev *dev)
841 {
842     struct vhost_vdpa *v = dev->opaque;
843 
844     uint64_t features;
845     uint64_t f = 0x1ULL << VHOST_BACKEND_F_IOTLB_MSG_V2 |
846         0x1ULL << VHOST_BACKEND_F_IOTLB_BATCH |
847         0x1ULL << VHOST_BACKEND_F_IOTLB_ASID |
848         0x1ULL << VHOST_BACKEND_F_SUSPEND;
849     int r;
850 
851     if (vhost_vdpa_call(dev, VHOST_GET_BACKEND_FEATURES, &features)) {
852         return -EFAULT;
853     }
854 
855     features &= f;
856 
857     if (vhost_vdpa_first_dev(dev)) {
858         r = vhost_vdpa_call(dev, VHOST_SET_BACKEND_FEATURES, &features);
859         if (r) {
860             return -EFAULT;
861         }
862     }
863 
864     dev->backend_cap = features;
865     v->shared->backend_cap = features;
866 
867     return 0;
868 }
869 
870 static int vhost_vdpa_get_device_id(struct vhost_dev *dev,
871                                     uint32_t *device_id)
872 {
873     int ret;
874     ret = vhost_vdpa_call(dev, VHOST_VDPA_GET_DEVICE_ID, device_id);
875     trace_vhost_vdpa_get_device_id(dev, *device_id);
876     return ret;
877 }
878 
879 static int vhost_vdpa_reset_device(struct vhost_dev *dev)
880 {
881     struct vhost_vdpa *v = dev->opaque;
882     int ret;
883     uint8_t status = 0;
884 
885     ret = vhost_vdpa_call(dev, VHOST_VDPA_SET_STATUS, &status);
886     trace_vhost_vdpa_reset_device(dev);
887     v->suspended = false;
888     return ret;
889 }
890 
891 static int vhost_vdpa_get_vq_index(struct vhost_dev *dev, int idx)
892 {
893     assert(idx >= dev->vq_index && idx < dev->vq_index + dev->nvqs);
894 
895     trace_vhost_vdpa_get_vq_index(dev, idx, idx);
896     return idx;
897 }
898 
899 static int vhost_vdpa_set_vring_enable_one(struct vhost_vdpa *v, unsigned idx,
900                                            int enable)
901 {
902     struct vhost_dev *dev = v->dev;
903     struct vhost_vring_state state = {
904         .index = idx,
905         .num = enable,
906     };
907     int r = vhost_vdpa_call(dev, VHOST_VDPA_SET_VRING_ENABLE, &state);
908 
909     trace_vhost_vdpa_set_vring_enable_one(dev, idx, enable, r);
910     return r;
911 }
912 
913 static int vhost_vdpa_set_vring_enable(struct vhost_dev *dev, int enable)
914 {
915     struct vhost_vdpa *v = dev->opaque;
916     unsigned int i;
917     int ret;
918 
919     for (i = 0; i < dev->nvqs; ++i) {
920         ret = vhost_vdpa_set_vring_enable_one(v, i, enable);
921         if (ret < 0) {
922             return ret;
923         }
924     }
925 
926     return 0;
927 }
928 
929 int vhost_vdpa_set_vring_ready(struct vhost_vdpa *v, unsigned idx)
930 {
931     return vhost_vdpa_set_vring_enable_one(v, idx, 1);
932 }
933 
934 static int vhost_vdpa_set_config_call(struct vhost_dev *dev,
935                                        int fd)
936 {
937     trace_vhost_vdpa_set_config_call(dev, fd);
938     return vhost_vdpa_call(dev, VHOST_VDPA_SET_CONFIG_CALL, &fd);
939 }
940 
941 static void vhost_vdpa_dump_config(struct vhost_dev *dev, const uint8_t *config,
942                                    uint32_t config_len)
943 {
944     int b, len;
945     char line[QEMU_HEXDUMP_LINE_LEN];
946 
947     for (b = 0; b < config_len; b += 16) {
948         len = config_len - b;
949         qemu_hexdump_line(line, b, config, len, false);
950         trace_vhost_vdpa_dump_config(dev, line);
951     }
952 }
953 
954 static int vhost_vdpa_set_config(struct vhost_dev *dev, const uint8_t *data,
955                                    uint32_t offset, uint32_t size,
956                                    uint32_t flags)
957 {
958     struct vhost_vdpa_config *config;
959     int ret;
960     unsigned long config_size = offsetof(struct vhost_vdpa_config, buf);
961 
962     trace_vhost_vdpa_set_config(dev, offset, size, flags);
963     config = g_malloc(size + config_size);
964     config->off = offset;
965     config->len = size;
966     memcpy(config->buf, data, size);
967     if (trace_event_get_state_backends(TRACE_VHOST_VDPA_SET_CONFIG) &&
968         trace_event_get_state_backends(TRACE_VHOST_VDPA_DUMP_CONFIG)) {
969         vhost_vdpa_dump_config(dev, data, size);
970     }
971     ret = vhost_vdpa_call(dev, VHOST_VDPA_SET_CONFIG, config);
972     g_free(config);
973     return ret;
974 }
975 
976 static int vhost_vdpa_get_config(struct vhost_dev *dev, uint8_t *config,
977                                    uint32_t config_len, Error **errp)
978 {
979     struct vhost_vdpa_config *v_config;
980     unsigned long config_size = offsetof(struct vhost_vdpa_config, buf);
981     int ret;
982 
983     trace_vhost_vdpa_get_config(dev, config, config_len);
984     v_config = g_malloc(config_len + config_size);
985     v_config->len = config_len;
986     v_config->off = 0;
987     ret = vhost_vdpa_call(dev, VHOST_VDPA_GET_CONFIG, v_config);
988     memcpy(config, v_config->buf, config_len);
989     g_free(v_config);
990     if (trace_event_get_state_backends(TRACE_VHOST_VDPA_GET_CONFIG) &&
991         trace_event_get_state_backends(TRACE_VHOST_VDPA_DUMP_CONFIG)) {
992         vhost_vdpa_dump_config(dev, config, config_len);
993     }
994     return ret;
995 }
996 
997 static int vhost_vdpa_set_dev_vring_base(struct vhost_dev *dev,
998                                          struct vhost_vring_state *ring)
999 {
1000     struct vhost_vdpa *v = dev->opaque;
1001 
1002     trace_vhost_vdpa_set_dev_vring_base(dev, ring->index, ring->num,
1003                                         v->shadow_vqs_enabled);
1004     return vhost_vdpa_call(dev, VHOST_SET_VRING_BASE, ring);
1005 }
1006 
1007 static int vhost_vdpa_set_vring_dev_kick(struct vhost_dev *dev,
1008                                          struct vhost_vring_file *file)
1009 {
1010     trace_vhost_vdpa_set_vring_kick(dev, file->index, file->fd);
1011     return vhost_vdpa_call(dev, VHOST_SET_VRING_KICK, file);
1012 }
1013 
1014 static int vhost_vdpa_set_vring_dev_call(struct vhost_dev *dev,
1015                                          struct vhost_vring_file *file)
1016 {
1017     trace_vhost_vdpa_set_vring_call(dev, file->index, file->fd);
1018     return vhost_vdpa_call(dev, VHOST_SET_VRING_CALL, file);
1019 }
1020 
1021 static int vhost_vdpa_set_vring_dev_addr(struct vhost_dev *dev,
1022                                          struct vhost_vring_addr *addr)
1023 {
1024     trace_vhost_vdpa_set_vring_addr(dev, addr->index, addr->flags,
1025                                 addr->desc_user_addr, addr->used_user_addr,
1026                                 addr->avail_user_addr,
1027                                 addr->log_guest_addr);
1028 
1029     return vhost_vdpa_call(dev, VHOST_SET_VRING_ADDR, addr);
1030 
1031 }
1032 
1033 /**
1034  * Set the shadow virtqueue descriptors to the device
1035  *
1036  * @dev: The vhost device model
1037  * @svq: The shadow virtqueue
1038  * @idx: The index of the virtqueue in the vhost device
1039  * @errp: Error
1040  *
1041  * Note that this function does not rewind the kick file descriptor if it
1042  * cannot set the call one.
1043  */
1044 static int vhost_vdpa_svq_set_fds(struct vhost_dev *dev,
1045                                   VhostShadowVirtqueue *svq, unsigned idx,
1046                                   Error **errp)
1047 {
1048     struct vhost_vring_file file = {
1049         .index = dev->vq_index + idx,
1050     };
1051     const EventNotifier *event_notifier = &svq->hdev_kick;
1052     int r;
1053 
1054     r = event_notifier_init(&svq->hdev_kick, 0);
1055     if (r != 0) {
1056         error_setg_errno(errp, -r, "Couldn't create kick event notifier");
1057         goto err_init_hdev_kick;
1058     }
1059 
1060     r = event_notifier_init(&svq->hdev_call, 0);
1061     if (r != 0) {
1062         error_setg_errno(errp, -r, "Couldn't create call event notifier");
1063         goto err_init_hdev_call;
1064     }
1065 
1066     file.fd = event_notifier_get_fd(event_notifier);
1067     r = vhost_vdpa_set_vring_dev_kick(dev, &file);
1068     if (unlikely(r != 0)) {
1069         error_setg_errno(errp, -r, "Can't set device kick fd");
1070         goto err_init_set_dev_fd;
1071     }
1072 
1073     event_notifier = &svq->hdev_call;
1074     file.fd = event_notifier_get_fd(event_notifier);
1075     r = vhost_vdpa_set_vring_dev_call(dev, &file);
1076     if (unlikely(r != 0)) {
1077         error_setg_errno(errp, -r, "Can't set device call fd");
1078         goto err_init_set_dev_fd;
1079     }
1080 
1081     return 0;
1082 
1083 err_init_set_dev_fd:
1084     event_notifier_set_handler(&svq->hdev_call, NULL);
1085 
1086 err_init_hdev_call:
1087     event_notifier_cleanup(&svq->hdev_kick);
1088 
1089 err_init_hdev_kick:
1090     return r;
1091 }
1092 
1093 /**
1094  * Unmap a SVQ area in the device
1095  */
1096 static void vhost_vdpa_svq_unmap_ring(struct vhost_vdpa *v, hwaddr addr)
1097 {
1098     const DMAMap needle = {
1099         .translated_addr = addr,
1100     };
1101     const DMAMap *result = vhost_iova_tree_find_iova(v->shared->iova_tree,
1102                                                      &needle);
1103     hwaddr size;
1104     int r;
1105 
1106     if (unlikely(!result)) {
1107         error_report("Unable to find SVQ address to unmap");
1108         return;
1109     }
1110 
1111     size = ROUND_UP(result->size, qemu_real_host_page_size());
1112     r = vhost_vdpa_dma_unmap(v->shared, v->address_space_id, result->iova,
1113                              size);
1114     if (unlikely(r < 0)) {
1115         error_report("Unable to unmap SVQ vring: %s (%d)", g_strerror(-r), -r);
1116         return;
1117     }
1118 
1119     vhost_iova_tree_remove(v->shared->iova_tree, *result);
1120 }
1121 
1122 static void vhost_vdpa_svq_unmap_rings(struct vhost_dev *dev,
1123                                        const VhostShadowVirtqueue *svq)
1124 {
1125     struct vhost_vdpa *v = dev->opaque;
1126     struct vhost_vring_addr svq_addr;
1127 
1128     vhost_svq_get_vring_addr(svq, &svq_addr);
1129 
1130     vhost_vdpa_svq_unmap_ring(v, svq_addr.desc_user_addr);
1131 
1132     vhost_vdpa_svq_unmap_ring(v, svq_addr.used_user_addr);
1133 }
1134 
1135 /**
1136  * Map the SVQ area in the device
1137  *
1138  * @v: Vhost-vdpa device
1139  * @needle: The area to map; its iova member is filled in on success
1140  * @errp: Error pointer
1141  */
1142 static bool vhost_vdpa_svq_map_ring(struct vhost_vdpa *v, DMAMap *needle,
1143                                     Error **errp)
1144 {
1145     int r;
1146 
1147     r = vhost_iova_tree_map_alloc(v->shared->iova_tree, needle);
1148     if (unlikely(r != IOVA_OK)) {
1149         error_setg(errp, "Cannot allocate iova (%d)", r);
1150         return false;
1151     }
1152 
1153     r = vhost_vdpa_dma_map(v->shared, v->address_space_id, needle->iova,
1154                            needle->size + 1,
1155                            (void *)(uintptr_t)needle->translated_addr,
1156                            needle->perm == IOMMU_RO);
1157     if (unlikely(r != 0)) {
1158         error_setg_errno(errp, -r, "Cannot map region to device");
1159         vhost_iova_tree_remove(v->shared->iova_tree, *needle);
1160     }
1161 
1162     return r == 0;
1163 }
1164 
1165 /**
1166  * Map the shadow virtqueue rings in the device
1167  *
1168  * @dev: The vhost device
1169  * @svq: The shadow virtqueue
1170  * @addr: Assigned IOVA addresses
1171  * @errp: Error pointer
1172  */
1173 static bool vhost_vdpa_svq_map_rings(struct vhost_dev *dev,
1174                                      const VhostShadowVirtqueue *svq,
1175                                      struct vhost_vring_addr *addr,
1176                                      Error **errp)
1177 {
1178     ERRP_GUARD();
1179     DMAMap device_region, driver_region;
1180     struct vhost_vring_addr svq_addr;
1181     struct vhost_vdpa *v = dev->opaque;
1182     size_t device_size = vhost_svq_device_area_size(svq);
1183     size_t driver_size = vhost_svq_driver_area_size(svq);
1184     size_t avail_offset;
1185     bool ok;
1186 
1187     vhost_svq_get_vring_addr(svq, &svq_addr);
1188 
1189     driver_region = (DMAMap) {
1190         .translated_addr = svq_addr.desc_user_addr,
1191         .size = driver_size - 1,
1192         .perm = IOMMU_RO,
1193     };
1194     ok = vhost_vdpa_svq_map_ring(v, &driver_region, errp);
1195     if (unlikely(!ok)) {
1196         error_prepend(errp, "Cannot create vq driver region: ");
1197         return false;
1198     }
1199     addr->desc_user_addr = driver_region.iova;
1200     avail_offset = svq_addr.avail_user_addr - svq_addr.desc_user_addr;
1201     addr->avail_user_addr = driver_region.iova + avail_offset;
1202 
1203     device_region = (DMAMap) {
1204         .translated_addr = svq_addr.used_user_addr,
1205         .size = device_size - 1,
1206         .perm = IOMMU_RW,
1207     };
1208     ok = vhost_vdpa_svq_map_ring(v, &device_region, errp);
1209     if (unlikely(!ok)) {
1210         error_prepend(errp, "Cannot create vq device region: ");
1211         vhost_vdpa_svq_unmap_ring(v, driver_region.translated_addr);
1212     }
1213     addr->used_user_addr = device_region.iova;
1214 
1215     return ok;
1216 }
1217 
1218 static bool vhost_vdpa_svq_setup(struct vhost_dev *dev,
1219                                  VhostShadowVirtqueue *svq, unsigned idx,
1220                                  Error **errp)
1221 {
1222     uint16_t vq_index = dev->vq_index + idx;
1223     struct vhost_vring_state s = {
1224         .index = vq_index,
1225     };
1226     int r;
1227 
1228     r = vhost_vdpa_set_dev_vring_base(dev, &s);
1229     if (unlikely(r)) {
1230         error_setg_errno(errp, -r, "Cannot set vring base");
1231         return false;
1232     }
1233 
1234     r = vhost_vdpa_svq_set_fds(dev, svq, idx, errp);
1235     return r == 0;
1236 }
1237 
1238 static bool vhost_vdpa_svqs_start(struct vhost_dev *dev)
1239 {
1240     struct vhost_vdpa *v = dev->opaque;
1241     Error *err = NULL;
1242     unsigned i;
1243 
1244     if (!v->shadow_vqs_enabled) {
1245         return true;
1246     }
1247 
1248     for (i = 0; i < v->shadow_vqs->len; ++i) {
1249         VirtQueue *vq = virtio_get_queue(dev->vdev, dev->vq_index + i);
1250         VhostShadowVirtqueue *svq = g_ptr_array_index(v->shadow_vqs, i);
1251         struct vhost_vring_addr addr = {
1252             .index = dev->vq_index + i,
1253         };
1254         int r;
1255         bool ok = vhost_vdpa_svq_setup(dev, svq, i, &err);
1256         if (unlikely(!ok)) {
1257             goto err;
1258         }
1259 
1260         vhost_svq_start(svq, dev->vdev, vq, v->shared->iova_tree);
1261         ok = vhost_vdpa_svq_map_rings(dev, svq, &addr, &err);
1262         if (unlikely(!ok)) {
1263             goto err_map;
1264         }
1265 
1266         /* Override vring GPA set by vhost subsystem */
1267         r = vhost_vdpa_set_vring_dev_addr(dev, &addr);
1268         if (unlikely(r != 0)) {
1269             error_setg_errno(&err, -r, "Cannot set device address");
1270             goto err_set_addr;
1271         }
1272     }
1273 
1274     return true;
1275 
1276 err_set_addr:
1277     vhost_vdpa_svq_unmap_rings(dev, g_ptr_array_index(v->shadow_vqs, i));
1278 
1279 err_map:
1280     vhost_svq_stop(g_ptr_array_index(v->shadow_vqs, i));
1281 
1282 err:
1283     error_reportf_err(err, "Cannot setup SVQ %u: ", i);
1284     for (unsigned j = 0; j < i; ++j) {
1285         VhostShadowVirtqueue *svq = g_ptr_array_index(v->shadow_vqs, j);
1286         vhost_vdpa_svq_unmap_rings(dev, svq);
1287         vhost_svq_stop(svq);
1288     }
1289 
1290     return false;
1291 }
1292 
1293 static void vhost_vdpa_svqs_stop(struct vhost_dev *dev)
1294 {
1295     struct vhost_vdpa *v = dev->opaque;
1296 
1297     if (!v->shadow_vqs_enabled) {
1298         return;
1299     }
1300 
1301     for (unsigned i = 0; i < v->shadow_vqs->len; ++i) {
1302         VhostShadowVirtqueue *svq = g_ptr_array_index(v->shadow_vqs, i);
1303 
1304         vhost_svq_stop(svq);
1305         vhost_vdpa_svq_unmap_rings(dev, svq);
1306 
1307         event_notifier_cleanup(&svq->hdev_kick);
1308         event_notifier_cleanup(&svq->hdev_call);
1309     }
1310 }
1311 
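/*
 * Suspend the device if it advertises VHOST_BACKEND_F_SUSPEND; otherwise
 * fall back to resetting it.
 */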
1312 static void vhost_vdpa_suspend(struct vhost_dev *dev)
1313 {
1314     struct vhost_vdpa *v = dev->opaque;
1315     int r;
1316 
1317     if (!vhost_vdpa_first_dev(dev)) {
1318         return;
1319     }
1320 
1321     if (dev->backend_cap & BIT_ULL(VHOST_BACKEND_F_SUSPEND)) {
1322         trace_vhost_vdpa_suspend(dev);
1323         r = ioctl(v->shared->device_fd, VHOST_VDPA_SUSPEND);
1324         if (unlikely(r)) {
1325             error_report("Cannot suspend: %s(%d)", g_strerror(errno), errno);
1326         } else {
1327             v->suspended = true;
1328             return;
1329         }
1330     }
1331 
1332     vhost_vdpa_reset_device(dev);
1333 }
1334 
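/*
 * Start or stop this vhost_dev. The memory listener registration and the
 * DRIVER_OK status bit are only handled once, by the last vhost_dev of the
 * device.
 */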
1335 static int vhost_vdpa_dev_start(struct vhost_dev *dev, bool started)
1336 {
1337     struct vhost_vdpa *v = dev->opaque;
1338     bool ok;
1339     trace_vhost_vdpa_dev_start(dev, started);
1340 
1341     if (started) {
1342         vhost_vdpa_host_notifiers_init(dev);
1343         ok = vhost_vdpa_svqs_start(dev);
1344         if (unlikely(!ok)) {
1345             return -1;
1346         }
1347     } else {
1348         vhost_vdpa_suspend(dev);
1349         vhost_vdpa_svqs_stop(dev);
1350         vhost_vdpa_host_notifiers_uninit(dev, dev->nvqs);
1351     }
1352 
1353     if (!vhost_vdpa_last_dev(dev)) {
1354         return 0;
1355     }
1356 
1357     if (started) {
1358         if (vhost_dev_has_iommu(dev) && (v->shadow_vqs_enabled)) {
1359             error_report("SVQ cannot be used while the IOMMU is enabled, "
1360                          "please disable the IOMMU and try again");
1361             return -1;
1362         }
1363         memory_listener_register(&v->shared->listener, dev->vdev->dma_as);
1364 
1365         return vhost_vdpa_add_status(dev, VIRTIO_CONFIG_S_DRIVER_OK);
1366     }
1367 
1368     return 0;
1369 }
1370 
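/*
 * Reset the device status from the last vhost_dev and leave it in the
 * ACKNOWLEDGE | DRIVER state, ready to be configured again.
 */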
1371 static void vhost_vdpa_reset_status(struct vhost_dev *dev)
1372 {
1373     struct vhost_vdpa *v = dev->opaque;
1374 
1375     if (!vhost_vdpa_last_dev(dev)) {
1376         return;
1377     }
1378 
1379     vhost_vdpa_reset_device(dev);
1380     vhost_vdpa_add_status(dev, VIRTIO_CONFIG_S_ACKNOWLEDGE |
1381                                VIRTIO_CONFIG_S_DRIVER);
1382     memory_listener_unregister(&v->shared->listener);
1383 }
1384 
1385 static int vhost_vdpa_set_log_base(struct vhost_dev *dev, uint64_t base,
1386                                      struct vhost_log *log)
1387 {
1388     struct vhost_vdpa *v = dev->opaque;
1389     if (v->shadow_vqs_enabled || !vhost_vdpa_first_dev(dev)) {
1390         return 0;
1391     }
1392 
1393     trace_vhost_vdpa_set_log_base(dev, base, log->size, log->refcnt, log->fd,
1394                                   log->log);
1395     return vhost_vdpa_call(dev, VHOST_SET_LOG_BASE, &base);
1396 }
1397 
1398 static int vhost_vdpa_set_vring_addr(struct vhost_dev *dev,
1399                                        struct vhost_vring_addr *addr)
1400 {
1401     struct vhost_vdpa *v = dev->opaque;
1402 
1403     if (v->shadow_vqs_enabled) {
1404         /*
1405          * Device vring addr was set at device start. SVQ base is handled by
1406          * VirtQueue code.
1407          */
1408         return 0;
1409     }
1410 
1411     return vhost_vdpa_set_vring_dev_addr(dev, addr);
1412 }
1413 
1414 static int vhost_vdpa_set_vring_num(struct vhost_dev *dev,
1415                                       struct vhost_vring_state *ring)
1416 {
1417     trace_vhost_vdpa_set_vring_num(dev, ring->index, ring->num);
1418     return vhost_vdpa_call(dev, VHOST_SET_VRING_NUM, ring);
1419 }
1420 
1421 static int vhost_vdpa_set_vring_base(struct vhost_dev *dev,
1422                                        struct vhost_vring_state *ring)
1423 {
1424     struct vhost_vdpa *v = dev->opaque;
1425 
1426     if (v->shadow_vqs_enabled) {
1427         /*
1428          * Device vring base was set at device start. SVQ base is handled by
1429          * VirtQueue code.
1430          */
1431         return 0;
1432     }
1433 
1434     return vhost_vdpa_set_dev_vring_base(dev, ring);
1435 }
1436 
1437 static int vhost_vdpa_get_vring_base(struct vhost_dev *dev,
1438                                        struct vhost_vring_state *ring)
1439 {
1440     struct vhost_vdpa *v = dev->opaque;
1441     int ret;
1442 
1443     if (v->shadow_vqs_enabled) {
1444         ring->num = virtio_queue_get_last_avail_idx(dev->vdev, ring->index);
1445         trace_vhost_vdpa_get_vring_base(dev, ring->index, ring->num, true);
1446         return 0;
1447     }
1448 
1449     if (!v->suspended) {
1450         /*
1451          * Cannot trust the value returned by the device; let vhost recover
1452          * the used idx from the guest.
1453          */
1454         return -1;
1455     }
1456 
1457     ret = vhost_vdpa_call(dev, VHOST_GET_VRING_BASE, ring);
1458     trace_vhost_vdpa_get_vring_base(dev, ring->index, ring->num, false);
1459     return ret;
1460 }
1461 
1462 static int vhost_vdpa_set_vring_kick(struct vhost_dev *dev,
1463                                        struct vhost_vring_file *file)
1464 {
1465     struct vhost_vdpa *v = dev->opaque;
1466     int vdpa_idx = file->index - dev->vq_index;
1467 
1468     if (v->shadow_vqs_enabled) {
1469         VhostShadowVirtqueue *svq = g_ptr_array_index(v->shadow_vqs, vdpa_idx);
1470         vhost_svq_set_svq_kick_fd(svq, file->fd);
1471         return 0;
1472     } else {
1473         return vhost_vdpa_set_vring_dev_kick(dev, file);
1474     }
1475 }
1476 
1477 static int vhost_vdpa_set_vring_call(struct vhost_dev *dev,
1478                                        struct vhost_vring_file *file)
1479 {
1480     struct vhost_vdpa *v = dev->opaque;
1481     int vdpa_idx = file->index - dev->vq_index;
1482     VhostShadowVirtqueue *svq = g_ptr_array_index(v->shadow_vqs, vdpa_idx);
1483 
1484     /* Remember last call fd because we can switch to SVQ anytime. */
1485     vhost_svq_set_svq_call_fd(svq, file->fd);
1486     /*
1487      * When SVQ is transitioning to off, shadow_vqs_enabled has
1488      * not been set back to false yet, but the underlying call fd
1489      * will have to switch back to the guest notifier to signal the
1490      * passthrough virtqueues. In other situations, SVQ's own call
1491      * fd shall be used to signal the device model.
1492      */
1493     if (v->shadow_vqs_enabled &&
1494         v->shared->svq_switching != SVQ_TSTATE_DISABLING) {
1495         return 0;
1496     }
1497 
1498     return vhost_vdpa_set_vring_dev_call(dev, file);
1499 }
1500 
1501 static int vhost_vdpa_get_features(struct vhost_dev *dev,
1502                                      uint64_t *features)
1503 {
1504     int ret = vhost_vdpa_get_dev_features(dev, features);
1505 
1506     if (ret == 0) {
1507         /* Add SVQ logging capabilities */
1508         *features |= BIT_ULL(VHOST_F_LOG_ALL);
1509     }
1510 
1511     return ret;
1512 }
1513 
1514 static int vhost_vdpa_set_owner(struct vhost_dev *dev)
1515 {
1516     if (!vhost_vdpa_first_dev(dev)) {
1517         return 0;
1518     }
1519 
1520     trace_vhost_vdpa_set_owner(dev);
1521     return vhost_vdpa_call(dev, VHOST_SET_OWNER, NULL);
1522 }
1523 
1524 static int vhost_vdpa_vq_get_addr(struct vhost_dev *dev,
1525                     struct vhost_vring_addr *addr, struct vhost_virtqueue *vq)
1526 {
1527     assert(dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_VDPA);
1528     addr->desc_user_addr = (uint64_t)(unsigned long)vq->desc_phys;
1529     addr->avail_user_addr = (uint64_t)(unsigned long)vq->avail_phys;
1530     addr->used_user_addr = (uint64_t)(unsigned long)vq->used_phys;
1531     trace_vhost_vdpa_vq_get_addr(dev, vq, addr->desc_user_addr,
1532                                  addr->avail_user_addr, addr->used_user_addr);
1533     return 0;
1534 }
1535 
1536 static bool vhost_vdpa_force_iommu(struct vhost_dev *dev)
1537 {
1538     return true;
1539 }
1540 
1541 const VhostOps vdpa_ops = {
1542         .backend_type = VHOST_BACKEND_TYPE_VDPA,
1543         .vhost_backend_init = vhost_vdpa_init,
1544         .vhost_backend_cleanup = vhost_vdpa_cleanup,
1545         .vhost_set_log_base = vhost_vdpa_set_log_base,
1546         .vhost_set_vring_addr = vhost_vdpa_set_vring_addr,
1547         .vhost_set_vring_num = vhost_vdpa_set_vring_num,
1548         .vhost_set_vring_base = vhost_vdpa_set_vring_base,
1549         .vhost_get_vring_base = vhost_vdpa_get_vring_base,
1550         .vhost_set_vring_kick = vhost_vdpa_set_vring_kick,
1551         .vhost_set_vring_call = vhost_vdpa_set_vring_call,
1552         .vhost_get_features = vhost_vdpa_get_features,
1553         .vhost_set_backend_cap = vhost_vdpa_set_backend_cap,
1554         .vhost_set_owner = vhost_vdpa_set_owner,
1555         .vhost_set_vring_endian = NULL,
1556         .vhost_backend_memslots_limit = vhost_vdpa_memslots_limit,
1557         .vhost_set_mem_table = vhost_vdpa_set_mem_table,
1558         .vhost_set_features = vhost_vdpa_set_features,
1559         .vhost_reset_device = vhost_vdpa_reset_device,
1560         .vhost_get_vq_index = vhost_vdpa_get_vq_index,
1561         .vhost_set_vring_enable = vhost_vdpa_set_vring_enable,
1562         .vhost_get_config  = vhost_vdpa_get_config,
1563         .vhost_set_config = vhost_vdpa_set_config,
1564         .vhost_requires_shm_log = NULL,
1565         .vhost_migration_done = NULL,
1566         .vhost_net_set_mtu = NULL,
1567         .vhost_set_iotlb_callback = NULL,
1568         .vhost_send_device_iotlb_msg = NULL,
1569         .vhost_dev_start = vhost_vdpa_dev_start,
1570         .vhost_get_device_id = vhost_vdpa_get_device_id,
1571         .vhost_vq_get_addr = vhost_vdpa_vq_get_addr,
1572         .vhost_force_iommu = vhost_vdpa_force_iommu,
1573         .vhost_set_config_call = vhost_vdpa_set_config_call,
1574         .vhost_reset_status = vhost_vdpa_reset_status,
1575 };
1576