xref: /openbmc/qemu/hw/virtio/vhost-vdpa.c (revision c49d1c37)
1 /*
2  * vhost-vdpa
3  *
4  *  Copyright(c) 2017-2018 Intel Corporation.
5  *  Copyright(c) 2020 Red Hat, Inc.
6  *
7  * This work is licensed under the terms of the GNU GPL, version 2 or later.
8  * See the COPYING file in the top-level directory.
9  *
10  */
11 
12 #include "qemu/osdep.h"
13 #include <linux/vhost.h>
14 #include <linux/vfio.h>
15 #include <sys/eventfd.h>
16 #include <sys/ioctl.h>
17 #include "exec/target_page.h"
18 #include "hw/virtio/vhost.h"
19 #include "hw/virtio/vhost-backend.h"
20 #include "hw/virtio/virtio-net.h"
21 #include "hw/virtio/vhost-shadow-virtqueue.h"
22 #include "hw/virtio/vhost-vdpa.h"
23 #include "exec/address-spaces.h"
24 #include "migration/blocker.h"
25 #include "qemu/cutils.h"
26 #include "qemu/main-loop.h"
27 #include "trace.h"
28 #include "qapi/error.h"
29 
30 /*
31  * Return one past the end of the section. Be careful with uint64_t
32  * conversions!
33  */
34 static Int128 vhost_vdpa_section_end(const MemoryRegionSection *section,
35                                      int page_mask)
36 {
37     Int128 llend = int128_make64(section->offset_within_address_space);
38     llend = int128_add(llend, section->size);
39     llend = int128_and(llend, int128_exts64(page_mask));
40 
41     return llend;
42 }
43 
44 static bool vhost_vdpa_listener_skipped_section(MemoryRegionSection *section,
45                                                 uint64_t iova_min,
46                                                 uint64_t iova_max,
47                                                 int page_mask)
48 {
49     Int128 llend;
50     bool is_ram = memory_region_is_ram(section->mr);
51     bool is_iommu = memory_region_is_iommu(section->mr);
52     bool is_protected = memory_region_is_protected(section->mr);
53 
54     /* vhost-vDPA doesn't allow MMIO to be mapped */
55     bool is_ram_device = memory_region_is_ram_device(section->mr);
56 
57     if ((!is_ram && !is_iommu) || is_protected || is_ram_device) {
58         trace_vhost_vdpa_skipped_memory_section(is_ram, is_iommu, is_protected,
59                                                 is_ram_device, iova_min,
60                                                 iova_max, page_mask);
61         return true;
62     }
63 
64     if (section->offset_within_address_space < iova_min) {
65         error_report("RAM section out of device range (min=0x%" PRIx64
66                      ", addr=0x%" HWADDR_PRIx ")",
67                      iova_min, section->offset_within_address_space);
68         return true;
69     }
70     /*
71      * While using a vIOMMU, the section can sometimes be larger than iova_max,
72      * but the memory that actually gets mapped is smaller, so the check is
73      * deferred to vhost_vdpa_iommu_map_notify(). That function uses the actual
74      * size that is mapped to the kernel.
75      */
76 
77     if (!is_iommu) {
78         llend = vhost_vdpa_section_end(section, page_mask);
79         if (int128_gt(llend, int128_make64(iova_max))) {
80             error_report("RAM section out of device range (max=0x%" PRIx64
81                          ", end addr=0x%" PRIx64 ")",
82                          iova_max, int128_get64(llend));
83             return true;
84         }
85     }
86 
87     return false;
88 }
89 
90 /*
91  * The caller must set asid = 0 if the device does not support asid.
92  * This is not an ABI break since it is set to 0 by the initializer anyway.
93  */
94 int vhost_vdpa_dma_map(VhostVDPAShared *s, uint32_t asid, hwaddr iova,
95                        hwaddr size, void *vaddr, bool readonly)
96 {
97     struct vhost_msg_v2 msg = {};
98     int fd = s->device_fd;
99     int ret = 0;
100 
101     msg.type = VHOST_IOTLB_MSG_V2;
102     msg.asid = asid;
103     msg.iotlb.iova = iova;
104     msg.iotlb.size = size;
105     msg.iotlb.uaddr = (uint64_t)(uintptr_t)vaddr;
106     msg.iotlb.perm = readonly ? VHOST_ACCESS_RO : VHOST_ACCESS_RW;
107     msg.iotlb.type = VHOST_IOTLB_UPDATE;
108 
109     trace_vhost_vdpa_dma_map(s, fd, msg.type, msg.asid, msg.iotlb.iova,
110                              msg.iotlb.size, msg.iotlb.uaddr, msg.iotlb.perm,
111                              msg.iotlb.type);
112 
113     if (write(fd, &msg, sizeof(msg)) != sizeof(msg)) {
114         error_report("failed to write, fd=%d, errno=%d (%s)",
115             fd, errno, strerror(errno));
116         return -EIO;
117     }
118 
119     return ret;
120 }
121 
122 /*
123  * The caller must set asid = 0 if the device does not support asid.
124  * This is not an ABI break since it is set to 0 by the initializer anyway.
125  */
126 int vhost_vdpa_dma_unmap(VhostVDPAShared *s, uint32_t asid, hwaddr iova,
127                          hwaddr size)
128 {
129     struct vhost_msg_v2 msg = {};
130     int fd = s->device_fd;
131     int ret = 0;
132 
133     msg.type = VHOST_IOTLB_MSG_V2;
134     msg.asid = asid;
135     msg.iotlb.iova = iova;
136     msg.iotlb.size = size;
137     msg.iotlb.type = VHOST_IOTLB_INVALIDATE;
138 
139     trace_vhost_vdpa_dma_unmap(s, fd, msg.type, msg.asid, msg.iotlb.iova,
140                                msg.iotlb.size, msg.iotlb.type);
141 
142     if (write(fd, &msg, sizeof(msg)) != sizeof(msg)) {
143         error_report("failed to write, fd=%d, errno=%d (%s)",
144             fd, errno, strerror(errno));
145         return -EIO;
146     }
147 
148     return ret;
149 }
150 
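/*
 * Send a VHOST_IOTLB_BATCH_BEGIN message so that the kernel can apply the
 * following map/unmap requests as a single batch.
 */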
151 static void vhost_vdpa_listener_begin_batch(VhostVDPAShared *s)
152 {
153     int fd = s->device_fd;
154     struct vhost_msg_v2 msg = {
155         .type = VHOST_IOTLB_MSG_V2,
156         .iotlb.type = VHOST_IOTLB_BATCH_BEGIN,
157     };
158 
159     trace_vhost_vdpa_listener_begin_batch(s, fd, msg.type, msg.iotlb.type);
160     if (write(fd, &msg, sizeof(msg)) != sizeof(msg)) {
161         error_report("failed to write, fd=%d, errno=%d (%s)",
162                      fd, errno, strerror(errno));
163     }
164 }
165 
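/*
 * Send the batch-begin message at most once per listener transaction, and
 * only when the backend advertises VHOST_BACKEND_F_IOTLB_BATCH; the
 * iotlb_batch_begin_sent flag is cleared again in vhost_vdpa_listener_commit().
 */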
166 static void vhost_vdpa_iotlb_batch_begin_once(VhostVDPAShared *s)
167 {
168     if (s->backend_cap & (0x1ULL << VHOST_BACKEND_F_IOTLB_BATCH) &&
169         !s->iotlb_batch_begin_sent) {
170         vhost_vdpa_listener_begin_batch(s);
171     }
172 
173     s->iotlb_batch_begin_sent = true;
174 }
175 
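/*
 * Memory listener commit callback: if a batch was opened, close it with a
 * VHOST_IOTLB_BATCH_END message.
 */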
176 static void vhost_vdpa_listener_commit(MemoryListener *listener)
177 {
178     VhostVDPAShared *s = container_of(listener, VhostVDPAShared, listener);
179     struct vhost_msg_v2 msg = {};
180     int fd = s->device_fd;
181 
182     if (!(s->backend_cap & (0x1ULL << VHOST_BACKEND_F_IOTLB_BATCH))) {
183         return;
184     }
185 
186     if (!s->iotlb_batch_begin_sent) {
187         return;
188     }
189 
190     msg.type = VHOST_IOTLB_MSG_V2;
191     msg.iotlb.type = VHOST_IOTLB_BATCH_END;
192 
193     trace_vhost_vdpa_listener_commit(s, fd, msg.type, msg.iotlb.type);
194     if (write(fd, &msg, sizeof(msg)) != sizeof(msg)) {
195         error_report("failed to write, fd=%d, errno=%d (%s)",
196                      fd, errno, strerror(errno));
197     }
198 
199     s->iotlb_batch_begin_sent = false;
200 }
201 
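/*
 * vIOMMU notifier: forward guest IOTLB events to the device, mapping entries
 * that grant access and unmapping invalidated ones.
 */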
202 static void vhost_vdpa_iommu_map_notify(IOMMUNotifier *n, IOMMUTLBEntry *iotlb)
203 {
204     struct vdpa_iommu *iommu = container_of(n, struct vdpa_iommu, n);
205 
206     hwaddr iova = iotlb->iova + iommu->iommu_offset;
207     VhostVDPAShared *s = iommu->dev_shared;
208     void *vaddr;
209     int ret;
210     Int128 llend;
211     Error *local_err = NULL;
212 
213     if (iotlb->target_as != &address_space_memory) {
214         error_report("Wrong target AS \"%s\", only system memory is allowed",
215                      iotlb->target_as->name ? iotlb->target_as->name : "none");
216         return;
217     }
218     RCU_READ_LOCK_GUARD();
219     /* Check if the RAM section is out of the device range */
220     llend = int128_add(int128_makes64(iotlb->addr_mask), int128_makes64(iova));
221     if (int128_gt(llend, int128_make64(s->iova_range.last))) {
222         error_report("RAM section out of device range (max=0x%" PRIx64
223                      ", end addr=0x%" PRIx64 ")",
224                      s->iova_range.last, int128_get64(llend));
225         return;
226     }
227 
228     if ((iotlb->perm & IOMMU_RW) != IOMMU_NONE) {
229         bool read_only;
230 
231         if (!memory_get_xlat_addr(iotlb, &vaddr, NULL, &read_only, NULL,
232                                   &local_err)) {
233             error_report_err(local_err);
234             return;
235         }
236         ret = vhost_vdpa_dma_map(s, VHOST_VDPA_GUEST_PA_ASID, iova,
237                                  iotlb->addr_mask + 1, vaddr, read_only);
238         if (ret) {
239             error_report("vhost_vdpa_dma_map(%p, 0x%" HWADDR_PRIx ", "
240                          "0x%" HWADDR_PRIx ", %p) = %d (%m)",
241                          s, iova, iotlb->addr_mask + 1, vaddr, ret);
242         }
243     } else {
244         ret = vhost_vdpa_dma_unmap(s, VHOST_VDPA_GUEST_PA_ASID, iova,
245                                    iotlb->addr_mask + 1);
246         if (ret) {
247             error_report("vhost_vdpa_dma_unmap(%p, 0x%" HWADDR_PRIx ", "
248                          "0x%" HWADDR_PRIx ") = %d (%m)",
249                          s, iova, iotlb->addr_mask + 1, ret);
250         }
251     }
252 }
253 
254 static void vhost_vdpa_iommu_region_add(MemoryListener *listener,
255                                         MemoryRegionSection *section)
256 {
257     VhostVDPAShared *s = container_of(listener, VhostVDPAShared, listener);
258 
259     struct vdpa_iommu *iommu;
260     Int128 end;
261     int iommu_idx;
262     IOMMUMemoryRegion *iommu_mr;
263     int ret;
264 
265     iommu_mr = IOMMU_MEMORY_REGION(section->mr);
266 
267     iommu = g_malloc0(sizeof(*iommu));
268     end = int128_add(int128_make64(section->offset_within_region),
269                      section->size);
270     end = int128_sub(end, int128_one());
271     iommu_idx = memory_region_iommu_attrs_to_index(iommu_mr,
272                                                    MEMTXATTRS_UNSPECIFIED);
273     iommu->iommu_mr = iommu_mr;
274     iommu_notifier_init(&iommu->n, vhost_vdpa_iommu_map_notify,
275                         IOMMU_NOTIFIER_IOTLB_EVENTS,
276                         section->offset_within_region,
277                         int128_get64(end),
278                         iommu_idx);
279     iommu->iommu_offset = section->offset_within_address_space -
280                           section->offset_within_region;
281     iommu->dev_shared = s;
282 
283     ret = memory_region_register_iommu_notifier(section->mr, &iommu->n, NULL);
284     if (ret) {
285         g_free(iommu);
286         return;
287     }
288 
289     QLIST_INSERT_HEAD(&s->iommu_list, iommu, iommu_next);
290     memory_region_iommu_replay(iommu->iommu_mr, &iommu->n);
291 
292     return;
293 }
294 
295 static void vhost_vdpa_iommu_region_del(MemoryListener *listener,
296                                         MemoryRegionSection *section)
297 {
298     VhostVDPAShared *s = container_of(listener, VhostVDPAShared, listener);
299 
300     struct vdpa_iommu *iommu;
301 
302     QLIST_FOREACH(iommu, &s->iommu_list, iommu_next)
303     {
304         if (MEMORY_REGION(iommu->iommu_mr) == section->mr &&
305             iommu->n.start == section->offset_within_region) {
306             memory_region_unregister_iommu_notifier(section->mr, &iommu->n);
307             QLIST_REMOVE(iommu, iommu_next);
308             g_free(iommu);
309             break;
310         }
311     }
312 }
313 
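/*
 * Map a RAM section into the device. When shadow_data is set, an IOVA is
 * allocated from the IOVA tree first, so the section is mapped at that IOVA
 * rather than at its guest physical address.
 */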
314 static void vhost_vdpa_listener_region_add(MemoryListener *listener,
315                                            MemoryRegionSection *section)
316 {
317     DMAMap mem_region = {};
318     VhostVDPAShared *s = container_of(listener, VhostVDPAShared, listener);
319     hwaddr iova;
320     Int128 llend, llsize;
321     void *vaddr;
322     int ret;
323     int page_size = qemu_target_page_size();
324     int page_mask = -page_size;
325 
326     if (vhost_vdpa_listener_skipped_section(section, s->iova_range.first,
327                                             s->iova_range.last, page_mask)) {
328         return;
329     }
330     if (memory_region_is_iommu(section->mr)) {
331         vhost_vdpa_iommu_region_add(listener, section);
332         return;
333     }
334 
335     if (unlikely((section->offset_within_address_space & ~page_mask) !=
336                  (section->offset_within_region & ~page_mask))) {
337         trace_vhost_vdpa_listener_region_add_unaligned(s, section->mr->name,
338                        section->offset_within_address_space & ~page_mask,
339                        section->offset_within_region & ~page_mask);
340         return;
341     }
342 
343     iova = ROUND_UP(section->offset_within_address_space, page_size);
344     llend = vhost_vdpa_section_end(section, page_mask);
345     if (int128_ge(int128_make64(iova), llend)) {
346         return;
347     }
348 
349     memory_region_ref(section->mr);
350 
351     /* Here we assume that memory_region_is_ram(section->mr)==true */
352 
353     vaddr = memory_region_get_ram_ptr(section->mr) +
354             section->offset_within_region +
355             (iova - section->offset_within_address_space);
356 
357     trace_vhost_vdpa_listener_region_add(s, iova, int128_get64(llend),
358                                          vaddr, section->readonly);
359 
360     llsize = int128_sub(llend, int128_make64(iova));
361     if (s->shadow_data) {
362         int r;
363 
364         mem_region.translated_addr = (hwaddr)(uintptr_t)vaddr;
365         mem_region.size = int128_get64(llsize) - 1;
366         mem_region.perm = IOMMU_ACCESS_FLAG(true, section->readonly);
367 
368         r = vhost_iova_tree_map_alloc(s->iova_tree, &mem_region);
369         if (unlikely(r != IOVA_OK)) {
370             error_report("Can't allocate a mapping (%d)", r);
371             goto fail;
372         }
373 
374         iova = mem_region.iova;
375     }
376 
377     vhost_vdpa_iotlb_batch_begin_once(s);
378     ret = vhost_vdpa_dma_map(s, VHOST_VDPA_GUEST_PA_ASID, iova,
379                              int128_get64(llsize), vaddr, section->readonly);
380     if (ret) {
381         error_report("vhost vdpa map fail!");
382         goto fail_map;
383     }
384 
385     return;
386 
387 fail_map:
388     if (s->shadow_data) {
389         vhost_iova_tree_remove(s->iova_tree, mem_region);
390     }
391 
392 fail:
393     /*
394      * On the initfn path, store the first error in the container so we
395      * can gracefully fail.  At runtime, there's not much we can do other
396      * than throw a hardware error.
397      */
398     error_report("vhost-vdpa: DMA mapping failed, unable to continue");
399     return;
400 
401 }
402 
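/*
 * Unmap a RAM section from the device. When shadow_data is set, the IOVA is
 * looked up in (and removed from) the IOVA tree. A full 2^64-byte range is
 * unmapped in two halves because the unmap ioctl cannot express it.
 */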
403 static void vhost_vdpa_listener_region_del(MemoryListener *listener,
404                                            MemoryRegionSection *section)
405 {
406     VhostVDPAShared *s = container_of(listener, VhostVDPAShared, listener);
407     hwaddr iova;
408     Int128 llend, llsize;
409     int ret;
410     int page_size = qemu_target_page_size();
411     int page_mask = -page_size;
412 
413     if (vhost_vdpa_listener_skipped_section(section, s->iova_range.first,
414                                             s->iova_range.last, page_mask)) {
415         return;
416     }
417     if (memory_region_is_iommu(section->mr)) {
418         vhost_vdpa_iommu_region_del(listener, section);
419     }
420 
421     if (unlikely((section->offset_within_address_space & ~page_mask) !=
422                  (section->offset_within_region & ~page_mask))) {
423         trace_vhost_vdpa_listener_region_del_unaligned(s, section->mr->name,
424                        section->offset_within_address_space & ~page_mask,
425                        section->offset_within_region & ~page_mask);
426         return;
427     }
428 
429     iova = ROUND_UP(section->offset_within_address_space, page_size);
430     llend = vhost_vdpa_section_end(section, page_mask);
431 
432     trace_vhost_vdpa_listener_region_del(s, iova,
433         int128_get64(int128_sub(llend, int128_one())));
434 
435     if (int128_ge(int128_make64(iova), llend)) {
436         return;
437     }
438 
439     llsize = int128_sub(llend, int128_make64(iova));
440 
441     if (s->shadow_data) {
442         const DMAMap *result;
443         const void *vaddr = memory_region_get_ram_ptr(section->mr) +
444             section->offset_within_region +
445             (iova - section->offset_within_address_space);
446         DMAMap mem_region = {
447             .translated_addr = (hwaddr)(uintptr_t)vaddr,
448             .size = int128_get64(llsize) - 1,
449         };
450 
451         result = vhost_iova_tree_find_iova(s->iova_tree, &mem_region);
452         if (!result) {
453             /* The region wasn't mapped by the memory listener */
454             return;
455         }
456         iova = result->iova;
457         vhost_iova_tree_remove(s->iova_tree, *result);
458     }
459     vhost_vdpa_iotlb_batch_begin_once(s);
460     /*
461      * The unmap ioctl doesn't accept a full 64-bit span, so split it in two.
462      */
463     if (int128_eq(llsize, int128_2_64())) {
464         llsize = int128_rshift(llsize, 1);
465         ret = vhost_vdpa_dma_unmap(s, VHOST_VDPA_GUEST_PA_ASID, iova,
466                                    int128_get64(llsize));
467 
468         if (ret) {
469             error_report("vhost_vdpa_dma_unmap(%p, 0x%" HWADDR_PRIx ", "
470                          "0x%" HWADDR_PRIx ") = %d (%m)",
471                          s, iova, int128_get64(llsize), ret);
472         }
473         iova += int128_get64(llsize);
474     }
475     ret = vhost_vdpa_dma_unmap(s, VHOST_VDPA_GUEST_PA_ASID, iova,
476                                int128_get64(llsize));
477 
478     if (ret) {
479         error_report("vhost_vdpa_dma_unmap(%p, 0x%" HWADDR_PRIx ", "
480                      "0x%" HWADDR_PRIx ") = %d (%m)",
481                      s, iova, int128_get64(llsize), ret);
482     }
483 
484     memory_region_unref(section->mr);
485 }
486 /*
487  * vhost-vdpa uses the IOTLB API, which requires incremental updates of the
488  * mapping, so we cannot use the generic vhost memory listener, which
489  * depends on addnop().
490  */
491 static const MemoryListener vhost_vdpa_memory_listener = {
492     .name = "vhost-vdpa",
493     .commit = vhost_vdpa_listener_commit,
494     .region_add = vhost_vdpa_listener_region_add,
495     .region_del = vhost_vdpa_listener_region_del,
496 };
497 
498 static int vhost_vdpa_call(struct vhost_dev *dev, unsigned long int request,
499                              void *arg)
500 {
501     struct vhost_vdpa *v = dev->opaque;
502     int fd = v->shared->device_fd;
503     int ret;
504 
505     assert(dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_VDPA);
506 
507     ret = ioctl(fd, request, arg);
508     return ret < 0 ? -errno : ret;
509 }
510 
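/*
 * OR the given bits into the device status, then read the status back to
 * verify that the device actually accepted them.
 */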
511 static int vhost_vdpa_add_status(struct vhost_dev *dev, uint8_t status)
512 {
513     uint8_t s;
514     int ret;
515 
516     trace_vhost_vdpa_add_status(dev, status);
517     ret = vhost_vdpa_call(dev, VHOST_VDPA_GET_STATUS, &s);
518     if (ret < 0) {
519         return ret;
520     }
521     if ((s & status) == status) {
522         /* Don't set bits already set */
523         return 0;
524     }
525 
526     s |= status;
527 
528     ret = vhost_vdpa_call(dev, VHOST_VDPA_SET_STATUS, &s);
529     if (ret < 0) {
530         return ret;
531     }
532 
533     ret = vhost_vdpa_call(dev, VHOST_VDPA_GET_STATUS, &s);
534     if (ret < 0) {
535         return ret;
536     }
537 
538     if (!(s & status)) {
539         return -EIO;
540     }
541 
542     return 0;
543 }
544 
545 int vhost_vdpa_get_iova_range(int fd, struct vhost_vdpa_iova_range *iova_range)
546 {
547     int ret = ioctl(fd, VHOST_VDPA_GET_IOVA_RANGE, iova_range);
548 
549     return ret < 0 ? -errno : 0;
550 }
551 
552 /*
553  * This function is for requests that only need to be applied once.
554  * Typically such a request occurs at the beginning of operation,
555  * before the queues are set up. It should not be used for requests
556  * that must wait until all queues are set, which would need to check
557  * dev->vq_index_end instead.
558  */
559 static bool vhost_vdpa_first_dev(struct vhost_dev *dev)
560 {
561     struct vhost_vdpa *v = dev->opaque;
562 
563     return v->index == 0;
564 }
565 
566 static bool vhost_vdpa_last_dev(struct vhost_dev *dev)
567 {
568     return dev->vq_index + dev->nvqs == dev->vq_index_end;
569 }
570 
571 static int vhost_vdpa_get_dev_features(struct vhost_dev *dev,
572                                        uint64_t *features)
573 {
574     int ret;
575 
576     ret = vhost_vdpa_call(dev, VHOST_GET_FEATURES, features);
577     trace_vhost_vdpa_get_features(dev, *features);
578     return ret;
579 }
580 
581 static void vhost_vdpa_init_svq(struct vhost_dev *hdev, struct vhost_vdpa *v)
582 {
583     g_autoptr(GPtrArray) shadow_vqs = NULL;
584 
585     shadow_vqs = g_ptr_array_new_full(hdev->nvqs, vhost_svq_free);
586     for (unsigned n = 0; n < hdev->nvqs; ++n) {
587         VhostShadowVirtqueue *svq;
588 
589         svq = vhost_svq_new(v->shadow_vq_ops, v->shadow_vq_ops_opaque);
590         g_ptr_array_add(shadow_vqs, svq);
591     }
592 
593     v->shadow_vqs = g_steal_pointer(&shadow_vqs);
594 }
595 
596 static int vhost_vdpa_init(struct vhost_dev *dev, void *opaque, Error **errp)
597 {
598     struct vhost_vdpa *v = opaque;
599     assert(dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_VDPA);
600     trace_vhost_vdpa_init(dev, v->shared, opaque);
601     int ret;
602 
603     v->dev = dev;
604     dev->opaque = opaque;
605     v->shared->listener = vhost_vdpa_memory_listener;
606     vhost_vdpa_init_svq(dev, v);
607 
608     error_propagate(&dev->migration_blocker, v->migration_blocker);
609     if (!vhost_vdpa_first_dev(dev)) {
610         return 0;
611     }
612 
613     /*
614      * If v->shadow_vqs_enabled is set at initialization, the device has been
615      * started with x-svq=on, so don't block migration.
616      */
617     if (dev->migration_blocker == NULL && !v->shadow_vqs_enabled) {
618         /* We don't have dev->features yet */
619         uint64_t features;
620         ret = vhost_vdpa_get_dev_features(dev, &features);
621         if (unlikely(ret)) {
622             error_setg_errno(errp, -ret, "Could not get device features");
623             return ret;
624         }
625         vhost_svq_valid_features(features, &dev->migration_blocker);
626     }
627 
628     /*
629      * Similar to VFIO, we end up pinning all guest memory and have to
630      * disable discarding of RAM.
631      */
632     ret = ram_block_discard_disable(true);
633     if (ret) {
634         error_report("Cannot disable discarding of RAM");
635         return ret;
636     }
637 
638     vhost_vdpa_add_status(dev, VIRTIO_CONFIG_S_ACKNOWLEDGE |
639                                VIRTIO_CONFIG_S_DRIVER);
640 
641     return 0;
642 }
643 
644 static void vhost_vdpa_host_notifier_uninit(struct vhost_dev *dev,
645                                             int queue_index)
646 {
647     size_t page_size = qemu_real_host_page_size();
648     struct vhost_vdpa *v = dev->opaque;
649     VirtIODevice *vdev = dev->vdev;
650     VhostVDPAHostNotifier *n;
651 
652     n = &v->notifier[queue_index];
653 
654     if (n->addr) {
655         virtio_queue_set_host_notifier_mr(vdev, queue_index, &n->mr, false);
656         object_unparent(OBJECT(&n->mr));
657         munmap(n->addr, page_size);
658         n->addr = NULL;
659     }
660 }
661 
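/*
 * mmap the doorbell page of this queue from the vhost-vdpa fd and expose it
 * to the guest as a ram-device memory region, so that kicks can reach the
 * device without trapping into QEMU.
 */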
662 static int vhost_vdpa_host_notifier_init(struct vhost_dev *dev, int queue_index)
663 {
664     size_t page_size = qemu_real_host_page_size();
665     struct vhost_vdpa *v = dev->opaque;
666     VirtIODevice *vdev = dev->vdev;
667     VhostVDPAHostNotifier *n;
668     int fd = v->shared->device_fd;
669     void *addr;
670     char *name;
671 
672     vhost_vdpa_host_notifier_uninit(dev, queue_index);
673 
674     n = &v->notifier[queue_index];
675 
676     addr = mmap(NULL, page_size, PROT_WRITE, MAP_SHARED, fd,
677                 queue_index * page_size);
678     if (addr == MAP_FAILED) {
679         goto err;
680     }
681 
682     name = g_strdup_printf("vhost-vdpa/host-notifier@%p mmaps[%d]",
683                            v, queue_index);
684     memory_region_init_ram_device_ptr(&n->mr, OBJECT(vdev), name,
685                                       page_size, addr);
686     g_free(name);
687 
688     if (virtio_queue_set_host_notifier_mr(vdev, queue_index, &n->mr, true)) {
689         object_unparent(OBJECT(&n->mr));
690         munmap(addr, page_size);
691         goto err;
692     }
693     n->addr = addr;
694 
695     return 0;
696 
697 err:
698     return -1;
699 }
700 
701 static void vhost_vdpa_host_notifiers_uninit(struct vhost_dev *dev, int n)
702 {
703     int i;
704 
705     /*
706      * Pack all the changes to the memory regions in a single
707      * transaction to avoid repeated updates of the address space
708      * topology.
709      */
710     memory_region_transaction_begin();
711 
712     for (i = dev->vq_index; i < dev->vq_index + n; i++) {
713         vhost_vdpa_host_notifier_uninit(dev, i);
714     }
715 
716     memory_region_transaction_commit();
717 }
718 
719 static void vhost_vdpa_host_notifiers_init(struct vhost_dev *dev)
720 {
721     struct vhost_vdpa *v = dev->opaque;
722     int i;
723 
724     if (v->shadow_vqs_enabled) {
725         /* FIXME SVQ is not compatible with host notifiers mr */
726         return;
727     }
728 
729     /*
730      * Pack all the changes to the memory regions in a single
731      * transaction to avoid repeated updates of the address space
732      * topology.
733      */
734     memory_region_transaction_begin();
735 
736     for (i = dev->vq_index; i < dev->vq_index + dev->nvqs; i++) {
737         if (vhost_vdpa_host_notifier_init(dev, i)) {
738             vhost_vdpa_host_notifiers_uninit(dev, i - dev->vq_index);
739             break;
740         }
741     }
742 
743     memory_region_transaction_commit();
744 }
745 
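/* Stop all shadow virtqueues and free the array that holds them. */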
746 static void vhost_vdpa_svq_cleanup(struct vhost_dev *dev)
747 {
748     struct vhost_vdpa *v = dev->opaque;
749     size_t idx;
750 
751     for (idx = 0; idx < v->shadow_vqs->len; ++idx) {
752         vhost_svq_stop(g_ptr_array_index(v->shadow_vqs, idx));
753     }
754     g_ptr_array_free(v->shadow_vqs, true);
755 }
756 
757 static int vhost_vdpa_cleanup(struct vhost_dev *dev)
758 {
759     struct vhost_vdpa *v;
760     assert(dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_VDPA);
761     v = dev->opaque;
762     trace_vhost_vdpa_cleanup(dev, v);
763     if (vhost_vdpa_first_dev(dev)) {
764         ram_block_discard_disable(false);
765         memory_listener_unregister(&v->shared->listener);
766     }
767 
768     vhost_vdpa_host_notifiers_uninit(dev, dev->nvqs);
769     vhost_vdpa_svq_cleanup(dev);
770 
771     dev->opaque = NULL;
772 
773     return 0;
774 }
775 
776 static int vhost_vdpa_memslots_limit(struct vhost_dev *dev)
777 {
778     trace_vhost_vdpa_memslots_limit(dev, INT_MAX);
779     return INT_MAX;
780 }
781 
782 static int vhost_vdpa_set_mem_table(struct vhost_dev *dev,
783                                     struct vhost_memory *mem)
784 {
785     if (!vhost_vdpa_first_dev(dev)) {
786         return 0;
787     }
788 
789     trace_vhost_vdpa_set_mem_table(dev, mem->nregions, mem->padding);
790     if (trace_event_get_state_backends(TRACE_VHOST_VDPA_SET_MEM_TABLE) &&
791         trace_event_get_state_backends(TRACE_VHOST_VDPA_DUMP_REGIONS)) {
792         int i;
793         for (i = 0; i < mem->nregions; i++) {
794             trace_vhost_vdpa_dump_regions(dev, i,
795                                           mem->regions[i].guest_phys_addr,
796                                           mem->regions[i].memory_size,
797                                           mem->regions[i].userspace_addr,
798                                           mem->regions[i].flags_padding);
799         }
800     }
801     if (mem->padding) {
802         return -EINVAL;
803     }
804 
805     return 0;
806 }
807 
808 static int vhost_vdpa_set_features(struct vhost_dev *dev,
809                                    uint64_t features)
810 {
811     struct vhost_vdpa *v = dev->opaque;
812     int ret;
813 
814     if (!vhost_vdpa_first_dev(dev)) {
815         return 0;
816     }
817 
818     if (v->shadow_vqs_enabled) {
819         if ((v->acked_features ^ features) == BIT_ULL(VHOST_F_LOG_ALL)) {
820             /*
821              * QEMU is just trying to enable or disable logging. SVQ handles
822              * this separately, so no need to forward it.
823              */
824             v->acked_features = features;
825             return 0;
826         }
827 
828         v->acked_features = features;
829 
830         /* We must not ack _F_LOG if SVQ is enabled */
831         features &= ~BIT_ULL(VHOST_F_LOG_ALL);
832     }
833 
834     trace_vhost_vdpa_set_features(dev, features);
835     ret = vhost_vdpa_call(dev, VHOST_SET_FEATURES, &features);
836     if (ret) {
837         return ret;
838     }
839 
840     return vhost_vdpa_add_status(dev, VIRTIO_CONFIG_S_FEATURES_OK);
841 }
842 
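/*
 * Negotiate backend features (IOTLB message v2, batching, ASID, suspend).
 * Only the first device of the group acks them with the kernel; the result
 * is cached in dev->backend_cap and in the shared state.
 */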
843 static int vhost_vdpa_set_backend_cap(struct vhost_dev *dev)
844 {
845     struct vhost_vdpa *v = dev->opaque;
846 
847     uint64_t features;
848     uint64_t f = 0x1ULL << VHOST_BACKEND_F_IOTLB_MSG_V2 |
849         0x1ULL << VHOST_BACKEND_F_IOTLB_BATCH |
850         0x1ULL << VHOST_BACKEND_F_IOTLB_ASID |
851         0x1ULL << VHOST_BACKEND_F_SUSPEND;
852     int r;
853 
854     if (vhost_vdpa_call(dev, VHOST_GET_BACKEND_FEATURES, &features)) {
855         return -EFAULT;
856     }
857 
858     features &= f;
859 
860     if (vhost_vdpa_first_dev(dev)) {
861         r = vhost_vdpa_call(dev, VHOST_SET_BACKEND_FEATURES, &features);
862         if (r) {
863             return -EFAULT;
864         }
865     }
866 
867     dev->backend_cap = features;
868     v->shared->backend_cap = features;
869 
870     return 0;
871 }
872 
873 static int vhost_vdpa_get_device_id(struct vhost_dev *dev,
874                                     uint32_t *device_id)
875 {
876     int ret;
877     ret = vhost_vdpa_call(dev, VHOST_VDPA_GET_DEVICE_ID, device_id);
878     trace_vhost_vdpa_get_device_id(dev, *device_id);
879     return ret;
880 }
881 
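/* Reset the device by writing a zero status, and clear the suspended flag. */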
882 static int vhost_vdpa_reset_device(struct vhost_dev *dev)
883 {
884     struct vhost_vdpa *v = dev->opaque;
885     int ret;
886     uint8_t status = 0;
887 
888     ret = vhost_vdpa_call(dev, VHOST_VDPA_SET_STATUS, &status);
889     trace_vhost_vdpa_reset_device(dev);
890     v->suspended = false;
891     return ret;
892 }
893 
894 static int vhost_vdpa_get_vq_index(struct vhost_dev *dev, int idx)
895 {
896     assert(idx >= dev->vq_index && idx < dev->vq_index + dev->nvqs);
897 
898     trace_vhost_vdpa_get_vq_index(dev, idx, idx);
899     return idx;
900 }
901 
902 static int vhost_vdpa_set_vring_enable_one(struct vhost_vdpa *v, unsigned idx,
903                                            int enable)
904 {
905     struct vhost_dev *dev = v->dev;
906     struct vhost_vring_state state = {
907         .index = idx,
908         .num = enable,
909     };
910     int r = vhost_vdpa_call(dev, VHOST_VDPA_SET_VRING_ENABLE, &state);
911 
912     trace_vhost_vdpa_set_vring_enable_one(dev, idx, enable, r);
913     return r;
914 }
915 
916 static int vhost_vdpa_set_vring_enable(struct vhost_dev *dev, int enable)
917 {
918     struct vhost_vdpa *v = dev->opaque;
919     unsigned int i;
920     int ret;
921 
922     for (i = 0; i < dev->nvqs; ++i) {
923         ret = vhost_vdpa_set_vring_enable_one(v, i, enable);
924         if (ret < 0) {
925             return ret;
926         }
927     }
928 
929     return 0;
930 }
931 
932 int vhost_vdpa_set_vring_ready(struct vhost_vdpa *v, unsigned idx)
933 {
934     return vhost_vdpa_set_vring_enable_one(v, idx, 1);
935 }
936 
937 static int vhost_vdpa_set_config_call(struct vhost_dev *dev,
938                                        int fd)
939 {
940     trace_vhost_vdpa_set_config_call(dev, fd);
941     return vhost_vdpa_call(dev, VHOST_VDPA_SET_CONFIG_CALL, &fd);
942 }
943 
944 static void vhost_vdpa_dump_config(struct vhost_dev *dev, const uint8_t *config,
945                                    uint32_t config_len)
946 {
947     g_autoptr(GString) str = g_string_sized_new(4 * 16);
948     size_t b, len;
949 
950     for (b = 0; b < config_len; b += len) {
951         len = MIN(config_len - b, 16);
952 
953         g_string_truncate(str, 0);
954         qemu_hexdump_line(str, config + b, len, 1, 4);
955         trace_vhost_vdpa_dump_config(dev, b, str->str);
956     }
957 }
958 
959 static int vhost_vdpa_set_config(struct vhost_dev *dev, const uint8_t *data,
960                                    uint32_t offset, uint32_t size,
961                                    uint32_t flags)
962 {
963     struct vhost_vdpa_config *config;
964     int ret;
965     unsigned long config_size = offsetof(struct vhost_vdpa_config, buf);
966 
967     trace_vhost_vdpa_set_config(dev, offset, size, flags);
968     config = g_malloc(size + config_size);
969     config->off = offset;
970     config->len = size;
971     memcpy(config->buf, data, size);
972     if (trace_event_get_state_backends(TRACE_VHOST_VDPA_SET_CONFIG) &&
973         trace_event_get_state_backends(TRACE_VHOST_VDPA_DUMP_CONFIG)) {
974         vhost_vdpa_dump_config(dev, data, size);
975     }
976     ret = vhost_vdpa_call(dev, VHOST_VDPA_SET_CONFIG, config);
977     g_free(config);
978     return ret;
979 }
980 
981 static int vhost_vdpa_get_config(struct vhost_dev *dev, uint8_t *config,
982                                    uint32_t config_len, Error **errp)
983 {
984     struct vhost_vdpa_config *v_config;
985     unsigned long config_size = offsetof(struct vhost_vdpa_config, buf);
986     int ret;
987 
988     trace_vhost_vdpa_get_config(dev, config, config_len);
989     v_config = g_malloc(config_len + config_size);
990     v_config->len = config_len;
991     v_config->off = 0;
992     ret = vhost_vdpa_call(dev, VHOST_VDPA_GET_CONFIG, v_config);
993     memcpy(config, v_config->buf, config_len);
994     g_free(v_config);
995     if (trace_event_get_state_backends(TRACE_VHOST_VDPA_GET_CONFIG) &&
996         trace_event_get_state_backends(TRACE_VHOST_VDPA_DUMP_CONFIG)) {
997         vhost_vdpa_dump_config(dev, config, config_len);
998     }
999     return ret;
1000 }
1001 
1002 static int vhost_vdpa_set_dev_vring_base(struct vhost_dev *dev,
1003                                          struct vhost_vring_state *ring)
1004 {
1005     struct vhost_vdpa *v = dev->opaque;
1006 
1007     trace_vhost_vdpa_set_dev_vring_base(dev, ring->index, ring->num,
1008                                         v->shadow_vqs_enabled);
1009     return vhost_vdpa_call(dev, VHOST_SET_VRING_BASE, ring);
1010 }
1011 
1012 static int vhost_vdpa_set_vring_dev_kick(struct vhost_dev *dev,
1013                                          struct vhost_vring_file *file)
1014 {
1015     trace_vhost_vdpa_set_vring_kick(dev, file->index, file->fd);
1016     return vhost_vdpa_call(dev, VHOST_SET_VRING_KICK, file);
1017 }
1018 
1019 static int vhost_vdpa_set_vring_dev_call(struct vhost_dev *dev,
1020                                          struct vhost_vring_file *file)
1021 {
1022     trace_vhost_vdpa_set_vring_call(dev, file->index, file->fd);
1023     return vhost_vdpa_call(dev, VHOST_SET_VRING_CALL, file);
1024 }
1025 
1026 static int vhost_vdpa_set_vring_dev_addr(struct vhost_dev *dev,
1027                                          struct vhost_vring_addr *addr)
1028 {
1029     trace_vhost_vdpa_set_vring_addr(dev, addr->index, addr->flags,
1030                                 addr->desc_user_addr, addr->used_user_addr,
1031                                 addr->avail_user_addr,
1032                                 addr->log_guest_addr);
1033 
1034     return vhost_vdpa_call(dev, VHOST_SET_VRING_ADDR, addr);
1035 
1036 }
1037 
1038 /**
1039  * Set the shadow virtqueue descriptors to the device
1040  *
1041  * @dev: The vhost device model
1042  * @svq: The shadow virtqueue
1043  * @idx: The index of the virtqueue in the vhost device
1044  * @errp: Error
1045  *
1046  * Note that this function does not rewind the kick file descriptor if it
1047  * cannot set the call one.
1048  */
1049 static int vhost_vdpa_svq_set_fds(struct vhost_dev *dev,
1050                                   VhostShadowVirtqueue *svq, unsigned idx,
1051                                   Error **errp)
1052 {
1053     struct vhost_vring_file file = {
1054         .index = dev->vq_index + idx,
1055     };
1056     const EventNotifier *event_notifier = &svq->hdev_kick;
1057     int r;
1058 
1059     r = event_notifier_init(&svq->hdev_kick, 0);
1060     if (r != 0) {
1061         error_setg_errno(errp, -r, "Couldn't create kick event notifier");
1062         goto err_init_hdev_kick;
1063     }
1064 
1065     r = event_notifier_init(&svq->hdev_call, 0);
1066     if (r != 0) {
1067         error_setg_errno(errp, -r, "Couldn't create call event notifier");
1068         goto err_init_hdev_call;
1069     }
1070 
1071     file.fd = event_notifier_get_fd(event_notifier);
1072     r = vhost_vdpa_set_vring_dev_kick(dev, &file);
1073     if (unlikely(r != 0)) {
1074         error_setg_errno(errp, -r, "Can't set device kick fd");
1075         goto err_init_set_dev_fd;
1076     }
1077 
1078     event_notifier = &svq->hdev_call;
1079     file.fd = event_notifier_get_fd(event_notifier);
1080     r = vhost_vdpa_set_vring_dev_call(dev, &file);
1081     if (unlikely(r != 0)) {
1082         error_setg_errno(errp, -r, "Can't set device call fd");
1083         goto err_init_set_dev_fd;
1084     }
1085 
1086     return 0;
1087 
1088 err_init_set_dev_fd:
1089     event_notifier_set_handler(&svq->hdev_call, NULL);
1090 
1091 err_init_hdev_call:
1092     event_notifier_cleanup(&svq->hdev_kick);
1093 
1094 err_init_hdev_kick:
1095     return r;
1096 }
1097 
1098 /**
1099  * Unmap an SVQ area in the device
1100  */
1101 static void vhost_vdpa_svq_unmap_ring(struct vhost_vdpa *v, hwaddr addr)
1102 {
1103     const DMAMap needle = {
1104         .translated_addr = addr,
1105     };
1106     const DMAMap *result = vhost_iova_tree_find_iova(v->shared->iova_tree,
1107                                                      &needle);
1108     hwaddr size;
1109     int r;
1110 
1111     if (unlikely(!result)) {
1112         error_report("Unable to find SVQ address to unmap");
1113         return;
1114     }
1115 
1116     size = ROUND_UP(result->size, qemu_real_host_page_size());
1117     r = vhost_vdpa_dma_unmap(v->shared, v->address_space_id, result->iova,
1118                              size);
1119     if (unlikely(r < 0)) {
1120         error_report("Unable to unmap SVQ vring: %s (%d)", g_strerror(-r), -r);
1121         return;
1122     }
1123 
1124     vhost_iova_tree_remove(v->shared->iova_tree, *result);
1125 }
1126 
1127 static void vhost_vdpa_svq_unmap_rings(struct vhost_dev *dev,
1128                                        const VhostShadowVirtqueue *svq)
1129 {
1130     struct vhost_vdpa *v = dev->opaque;
1131     struct vhost_vring_addr svq_addr;
1132 
1133     vhost_svq_get_vring_addr(svq, &svq_addr);
1134 
1135     vhost_vdpa_svq_unmap_ring(v, svq_addr.desc_user_addr);
1136 
1137     vhost_vdpa_svq_unmap_ring(v, svq_addr.used_user_addr);
1138 }
1139 
1140 /**
1141  * Map the SVQ area in the device
1142  *
1143  * @v: Vhost-vdpa device
1144  * @needle: The area for which to allocate an IOVA and map into the device
1145  * @errp: Error pointer
1146  */
1147 static bool vhost_vdpa_svq_map_ring(struct vhost_vdpa *v, DMAMap *needle,
1148                                     Error **errp)
1149 {
1150     int r;
1151 
1152     r = vhost_iova_tree_map_alloc(v->shared->iova_tree, needle);
1153     if (unlikely(r != IOVA_OK)) {
1154         error_setg(errp, "Cannot allocate iova (%d)", r);
1155         return false;
1156     }
1157 
1158     r = vhost_vdpa_dma_map(v->shared, v->address_space_id, needle->iova,
1159                            needle->size + 1,
1160                            (void *)(uintptr_t)needle->translated_addr,
1161                            needle->perm == IOMMU_RO);
1162     if (unlikely(r != 0)) {
1163         error_setg_errno(errp, -r, "Cannot map region to device");
1164         vhost_iova_tree_remove(v->shared->iova_tree, *needle);
1165     }
1166 
1167     return r == 0;
1168 }
1169 
1170 /**
1171  * Map the shadow virtqueue rings in the device
1172  *
1173  * @dev: The vhost device
1174  * @svq: The shadow virtqueue
1175  * @addr: Assigned IOVA addresses
1176  * @errp: Error pointer
1177  */
1178 static bool vhost_vdpa_svq_map_rings(struct vhost_dev *dev,
1179                                      const VhostShadowVirtqueue *svq,
1180                                      struct vhost_vring_addr *addr,
1181                                      Error **errp)
1182 {
1183     ERRP_GUARD();
1184     DMAMap device_region, driver_region;
1185     struct vhost_vring_addr svq_addr;
1186     struct vhost_vdpa *v = dev->opaque;
1187     size_t device_size = vhost_svq_device_area_size(svq);
1188     size_t driver_size = vhost_svq_driver_area_size(svq);
1189     size_t avail_offset;
1190     bool ok;
1191 
1192     vhost_svq_get_vring_addr(svq, &svq_addr);
1193 
1194     driver_region = (DMAMap) {
1195         .translated_addr = svq_addr.desc_user_addr,
1196         .size = driver_size - 1,
1197         .perm = IOMMU_RO,
1198     };
1199     ok = vhost_vdpa_svq_map_ring(v, &driver_region, errp);
1200     if (unlikely(!ok)) {
1201         error_prepend(errp, "Cannot create vq driver region: ");
1202         return false;
1203     }
1204     addr->desc_user_addr = driver_region.iova;
1205     avail_offset = svq_addr.avail_user_addr - svq_addr.desc_user_addr;
1206     addr->avail_user_addr = driver_region.iova + avail_offset;
1207 
1208     device_region = (DMAMap) {
1209         .translated_addr = svq_addr.used_user_addr,
1210         .size = device_size - 1,
1211         .perm = IOMMU_RW,
1212     };
1213     ok = vhost_vdpa_svq_map_ring(v, &device_region, errp);
1214     if (unlikely(!ok)) {
1215         error_prepend(errp, "Cannot create vq device region: ");
1216         vhost_vdpa_svq_unmap_ring(v, driver_region.translated_addr);
1217     }
1218     addr->used_user_addr = device_region.iova;
1219 
1220     return ok;
1221 }
1222 
1223 static bool vhost_vdpa_svq_setup(struct vhost_dev *dev,
1224                                  VhostShadowVirtqueue *svq, unsigned idx,
1225                                  Error **errp)
1226 {
1227     uint16_t vq_index = dev->vq_index + idx;
1228     struct vhost_vring_state s = {
1229         .index = vq_index,
1230     };
1231     int r;
1232 
1233     r = vhost_vdpa_set_dev_vring_base(dev, &s);
1234     if (unlikely(r)) {
1235         error_setg_errno(errp, -r, "Cannot set vring base");
1236         return false;
1237     }
1238 
1239     r = vhost_vdpa_svq_set_fds(dev, svq, idx, errp);
1240     return r == 0;
1241 }
1242 
1243 static bool vhost_vdpa_svqs_start(struct vhost_dev *dev)
1244 {
1245     struct vhost_vdpa *v = dev->opaque;
1246     Error *err = NULL;
1247     unsigned i;
1248 
1249     if (!v->shadow_vqs_enabled) {
1250         return true;
1251     }
1252 
1253     for (i = 0; i < v->shadow_vqs->len; ++i) {
1254         VirtQueue *vq = virtio_get_queue(dev->vdev, dev->vq_index + i);
1255         VhostShadowVirtqueue *svq = g_ptr_array_index(v->shadow_vqs, i);
1256         struct vhost_vring_addr addr = {
1257             .index = dev->vq_index + i,
1258         };
1259         int r;
1260         bool ok = vhost_vdpa_svq_setup(dev, svq, i, &err);
1261         if (unlikely(!ok)) {
1262             goto err;
1263         }
1264 
1265         vhost_svq_start(svq, dev->vdev, vq, v->shared->iova_tree);
1266         ok = vhost_vdpa_svq_map_rings(dev, svq, &addr, &err);
1267         if (unlikely(!ok)) {
1268             goto err_map;
1269         }
1270 
1271         /* Override vring GPA set by vhost subsystem */
1272         r = vhost_vdpa_set_vring_dev_addr(dev, &addr);
1273         if (unlikely(r != 0)) {
1274             error_setg_errno(&err, -r, "Cannot set device address");
1275             goto err_set_addr;
1276         }
1277     }
1278 
1279     return true;
1280 
1281 err_set_addr:
1282     vhost_vdpa_svq_unmap_rings(dev, g_ptr_array_index(v->shadow_vqs, i));
1283 
1284 err_map:
1285     vhost_svq_stop(g_ptr_array_index(v->shadow_vqs, i));
1286 
1287 err:
1288     error_reportf_err(err, "Cannot setup SVQ %u: ", i);
1289     for (unsigned j = 0; j < i; ++j) {
1290         VhostShadowVirtqueue *svq = g_ptr_array_index(v->shadow_vqs, j);
1291         vhost_vdpa_svq_unmap_rings(dev, svq);
1292         vhost_svq_stop(svq);
1293     }
1294 
1295     return false;
1296 }
1297 
1298 static void vhost_vdpa_svqs_stop(struct vhost_dev *dev)
1299 {
1300     struct vhost_vdpa *v = dev->opaque;
1301 
1302     if (!v->shadow_vqs_enabled) {
1303         return;
1304     }
1305 
1306     for (unsigned i = 0; i < v->shadow_vqs->len; ++i) {
1307         VhostShadowVirtqueue *svq = g_ptr_array_index(v->shadow_vqs, i);
1308 
1309         vhost_svq_stop(svq);
1310         vhost_vdpa_svq_unmap_rings(dev, svq);
1311 
1312         event_notifier_cleanup(&svq->hdev_kick);
1313         event_notifier_cleanup(&svq->hdev_call);
1314     }
1315 }
1316 
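/*
 * Stop the device from processing requests: use VHOST_VDPA_SUSPEND when the
 * backend supports it, otherwise fall back to a full device reset.
 */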
1317 static void vhost_vdpa_suspend(struct vhost_dev *dev)
1318 {
1319     struct vhost_vdpa *v = dev->opaque;
1320     int r;
1321 
1322     if (!vhost_vdpa_first_dev(dev)) {
1323         return;
1324     }
1325 
1326     if (dev->backend_cap & BIT_ULL(VHOST_BACKEND_F_SUSPEND)) {
1327         trace_vhost_vdpa_suspend(dev);
1328         r = ioctl(v->shared->device_fd, VHOST_VDPA_SUSPEND);
1329         if (unlikely(r)) {
1330             error_report("Cannot suspend: %s(%d)", g_strerror(errno), errno);
1331         } else {
1332             v->suspended = true;
1333             return;
1334         }
1335     }
1336 
1337     vhost_vdpa_reset_device(dev);
1338 }
1339 
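/*
 * Per-device start/stop hook. On start, set up host notifiers and (if
 * enabled) shadow virtqueues; once the last device of the group starts,
 * register the memory listener and set DRIVER_OK. On stop, suspend the
 * device and tear down the shadow virtqueues and notifiers.
 */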
1340 static int vhost_vdpa_dev_start(struct vhost_dev *dev, bool started)
1341 {
1342     struct vhost_vdpa *v = dev->opaque;
1343     bool ok;
1344     trace_vhost_vdpa_dev_start(dev, started);
1345 
1346     if (started) {
1347         vhost_vdpa_host_notifiers_init(dev);
1348         ok = vhost_vdpa_svqs_start(dev);
1349         if (unlikely(!ok)) {
1350             return -1;
1351         }
1352     } else {
1353         vhost_vdpa_suspend(dev);
1354         vhost_vdpa_svqs_stop(dev);
1355         vhost_vdpa_host_notifiers_uninit(dev, dev->nvqs);
1356     }
1357 
1358     if (!vhost_vdpa_last_dev(dev)) {
1359         return 0;
1360     }
1361 
1362     if (started) {
1363         if (vhost_dev_has_iommu(dev) && (v->shadow_vqs_enabled)) {
1364             error_report("SVQ cannot work while IOMMU is enabled, please "
1365                          "disable IOMMU and try again");
1366             return -1;
1367         }
1368         memory_listener_register(&v->shared->listener, dev->vdev->dma_as);
1369 
1370         return vhost_vdpa_add_status(dev, VIRTIO_CONFIG_S_DRIVER_OK);
1371     }
1372 
1373     return 0;
1374 }
1375 
1376 static void vhost_vdpa_reset_status(struct vhost_dev *dev)
1377 {
1378     struct vhost_vdpa *v = dev->opaque;
1379 
1380     if (!vhost_vdpa_last_dev(dev)) {
1381         return;
1382     }
1383 
1384     vhost_vdpa_reset_device(dev);
1385     vhost_vdpa_add_status(dev, VIRTIO_CONFIG_S_ACKNOWLEDGE |
1386                                VIRTIO_CONFIG_S_DRIVER);
1387     memory_listener_unregister(&v->shared->listener);
1388 }
1389 
1390 static int vhost_vdpa_set_log_base(struct vhost_dev *dev, uint64_t base,
1391                                      struct vhost_log *log)
1392 {
1393     struct vhost_vdpa *v = dev->opaque;
1394     if (v->shadow_vqs_enabled || !vhost_vdpa_first_dev(dev)) {
1395         return 0;
1396     }
1397 
1398     trace_vhost_vdpa_set_log_base(dev, base, log->size, log->refcnt, log->fd,
1399                                   log->log);
1400     return vhost_vdpa_call(dev, VHOST_SET_LOG_BASE, &base);
1401 }
1402 
1403 static int vhost_vdpa_set_vring_addr(struct vhost_dev *dev,
1404                                        struct vhost_vring_addr *addr)
1405 {
1406     struct vhost_vdpa *v = dev->opaque;
1407 
1408     if (v->shadow_vqs_enabled) {
1409         /*
1410          * Device vring addr was set at device start. SVQ base is handled by
1411          * VirtQueue code.
1412          */
1413         return 0;
1414     }
1415 
1416     return vhost_vdpa_set_vring_dev_addr(dev, addr);
1417 }
1418 
1419 static int vhost_vdpa_set_vring_num(struct vhost_dev *dev,
1420                                       struct vhost_vring_state *ring)
1421 {
1422     trace_vhost_vdpa_set_vring_num(dev, ring->index, ring->num);
1423     return vhost_vdpa_call(dev, VHOST_SET_VRING_NUM, ring);
1424 }
1425 
1426 static int vhost_vdpa_set_vring_base(struct vhost_dev *dev,
1427                                        struct vhost_vring_state *ring)
1428 {
1429     struct vhost_vdpa *v = dev->opaque;
1430 
1431     if (v->shadow_vqs_enabled) {
1432         /*
1433          * Device vring base was set at device start. SVQ base is handled by
1434          * VirtQueue code.
1435          */
1436         return 0;
1437     }
1438 
1439     return vhost_vdpa_set_dev_vring_base(dev, ring);
1440 }
1441 
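/*
 * With shadow virtqueues the used index is recovered from the VirtQueue
 * state. Otherwise the value reported by the device is only trusted after a
 * successful suspend, since a running device may still process the ring.
 */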
1442 static int vhost_vdpa_get_vring_base(struct vhost_dev *dev,
1443                                        struct vhost_vring_state *ring)
1444 {
1445     struct vhost_vdpa *v = dev->opaque;
1446     int ret;
1447 
1448     if (v->shadow_vqs_enabled) {
1449         ring->num = virtio_queue_get_last_avail_idx(dev->vdev, ring->index);
1450         trace_vhost_vdpa_get_vring_base(dev, ring->index, ring->num, true);
1451         return 0;
1452     }
1453 
1454     if (!v->suspended) {
1455         /*
1456          * Cannot trust the value returned by the device, let vhost recover
1457          * the used idx from the guest.
1458          */
1459         return -1;
1460     }
1461 
1462     ret = vhost_vdpa_call(dev, VHOST_GET_VRING_BASE, ring);
1463     trace_vhost_vdpa_get_vring_base(dev, ring->index, ring->num, false);
1464     return ret;
1465 }
1466 
1467 static int vhost_vdpa_set_vring_kick(struct vhost_dev *dev,
1468                                        struct vhost_vring_file *file)
1469 {
1470     struct vhost_vdpa *v = dev->opaque;
1471     int vdpa_idx = file->index - dev->vq_index;
1472 
1473     if (v->shadow_vqs_enabled) {
1474         VhostShadowVirtqueue *svq = g_ptr_array_index(v->shadow_vqs, vdpa_idx);
1475         vhost_svq_set_svq_kick_fd(svq, file->fd);
1476         return 0;
1477     } else {
1478         return vhost_vdpa_set_vring_dev_kick(dev, file);
1479     }
1480 }
1481 
1482 static int vhost_vdpa_set_vring_call(struct vhost_dev *dev,
1483                                        struct vhost_vring_file *file)
1484 {
1485     struct vhost_vdpa *v = dev->opaque;
1486     int vdpa_idx = file->index - dev->vq_index;
1487     VhostShadowVirtqueue *svq = g_ptr_array_index(v->shadow_vqs, vdpa_idx);
1488 
1489     /* Remember last call fd because we can switch to SVQ anytime. */
1490     vhost_svq_set_svq_call_fd(svq, file->fd);
1491     /*
1492      * When SVQ is transitioning to off, shadow_vqs_enabled has
1493      * not been set back to false yet, but the underlying call fd
1494      * will have to switch back to the guest notifier to signal the
1495      * passthrough virtqueues. In other situations, SVQ's own call
1496      * fd shall be used to signal the device model.
1497      */
1498     if (v->shadow_vqs_enabled &&
1499         v->shared->svq_switching != SVQ_TSTATE_DISABLING) {
1500         return 0;
1501     }
1502 
1503     return vhost_vdpa_set_vring_dev_call(dev, file);
1504 }
1505 
1506 static int vhost_vdpa_get_features(struct vhost_dev *dev,
1507                                      uint64_t *features)
1508 {
1509     int ret = vhost_vdpa_get_dev_features(dev, features);
1510 
1511     if (ret == 0) {
1512         /* Add SVQ logging capabilities */
1513         *features |= BIT_ULL(VHOST_F_LOG_ALL);
1514     }
1515 
1516     return ret;
1517 }
1518 
1519 static int vhost_vdpa_set_owner(struct vhost_dev *dev)
1520 {
1521     if (!vhost_vdpa_first_dev(dev)) {
1522         return 0;
1523     }
1524 
1525     trace_vhost_vdpa_set_owner(dev);
1526     return vhost_vdpa_call(dev, VHOST_SET_OWNER, NULL);
1527 }
1528 
1529 static int vhost_vdpa_vq_get_addr(struct vhost_dev *dev,
1530                     struct vhost_vring_addr *addr, struct vhost_virtqueue *vq)
1531 {
1532     assert(dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_VDPA);
1533     addr->desc_user_addr = (uint64_t)(unsigned long)vq->desc_phys;
1534     addr->avail_user_addr = (uint64_t)(unsigned long)vq->avail_phys;
1535     addr->used_user_addr = (uint64_t)(unsigned long)vq->used_phys;
1536     trace_vhost_vdpa_vq_get_addr(dev, vq, addr->desc_user_addr,
1537                                  addr->avail_user_addr, addr->used_user_addr);
1538     return 0;
1539 }
1540 
1541 static bool vhost_vdpa_force_iommu(struct vhost_dev *dev)
1542 {
1543     return true;
1544 }
1545 
1546 const VhostOps vdpa_ops = {
1547         .backend_type = VHOST_BACKEND_TYPE_VDPA,
1548         .vhost_backend_init = vhost_vdpa_init,
1549         .vhost_backend_cleanup = vhost_vdpa_cleanup,
1550         .vhost_set_log_base = vhost_vdpa_set_log_base,
1551         .vhost_set_vring_addr = vhost_vdpa_set_vring_addr,
1552         .vhost_set_vring_num = vhost_vdpa_set_vring_num,
1553         .vhost_set_vring_base = vhost_vdpa_set_vring_base,
1554         .vhost_get_vring_base = vhost_vdpa_get_vring_base,
1555         .vhost_set_vring_kick = vhost_vdpa_set_vring_kick,
1556         .vhost_set_vring_call = vhost_vdpa_set_vring_call,
1557         .vhost_get_features = vhost_vdpa_get_features,
1558         .vhost_set_backend_cap = vhost_vdpa_set_backend_cap,
1559         .vhost_set_owner = vhost_vdpa_set_owner,
1560         .vhost_set_vring_endian = NULL,
1561         .vhost_backend_memslots_limit = vhost_vdpa_memslots_limit,
1562         .vhost_set_mem_table = vhost_vdpa_set_mem_table,
1563         .vhost_set_features = vhost_vdpa_set_features,
1564         .vhost_reset_device = vhost_vdpa_reset_device,
1565         .vhost_get_vq_index = vhost_vdpa_get_vq_index,
1566         .vhost_set_vring_enable = vhost_vdpa_set_vring_enable,
1567         .vhost_get_config  = vhost_vdpa_get_config,
1568         .vhost_set_config = vhost_vdpa_set_config,
1569         .vhost_requires_shm_log = NULL,
1570         .vhost_migration_done = NULL,
1571         .vhost_net_set_mtu = NULL,
1572         .vhost_set_iotlb_callback = NULL,
1573         .vhost_send_device_iotlb_msg = NULL,
1574         .vhost_dev_start = vhost_vdpa_dev_start,
1575         .vhost_get_device_id = vhost_vdpa_get_device_id,
1576         .vhost_vq_get_addr = vhost_vdpa_vq_get_addr,
1577         .vhost_force_iommu = vhost_vdpa_force_iommu,
1578         .vhost_set_config_call = vhost_vdpa_set_config_call,
1579         .vhost_reset_status = vhost_vdpa_reset_status,
1580 };
1581