/*
 * vhost-vdpa
 *
 *  Copyright(c) 2017-2018 Intel Corporation.
 *  Copyright(c) 2020 Red Hat, Inc.
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

#include "qemu/osdep.h"
#include <linux/vhost.h>
#include <linux/vfio.h>
#include <sys/eventfd.h>
#include <sys/ioctl.h>
#include "hw/virtio/vhost.h"
#include "hw/virtio/vhost-backend.h"
#include "hw/virtio/virtio-net.h"
#include "hw/virtio/vhost-vdpa.h"
#include "exec/address-spaces.h"
#include "qemu/main-loop.h"
#include "cpu.h"
#include "trace.h"
#include "qemu-common.h"

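/*
 * Return true if this section must not be mapped into the device:
 * anything that is neither RAM nor an IOMMU region is skipped, as are
 * the cases called out in the comments below.
 */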
static bool vhost_vdpa_listener_skipped_section(MemoryRegionSection *section)
{
    return (!memory_region_is_ram(section->mr) &&
            !memory_region_is_iommu(section->mr)) ||
           /* vhost-vDPA doesn't allow MMIO to be mapped */
           memory_region_is_ram_device(section->mr) ||
           /*
            * Sizing an enabled 64-bit BAR can cause spurious mappings to
            * addresses in the upper part of the 64-bit address space.  These
            * are never accessed by the CPU and beyond the address width of
            * some IOMMU hardware.  TODO: VDPA should tell us the IOMMU width.
            */
           section->offset_within_address_space & (1ULL << 63);
}

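/*
 * Map [iova, iova + size) to the host virtual address vaddr by writing a
 * VHOST_IOTLB_UPDATE message to the vhost-vdpa device fd.
 */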
static int vhost_vdpa_dma_map(struct vhost_vdpa *v, hwaddr iova, hwaddr size,
                              void *vaddr, bool readonly)
{
    struct vhost_msg_v2 msg = {};
    int fd = v->device_fd;
    int ret = 0;

    msg.type = v->msg_type;
    msg.iotlb.iova = iova;
    msg.iotlb.size = size;
    msg.iotlb.uaddr = (uint64_t)(uintptr_t)vaddr;
    msg.iotlb.perm = readonly ? VHOST_ACCESS_RO : VHOST_ACCESS_RW;
    msg.iotlb.type = VHOST_IOTLB_UPDATE;

    trace_vhost_vdpa_dma_map(v, fd, msg.type, msg.iotlb.iova, msg.iotlb.size,
                             msg.iotlb.uaddr, msg.iotlb.perm, msg.iotlb.type);

    if (write(fd, &msg, sizeof(msg)) != sizeof(msg)) {
        error_report("failed to write, fd=%d, errno=%d (%s)",
            fd, errno, strerror(errno));
        return -EIO;
    }

    return ret;
}

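/*
 * Drop the mapping for [iova, iova + size) by writing a
 * VHOST_IOTLB_INVALIDATE message to the device fd.
 */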
static int vhost_vdpa_dma_unmap(struct vhost_vdpa *v, hwaddr iova,
                                hwaddr size)
{
    struct vhost_msg_v2 msg = {};
    int fd = v->device_fd;
    int ret = 0;

    msg.type = v->msg_type;
    msg.iotlb.iova = iova;
    msg.iotlb.size = size;
    msg.iotlb.type = VHOST_IOTLB_INVALIDATE;

    trace_vhost_vdpa_dma_unmap(v, fd, msg.type, msg.iotlb.iova,
                               msg.iotlb.size, msg.iotlb.type);

    if (write(fd, &msg, sizeof(msg)) != sizeof(msg)) {
        error_report("failed to write, fd=%d, errno=%d (%s)",
            fd, errno, strerror(errno));
        return -EIO;
    }

    return ret;
}

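/* Open an IOTLB batch by sending VHOST_IOTLB_BATCH_BEGIN to the device. */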
static void vhost_vdpa_listener_begin_batch(struct vhost_vdpa *v)
{
    int fd = v->device_fd;
    struct vhost_msg_v2 msg = {
        .type = v->msg_type,
        .iotlb.type = VHOST_IOTLB_BATCH_BEGIN,
    };

    if (write(fd, &msg, sizeof(msg)) != sizeof(msg)) {
        error_report("failed to write, fd=%d, errno=%d (%s)",
                     fd, errno, strerror(errno));
    }
}

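/*
 * Begin a batch at most once per listener transaction, and only when the
 * backend advertises VHOST_BACKEND_F_IOTLB_BATCH.
 */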
static void vhost_vdpa_iotlb_batch_begin_once(struct vhost_vdpa *v)
{
    if (v->dev->backend_cap & (0x1ULL << VHOST_BACKEND_F_IOTLB_BATCH) &&
        !v->iotlb_batch_begin_sent) {
        vhost_vdpa_listener_begin_batch(v);
    }

    v->iotlb_batch_begin_sent = true;
}

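/*
 * Commit callback of the memory listener: close any batch opened by
 * vhost_vdpa_iotlb_batch_begin_once() with VHOST_IOTLB_BATCH_END.
 */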
static void vhost_vdpa_listener_commit(MemoryListener *listener)
{
    struct vhost_vdpa *v = container_of(listener, struct vhost_vdpa, listener);
    struct vhost_dev *dev = v->dev;
    struct vhost_msg_v2 msg = {};
    int fd = v->device_fd;

    if (!(dev->backend_cap & (0x1ULL << VHOST_BACKEND_F_IOTLB_BATCH))) {
        return;
    }

    if (!v->iotlb_batch_begin_sent) {
        return;
    }

    msg.type = v->msg_type;
    msg.iotlb.type = VHOST_IOTLB_BATCH_END;

    if (write(fd, &msg, sizeof(msg)) != sizeof(msg)) {
        error_report("failed to write, fd=%d, errno=%d (%s)",
                     fd, errno, strerror(errno));
    }

    v->iotlb_batch_begin_sent = false;
}

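/*
 * Map a new memory section into the device: page-align the IOVA range,
 * translate it to a host virtual address and issue a DMA map.
 */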
static void vhost_vdpa_listener_region_add(MemoryListener *listener,
                                           MemoryRegionSection *section)
{
    struct vhost_vdpa *v = container_of(listener, struct vhost_vdpa, listener);
    hwaddr iova;
    Int128 llend, llsize;
    void *vaddr;
    int ret;

    if (vhost_vdpa_listener_skipped_section(section)) {
        return;
    }

    if (unlikely((section->offset_within_address_space & ~TARGET_PAGE_MASK) !=
                 (section->offset_within_region & ~TARGET_PAGE_MASK))) {
        error_report("%s received unaligned region", __func__);
        return;
    }

    iova = TARGET_PAGE_ALIGN(section->offset_within_address_space);
    llend = int128_make64(section->offset_within_address_space);
    llend = int128_add(llend, section->size);
    llend = int128_and(llend, int128_exts64(TARGET_PAGE_MASK));

    if (int128_ge(int128_make64(iova), llend)) {
        return;
    }

    memory_region_ref(section->mr);

    /* Here we assume that memory_region_is_ram(section->mr) == true */

    vaddr = memory_region_get_ram_ptr(section->mr) +
            section->offset_within_region +
            (iova - section->offset_within_address_space);

    trace_vhost_vdpa_listener_region_add(v, iova, int128_get64(llend),
                                         vaddr, section->readonly);

    llsize = int128_sub(llend, int128_make64(iova));

    vhost_vdpa_iotlb_batch_begin_once(v);
    ret = vhost_vdpa_dma_map(v, iova, int128_get64(llsize),
                             vaddr, section->readonly);
    if (ret) {
        error_report("vhost vdpa map fail!");
        goto fail;
    }

    return;

fail:
    /*
     * On the initfn path, store the first error in the container so we
     * can gracefully fail.  Runtime, there's not much we can do other
     * than throw a hardware error.
     */
    error_report("vhost-vdpa: DMA mapping failed, unable to continue");
    return;
}

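/*
 * Unmap a memory section from the device; this mirrors
 * vhost_vdpa_listener_region_add() above.
 */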
static void vhost_vdpa_listener_region_del(MemoryListener *listener,
                                           MemoryRegionSection *section)
{
    struct vhost_vdpa *v = container_of(listener, struct vhost_vdpa, listener);
    hwaddr iova;
    Int128 llend, llsize;
    int ret;

    if (vhost_vdpa_listener_skipped_section(section)) {
        return;
    }

    if (unlikely((section->offset_within_address_space & ~TARGET_PAGE_MASK) !=
                 (section->offset_within_region & ~TARGET_PAGE_MASK))) {
        error_report("%s received unaligned region", __func__);
        return;
    }

    iova = TARGET_PAGE_ALIGN(section->offset_within_address_space);
    llend = int128_make64(section->offset_within_address_space);
    llend = int128_add(llend, section->size);
    llend = int128_and(llend, int128_exts64(TARGET_PAGE_MASK));

    trace_vhost_vdpa_listener_region_del(v, iova, int128_get64(llend));

    if (int128_ge(int128_make64(iova), llend)) {
        return;
    }

    llsize = int128_sub(llend, int128_make64(iova));

    vhost_vdpa_iotlb_batch_begin_once(v);
    ret = vhost_vdpa_dma_unmap(v, iova, int128_get64(llsize));
    if (ret) {
        error_report("vhost_vdpa dma unmap error!");
    }

    memory_region_unref(section->mr);
}

/*
 * The IOTLB API is used by vhost-vdpa, which requires incremental updating
 * of the mapping.  So we cannot use the generic vhost memory listener,
 * which depends on addnop().
 */
static const MemoryListener vhost_vdpa_memory_listener = {
    .commit = vhost_vdpa_listener_commit,
    .region_add = vhost_vdpa_listener_region_add,
    .region_del = vhost_vdpa_listener_region_del,
};

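/* Issue a vhost ioctl on the device fd, converting failure to -errno. */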
static int vhost_vdpa_call(struct vhost_dev *dev, unsigned long int request,
                           void *arg)
{
    struct vhost_vdpa *v = dev->opaque;
    int fd = v->device_fd;
    int ret;

    assert(dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_VDPA);

    ret = ioctl(fd, request, arg);
    return ret < 0 ? -errno : ret;
}

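/* Read the device status and OR in the given status bits. */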
static void vhost_vdpa_add_status(struct vhost_dev *dev, uint8_t status)
{
    uint8_t s;

    trace_vhost_vdpa_add_status(dev, status);
    if (vhost_vdpa_call(dev, VHOST_VDPA_GET_STATUS, &s)) {
        return;
    }

    s |= status;

    vhost_vdpa_call(dev, VHOST_VDPA_SET_STATUS, &s);
}

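/*
 * Backend init: link the vhost_vdpa state with the vhost_dev, install the
 * memory listener template and acknowledge the device
 * (ACKNOWLEDGE | DRIVER).
 */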
static int vhost_vdpa_init(struct vhost_dev *dev, void *opaque, Error **errp)
{
    struct vhost_vdpa *v;

    assert(dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_VDPA);
    trace_vhost_vdpa_init(dev, opaque);

    v = opaque;
    v->dev = dev;
    dev->opaque = opaque;
    v->listener = vhost_vdpa_memory_listener;
    v->msg_type = VHOST_IOTLB_MSG_V2;

    vhost_vdpa_add_status(dev, VIRTIO_CONFIG_S_ACKNOWLEDGE |
                               VIRTIO_CONFIG_S_DRIVER);

    return 0;
}

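/* Unmap the host notifier page of a virtqueue and drop its memory region. */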
static void vhost_vdpa_host_notifier_uninit(struct vhost_dev *dev,
                                            int queue_index)
{
    size_t page_size = qemu_real_host_page_size;
    struct vhost_vdpa *v = dev->opaque;
    VirtIODevice *vdev = dev->vdev;
    VhostVDPAHostNotifier *n;

    n = &v->notifier[queue_index];

    if (n->addr) {
        virtio_queue_set_host_notifier_mr(vdev, queue_index, &n->mr, false);
        object_unparent(OBJECT(&n->mr));
        munmap(n->addr, page_size);
        n->addr = NULL;
    }
}

static void vhost_vdpa_host_notifiers_uninit(struct vhost_dev *dev, int n)
{
    int i;

    for (i = 0; i < n; i++) {
        vhost_vdpa_host_notifier_uninit(dev, i);
    }
}

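/*
 * mmap page queue_index of the device fd and expose it to the guest as a
 * host notifier memory region, so that doorbell writes bypass QEMU.
 */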
static int vhost_vdpa_host_notifier_init(struct vhost_dev *dev, int queue_index)
{
    size_t page_size = qemu_real_host_page_size;
    struct vhost_vdpa *v = dev->opaque;
    VirtIODevice *vdev = dev->vdev;
    VhostVDPAHostNotifier *n;
    int fd = v->device_fd;
    void *addr;
    char *name;

    vhost_vdpa_host_notifier_uninit(dev, queue_index);

    n = &v->notifier[queue_index];

    addr = mmap(NULL, page_size, PROT_WRITE, MAP_SHARED, fd,
                queue_index * page_size);
    if (addr == MAP_FAILED) {
        goto err;
    }

    name = g_strdup_printf("vhost-vdpa/host-notifier@%p mmaps[%d]",
                           v, queue_index);
    memory_region_init_ram_device_ptr(&n->mr, OBJECT(vdev), name,
                                      page_size, addr);
    g_free(name);

    if (virtio_queue_set_host_notifier_mr(vdev, queue_index, &n->mr, true)) {
        munmap(addr, page_size);
        goto err;
    }
    n->addr = addr;

    return 0;

err:
    return -1;
}

static void vhost_vdpa_host_notifiers_init(struct vhost_dev *dev)
{
    int i;

    for (i = dev->vq_index; i < dev->vq_index + dev->nvqs; i++) {
        if (vhost_vdpa_host_notifier_init(dev, i)) {
            goto err;
        }
    }

    return;

err:
    vhost_vdpa_host_notifiers_uninit(dev, i);
    return;
}

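/* Backend cleanup: unmap host notifiers and unregister the memory listener. */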
static int vhost_vdpa_cleanup(struct vhost_dev *dev)
{
    struct vhost_vdpa *v;

    assert(dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_VDPA);
    v = dev->opaque;
    trace_vhost_vdpa_cleanup(dev, v);
    vhost_vdpa_host_notifiers_uninit(dev, dev->nvqs);
    memory_listener_unregister(&v->listener);

    dev->opaque = NULL;
    return 0;
}

static int vhost_vdpa_memslots_limit(struct vhost_dev *dev)
{
    trace_vhost_vdpa_memslots_limit(dev, INT_MAX);
    return INT_MAX;
}

static int vhost_vdpa_set_mem_table(struct vhost_dev *dev,
                                    struct vhost_memory *mem)
{
    trace_vhost_vdpa_set_mem_table(dev, mem->nregions, mem->padding);
    if (trace_event_get_state_backends(TRACE_VHOST_VDPA_SET_MEM_TABLE) &&
        trace_event_get_state_backends(TRACE_VHOST_VDPA_DUMP_REGIONS)) {
        int i;
        for (i = 0; i < mem->nregions; i++) {
            trace_vhost_vdpa_dump_regions(dev, i,
                                          mem->regions[i].guest_phys_addr,
                                          mem->regions[i].memory_size,
                                          mem->regions[i].userspace_addr,
                                          mem->regions[i].flags_padding);
        }
    }
    if (mem->padding) {
        return -1;
    }

    return 0;
}

static int vhost_vdpa_set_features(struct vhost_dev *dev,
                                   uint64_t features)
{
    uint8_t status = 0;
    int ret;

    trace_vhost_vdpa_set_features(dev, features);
    ret = vhost_vdpa_call(dev, VHOST_SET_FEATURES, &features);
    if (ret) {
        return ret;
    }
    vhost_vdpa_add_status(dev, VIRTIO_CONFIG_S_FEATURES_OK);
    vhost_vdpa_call(dev, VHOST_VDPA_GET_STATUS, &status);

    return !(status & VIRTIO_CONFIG_S_FEATURES_OK);
}

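/*
 * Negotiate backend features: of what the device offers, only
 * IOTLB_MSG_V2 and IOTLB_BATCH are accepted and recorded in backend_cap.
 */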
static int vhost_vdpa_set_backend_cap(struct vhost_dev *dev)
{
    uint64_t features;
    uint64_t f = 0x1ULL << VHOST_BACKEND_F_IOTLB_MSG_V2 |
        0x1ULL << VHOST_BACKEND_F_IOTLB_BATCH;
    int r;

    if (vhost_vdpa_call(dev, VHOST_GET_BACKEND_FEATURES, &features)) {
        return 0;
    }

    features &= f;
    r = vhost_vdpa_call(dev, VHOST_SET_BACKEND_FEATURES, &features);
    if (r) {
        return 0;
    }

    dev->backend_cap = features;

    return 0;
}

static int vhost_vdpa_get_device_id(struct vhost_dev *dev,
                                    uint32_t *device_id)
{
    int ret;

    ret = vhost_vdpa_call(dev, VHOST_VDPA_GET_DEVICE_ID, device_id);
    trace_vhost_vdpa_get_device_id(dev, *device_id);
    return ret;
}

static int vhost_vdpa_reset_device(struct vhost_dev *dev)
{
    int ret;
    uint8_t status = 0;

    ret = vhost_vdpa_call(dev, VHOST_VDPA_SET_STATUS, &status);
    trace_vhost_vdpa_reset_device(dev, status);
    return ret;
}

static int vhost_vdpa_get_vq_index(struct vhost_dev *dev, int idx)
{
    assert(idx >= dev->vq_index && idx < dev->vq_index + dev->nvqs);

    trace_vhost_vdpa_get_vq_index(dev, idx, idx - dev->vq_index);
    return idx - dev->vq_index;
}

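/* Enable every virtqueue belonging to this vhost device. */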
static int vhost_vdpa_set_vring_ready(struct vhost_dev *dev)
{
    int i;

    trace_vhost_vdpa_set_vring_ready(dev);
    for (i = 0; i < dev->nvqs; ++i) {
        struct vhost_vring_state state = {
            .index = dev->vq_index + i,
            .num = 1,
        };
        vhost_vdpa_call(dev, VHOST_VDPA_SET_VRING_ENABLE, &state);
    }
    return 0;
}

static void vhost_vdpa_dump_config(struct vhost_dev *dev, const uint8_t *config,
                                   uint32_t config_len)
{
    int b, len;
    char line[QEMU_HEXDUMP_LINE_LEN];

    for (b = 0; b < config_len; b += 16) {
        len = config_len - b;
        qemu_hexdump_line(line, b, config, len, false);
        trace_vhost_vdpa_dump_config(dev, line);
    }
}

static int vhost_vdpa_set_config(struct vhost_dev *dev, const uint8_t *data,
                                 uint32_t offset, uint32_t size,
                                 uint32_t flags)
{
    struct vhost_vdpa_config *config;
    int ret;
    unsigned long config_size = offsetof(struct vhost_vdpa_config, buf);

    trace_vhost_vdpa_set_config(dev, offset, size, flags);
    config = g_malloc(size + config_size);
    config->off = offset;
    config->len = size;
    memcpy(config->buf, data, size);
    if (trace_event_get_state_backends(TRACE_VHOST_VDPA_SET_CONFIG) &&
        trace_event_get_state_backends(TRACE_VHOST_VDPA_DUMP_CONFIG)) {
        vhost_vdpa_dump_config(dev, data, size);
    }
    ret = vhost_vdpa_call(dev, VHOST_VDPA_SET_CONFIG, config);
    g_free(config);
    return ret;
}

static int vhost_vdpa_get_config(struct vhost_dev *dev, uint8_t *config,
                                 uint32_t config_len, Error **errp)
{
    struct vhost_vdpa_config *v_config;
    unsigned long config_size = offsetof(struct vhost_vdpa_config, buf);
    int ret;

    trace_vhost_vdpa_get_config(dev, config, config_len);
    v_config = g_malloc(config_len + config_size);
    v_config->len = config_len;
    v_config->off = 0;
    ret = vhost_vdpa_call(dev, VHOST_VDPA_GET_CONFIG, v_config);
    memcpy(config, v_config->buf, config_len);
    g_free(v_config);
    if (trace_event_get_state_backends(TRACE_VHOST_VDPA_GET_CONFIG) &&
        trace_event_get_state_backends(TRACE_VHOST_VDPA_DUMP_CONFIG)) {
        vhost_vdpa_dump_config(dev, config, config_len);
    }
    return ret;
}

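/*
 * Start/stop the device: on start, register the memory listener, set up
 * host notifiers and set DRIVER_OK; on stop, reset the device and undo
 * all of the above.
 */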
static int vhost_vdpa_dev_start(struct vhost_dev *dev, bool started)
{
    struct vhost_vdpa *v = dev->opaque;

    trace_vhost_vdpa_dev_start(dev, started);
    if (started) {
        uint8_t status = 0;

        memory_listener_register(&v->listener, &address_space_memory);
        vhost_vdpa_host_notifiers_init(dev);
        vhost_vdpa_set_vring_ready(dev);
        vhost_vdpa_add_status(dev, VIRTIO_CONFIG_S_DRIVER_OK);
        vhost_vdpa_call(dev, VHOST_VDPA_GET_STATUS, &status);

        return !(status & VIRTIO_CONFIG_S_DRIVER_OK);
    } else {
        vhost_vdpa_reset_device(dev);
        vhost_vdpa_add_status(dev, VIRTIO_CONFIG_S_ACKNOWLEDGE |
                                   VIRTIO_CONFIG_S_DRIVER);
        vhost_vdpa_host_notifiers_uninit(dev, dev->nvqs);
        memory_listener_unregister(&v->listener);

        return 0;
    }
}

static int vhost_vdpa_set_log_base(struct vhost_dev *dev, uint64_t base,
                                   struct vhost_log *log)
{
    trace_vhost_vdpa_set_log_base(dev, base, log->size, log->refcnt, log->fd,
                                  log->log);
    return vhost_vdpa_call(dev, VHOST_SET_LOG_BASE, &base);
}

static int vhost_vdpa_set_vring_addr(struct vhost_dev *dev,
                                     struct vhost_vring_addr *addr)
{
    trace_vhost_vdpa_set_vring_addr(dev, addr->index, addr->flags,
                                    addr->desc_user_addr, addr->used_user_addr,
                                    addr->avail_user_addr,
                                    addr->log_guest_addr);
    return vhost_vdpa_call(dev, VHOST_SET_VRING_ADDR, addr);
}

static int vhost_vdpa_set_vring_num(struct vhost_dev *dev,
                                    struct vhost_vring_state *ring)
{
    trace_vhost_vdpa_set_vring_num(dev, ring->index, ring->num);
    return vhost_vdpa_call(dev, VHOST_SET_VRING_NUM, ring);
}

static int vhost_vdpa_set_vring_base(struct vhost_dev *dev,
                                     struct vhost_vring_state *ring)
{
    trace_vhost_vdpa_set_vring_base(dev, ring->index, ring->num);
    return vhost_vdpa_call(dev, VHOST_SET_VRING_BASE, ring);
}

static int vhost_vdpa_get_vring_base(struct vhost_dev *dev,
                                     struct vhost_vring_state *ring)
{
    int ret;

    ret = vhost_vdpa_call(dev, VHOST_GET_VRING_BASE, ring);
    trace_vhost_vdpa_get_vring_base(dev, ring->index, ring->num);
    return ret;
}

static int vhost_vdpa_set_vring_kick(struct vhost_dev *dev,
                                     struct vhost_vring_file *file)
{
    trace_vhost_vdpa_set_vring_kick(dev, file->index, file->fd);
    return vhost_vdpa_call(dev, VHOST_SET_VRING_KICK, file);
}

static int vhost_vdpa_set_vring_call(struct vhost_dev *dev,
                                     struct vhost_vring_file *file)
{
    trace_vhost_vdpa_set_vring_call(dev, file->index, file->fd);
    return vhost_vdpa_call(dev, VHOST_SET_VRING_CALL, file);
}

static int vhost_vdpa_get_features(struct vhost_dev *dev,
                                   uint64_t *features)
{
    int ret;

    ret = vhost_vdpa_call(dev, VHOST_GET_FEATURES, features);
    trace_vhost_vdpa_get_features(dev, *features);
    return ret;
}

static int vhost_vdpa_set_owner(struct vhost_dev *dev)
{
    trace_vhost_vdpa_set_owner(dev);
    return vhost_vdpa_call(dev, VHOST_SET_OWNER, NULL);
}

static int vhost_vdpa_vq_get_addr(struct vhost_dev *dev,
                                  struct vhost_vring_addr *addr,
                                  struct vhost_virtqueue *vq)
{
    assert(dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_VDPA);
    addr->desc_user_addr = (uint64_t)(unsigned long)vq->desc_phys;
    addr->avail_user_addr = (uint64_t)(unsigned long)vq->avail_phys;
    addr->used_user_addr = (uint64_t)(unsigned long)vq->used_phys;
    trace_vhost_vdpa_vq_get_addr(dev, vq, addr->desc_user_addr,
                                 addr->avail_user_addr, addr->used_user_addr);
    return 0;
}

static bool vhost_vdpa_force_iommu(struct vhost_dev *dev)
{
    return true;
}

const VhostOps vdpa_ops = {
    .backend_type = VHOST_BACKEND_TYPE_VDPA,
    .vhost_backend_init = vhost_vdpa_init,
    .vhost_backend_cleanup = vhost_vdpa_cleanup,
    .vhost_set_log_base = vhost_vdpa_set_log_base,
    .vhost_set_vring_addr = vhost_vdpa_set_vring_addr,
    .vhost_set_vring_num = vhost_vdpa_set_vring_num,
    .vhost_set_vring_base = vhost_vdpa_set_vring_base,
    .vhost_get_vring_base = vhost_vdpa_get_vring_base,
    .vhost_set_vring_kick = vhost_vdpa_set_vring_kick,
    .vhost_set_vring_call = vhost_vdpa_set_vring_call,
    .vhost_get_features = vhost_vdpa_get_features,
    .vhost_set_backend_cap = vhost_vdpa_set_backend_cap,
    .vhost_set_owner = vhost_vdpa_set_owner,
    .vhost_set_vring_endian = NULL,
    .vhost_backend_memslots_limit = vhost_vdpa_memslots_limit,
    .vhost_set_mem_table = vhost_vdpa_set_mem_table,
    .vhost_set_features = vhost_vdpa_set_features,
    .vhost_reset_device = vhost_vdpa_reset_device,
    .vhost_get_vq_index = vhost_vdpa_get_vq_index,
    .vhost_get_config = vhost_vdpa_get_config,
    .vhost_set_config = vhost_vdpa_set_config,
    .vhost_requires_shm_log = NULL,
    .vhost_migration_done = NULL,
    .vhost_backend_can_merge = NULL,
    .vhost_net_set_mtu = NULL,
    .vhost_set_iotlb_callback = NULL,
    .vhost_send_device_iotlb_msg = NULL,
    .vhost_dev_start = vhost_vdpa_dev_start,
    .vhost_get_device_id = vhost_vdpa_get_device_id,
    .vhost_vq_get_addr = vhost_vdpa_vq_get_addr,
    .vhost_force_iommu = vhost_vdpa_force_iommu,
};