xref: /openbmc/qemu/hw/virtio/vhost-vdpa.c (revision 587adaca)
/*
 * vhost-vdpa
 *
 *  Copyright(c) 2017-2018 Intel Corporation.
 *  Copyright(c) 2020 Red Hat, Inc.
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

#include "qemu/osdep.h"
#include <linux/vhost.h>
#include <linux/vfio.h>
#include <sys/eventfd.h>
#include <sys/ioctl.h>
#include "hw/virtio/vhost.h"
#include "hw/virtio/vhost-backend.h"
#include "hw/virtio/virtio-net.h"
#include "hw/virtio/vhost-vdpa.h"
#include "exec/address-spaces.h"
#include "qemu/main-loop.h"
#include "cpu.h"
#include "trace.h"
#include "qemu-common.h"

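/*
 * Overview: vhost-vDPA drives a vDPA device through a vhost-vdpa
 * character device.  Virtqueue and feature setup go through the usual
 * vhost ioctls (see vhost_vdpa_call() below), while DMA mappings are
 * maintained incrementally by writing struct vhost_msg_v2 IOTLB
 * messages to the device fd as guest memory sections come and go.
 */
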
static bool vhost_vdpa_listener_skipped_section(MemoryRegionSection *section)
{
    return (!memory_region_is_ram(section->mr) &&
            !memory_region_is_iommu(section->mr)) ||
           /* vhost-vDPA doesn't allow MMIO to be mapped */
           memory_region_is_ram_device(section->mr) ||
           /*
            * Sizing an enabled 64-bit BAR can cause spurious mappings to
            * addresses in the upper part of the 64-bit address space.  These
            * are never accessed by the CPU and are beyond the address width
            * of some IOMMU hardware.  TODO: VDPA should tell us the IOMMU
            * width.
            */
           section->offset_within_address_space & (1ULL << 63);
}

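/*
 * Map [iova, iova + size) to the host VA 'vaddr' by writing a
 * VHOST_IOTLB_UPDATE message to the device fd.  A minimal usage sketch,
 * with hypothetical values, granting read/write access to one 4KiB page:
 *
 *     vhost_vdpa_dma_map(v, 0x100000, 0x1000, host_ptr, false);
 */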
static int vhost_vdpa_dma_map(struct vhost_vdpa *v, hwaddr iova, hwaddr size,
                              void *vaddr, bool readonly)
{
    struct vhost_msg_v2 msg = {};
    int fd = v->device_fd;
    int ret = 0;

    msg.type = v->msg_type;
    msg.iotlb.iova = iova;
    msg.iotlb.size = size;
    msg.iotlb.uaddr = (uint64_t)(uintptr_t)vaddr;
    msg.iotlb.perm = readonly ? VHOST_ACCESS_RO : VHOST_ACCESS_RW;
    msg.iotlb.type = VHOST_IOTLB_UPDATE;

    trace_vhost_vdpa_dma_map(v, fd, msg.type, msg.iotlb.iova, msg.iotlb.size,
                             msg.iotlb.uaddr, msg.iotlb.perm, msg.iotlb.type);

    if (write(fd, &msg, sizeof(msg)) != sizeof(msg)) {
        error_report("failed to write, fd=%d, errno=%d (%s)",
                     fd, errno, strerror(errno));
        return -EIO;
    }

    return ret;
}

static int vhost_vdpa_dma_unmap(struct vhost_vdpa *v, hwaddr iova,
                                hwaddr size)
{
    struct vhost_msg_v2 msg = {};
    int fd = v->device_fd;
    int ret = 0;

    msg.type = v->msg_type;
    msg.iotlb.iova = iova;
    msg.iotlb.size = size;
    msg.iotlb.type = VHOST_IOTLB_INVALIDATE;

    trace_vhost_vdpa_dma_unmap(v, fd, msg.type, msg.iotlb.iova,
                               msg.iotlb.size, msg.iotlb.type);

    if (write(fd, &msg, sizeof(msg)) != sizeof(msg)) {
        error_report("failed to write, fd=%d, errno=%d (%s)",
                     fd, errno, strerror(errno));
        return -EIO;
    }

    return ret;
}

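/*
 * When the backend advertises VHOST_BACKEND_F_IOTLB_BATCH, the listener's
 * begin/commit callbacks bracket each memory transaction with
 * VHOST_IOTLB_BATCH_BEGIN and VHOST_IOTLB_BATCH_END messages, letting the
 * kernel apply all updates in one go.  Without that capability each
 * update/invalidate message stands alone, so these hooks simply return.
 */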
static void vhost_vdpa_listener_begin(MemoryListener *listener)
{
    struct vhost_vdpa *v = container_of(listener, struct vhost_vdpa, listener);
    struct vhost_dev *dev = v->dev;
    struct vhost_msg_v2 msg = {};
    int fd = v->device_fd;

    if (!(dev->backend_cap & (0x1ULL << VHOST_BACKEND_F_IOTLB_BATCH))) {
        return;
    }

    msg.type = v->msg_type;
    msg.iotlb.type = VHOST_IOTLB_BATCH_BEGIN;

    if (write(fd, &msg, sizeof(msg)) != sizeof(msg)) {
        error_report("failed to write, fd=%d, errno=%d (%s)",
                     fd, errno, strerror(errno));
    }
}

static void vhost_vdpa_listener_commit(MemoryListener *listener)
{
    struct vhost_vdpa *v = container_of(listener, struct vhost_vdpa, listener);
    struct vhost_dev *dev = v->dev;
    struct vhost_msg_v2 msg = {};
    int fd = v->device_fd;

    if (!(dev->backend_cap & (0x1ULL << VHOST_BACKEND_F_IOTLB_BATCH))) {
        return;
    }

    msg.type = v->msg_type;
    msg.iotlb.type = VHOST_IOTLB_BATCH_END;

    if (write(fd, &msg, sizeof(msg)) != sizeof(msg)) {
        error_report("failed to write, fd=%d, errno=%d (%s)",
                     fd, errno, strerror(errno));
    }
}

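/*
 * region_add/region_del clamp each section to host-page-aligned IOVA
 * bounds: the start is rounded up with TARGET_PAGE_ALIGN() and the end is
 * rounded down by masking with TARGET_PAGE_MASK.  For example, assuming
 * hypothetical 4KiB pages, a section spanning [0x2800, 0x5800) is mapped
 * as [0x3000, 0x5000); a section that becomes empty after clamping is
 * skipped.
 */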
static void vhost_vdpa_listener_region_add(MemoryListener *listener,
                                           MemoryRegionSection *section)
{
    struct vhost_vdpa *v = container_of(listener, struct vhost_vdpa, listener);
    hwaddr iova;
    Int128 llend, llsize;
    void *vaddr;
    int ret;

    if (vhost_vdpa_listener_skipped_section(section)) {
        return;
    }

    if (unlikely((section->offset_within_address_space & ~TARGET_PAGE_MASK) !=
                 (section->offset_within_region & ~TARGET_PAGE_MASK))) {
        error_report("%s received unaligned region", __func__);
        return;
    }

    iova = TARGET_PAGE_ALIGN(section->offset_within_address_space);
    llend = int128_make64(section->offset_within_address_space);
    llend = int128_add(llend, section->size);
    llend = int128_and(llend, int128_exts64(TARGET_PAGE_MASK));

    if (int128_ge(int128_make64(iova), llend)) {
        return;
    }

    memory_region_ref(section->mr);

    /* Here we assume that memory_region_is_ram(section->mr) == true */

    vaddr = memory_region_get_ram_ptr(section->mr) +
            section->offset_within_region +
            (iova - section->offset_within_address_space);

    trace_vhost_vdpa_listener_region_add(v, iova, int128_get64(llend),
                                         vaddr, section->readonly);

    llsize = int128_sub(llend, int128_make64(iova));

    ret = vhost_vdpa_dma_map(v, iova, int128_get64(llsize),
                             vaddr, section->readonly);
    if (ret) {
        error_report("vhost vdpa map fail!");
        goto fail;
    }

    return;

fail:
    /*
     * On the initfn path, store the first error in the container so we
     * can gracefully fail.  Runtime, there's not much we can do other
     * than throw a hardware error.
     */
    error_report("vhost-vdpa: DMA mapping failed, unable to continue");
    return;
}

static void vhost_vdpa_listener_region_del(MemoryListener *listener,
                                           MemoryRegionSection *section)
{
    struct vhost_vdpa *v = container_of(listener, struct vhost_vdpa, listener);
    hwaddr iova;
    Int128 llend, llsize;
    int ret;

    if (vhost_vdpa_listener_skipped_section(section)) {
        return;
    }

    if (unlikely((section->offset_within_address_space & ~TARGET_PAGE_MASK) !=
                 (section->offset_within_region & ~TARGET_PAGE_MASK))) {
        error_report("%s received unaligned region", __func__);
        return;
    }

    iova = TARGET_PAGE_ALIGN(section->offset_within_address_space);
    llend = int128_make64(section->offset_within_address_space);
    llend = int128_add(llend, section->size);
    llend = int128_and(llend, int128_exts64(TARGET_PAGE_MASK));

    trace_vhost_vdpa_listener_region_del(v, iova, int128_get64(llend));

    if (int128_ge(int128_make64(iova), llend)) {
        return;
    }

    llsize = int128_sub(llend, int128_make64(iova));

    ret = vhost_vdpa_dma_unmap(v, iova, int128_get64(llsize));
    if (ret) {
        error_report("vhost_vdpa dma unmap error!");
    }

    memory_region_unref(section->mr);
}

/*
 * The IOTLB API is used by vhost-vdpa, which requires incremental
 * updating of the mapping, so we cannot use the generic vhost memory
 * listener, which depends on addnop().
 */
static const MemoryListener vhost_vdpa_memory_listener = {
    .begin = vhost_vdpa_listener_begin,
    .commit = vhost_vdpa_listener_commit,
    .region_add = vhost_vdpa_listener_region_add,
    .region_del = vhost_vdpa_listener_region_del,
};

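/*
 * Everything other than IOTLB updates is a plain ioctl on the vhost-vdpa
 * device fd.
 */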
static int vhost_vdpa_call(struct vhost_dev *dev, unsigned long int request,
                           void *arg)
{
    struct vhost_vdpa *v = dev->opaque;
    int fd = v->device_fd;

    assert(dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_VDPA);

    return ioctl(fd, request, arg);
}

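/*
 * Read-modify-write of the virtio device status byte: read the current
 * status, OR in the new bits, and write the result back.  Bits are only
 * ever added here; they are cleared by resetting the device (status 0),
 * see vhost_vdpa_reset_device().
 */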
static void vhost_vdpa_add_status(struct vhost_dev *dev, uint8_t status)
{
    uint8_t s;

    trace_vhost_vdpa_add_status(dev, status);
    if (vhost_vdpa_call(dev, VHOST_VDPA_GET_STATUS, &s)) {
        return;
    }

    s |= status;

    vhost_vdpa_call(dev, VHOST_VDPA_SET_STATUS, &s);
}

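/*
 * Status bits follow the virtio initialization sequence: ACKNOWLEDGE and
 * DRIVER are set at backend init below, FEATURES_OK once features are
 * negotiated in vhost_vdpa_set_features(), and DRIVER_OK when the device
 * is started in vhost_vdpa_dev_start().
 */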
static int vhost_vdpa_init(struct vhost_dev *dev, void *opaque)
{
    struct vhost_vdpa *v;

    assert(dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_VDPA);
    trace_vhost_vdpa_init(dev, opaque);

    v = opaque;
    v->dev = dev;
    dev->opaque = opaque;
    v->listener = vhost_vdpa_memory_listener;
    v->msg_type = VHOST_IOTLB_MSG_V2;

    vhost_vdpa_add_status(dev, VIRTIO_CONFIG_S_ACKNOWLEDGE |
                               VIRTIO_CONFIG_S_DRIVER);

    return 0;
}

static void vhost_vdpa_host_notifier_uninit(struct vhost_dev *dev,
                                            int queue_index)
{
    size_t page_size = qemu_real_host_page_size;
    struct vhost_vdpa *v = dev->opaque;
    VirtIODevice *vdev = dev->vdev;
    VhostVDPAHostNotifier *n;

    n = &v->notifier[queue_index];

    if (n->addr) {
        virtio_queue_set_host_notifier_mr(vdev, queue_index, &n->mr, false);
        object_unparent(OBJECT(&n->mr));
        munmap(n->addr, page_size);
        n->addr = NULL;
    }
}

static void vhost_vdpa_host_notifiers_uninit(struct vhost_dev *dev, int n)
{
    int i;

    for (i = 0; i < n; i++) {
        vhost_vdpa_host_notifier_uninit(dev, i);
    }
}

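/*
 * A host notifier is a doorbell page exposed by the device: mmap()ing one
 * host page at offset queue_index * page_size of the vhost-vdpa fd yields
 * a write-only region that is wrapped in a ram-device MemoryRegion and
 * mapped over the queue's notification address, so guest kicks can reach
 * the device directly instead of bouncing through an ioeventfd.
 */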
static int vhost_vdpa_host_notifier_init(struct vhost_dev *dev, int queue_index)
{
    size_t page_size = qemu_real_host_page_size;
    struct vhost_vdpa *v = dev->opaque;
    VirtIODevice *vdev = dev->vdev;
    VhostVDPAHostNotifier *n;
    int fd = v->device_fd;
    void *addr;
    char *name;

    vhost_vdpa_host_notifier_uninit(dev, queue_index);

    n = &v->notifier[queue_index];

    addr = mmap(NULL, page_size, PROT_WRITE, MAP_SHARED, fd,
                queue_index * page_size);
    if (addr == MAP_FAILED) {
        goto err;
    }

    name = g_strdup_printf("vhost-vdpa/host-notifier@%p mmaps[%d]",
                           v, queue_index);
    memory_region_init_ram_device_ptr(&n->mr, OBJECT(vdev), name,
                                      page_size, addr);
    g_free(name);

    if (virtio_queue_set_host_notifier_mr(vdev, queue_index, &n->mr, true)) {
        munmap(addr, page_size);
        goto err;
    }
    n->addr = addr;

    return 0;

err:
    return -1;
}

static void vhost_vdpa_host_notifiers_init(struct vhost_dev *dev)
{
    int i;

    for (i = dev->vq_index; i < dev->vq_index + dev->nvqs; i++) {
        if (vhost_vdpa_host_notifier_init(dev, i)) {
            goto err;
        }
    }

    return;

err:
    vhost_vdpa_host_notifiers_uninit(dev, i);
    return;
}

static int vhost_vdpa_cleanup(struct vhost_dev *dev)
{
    struct vhost_vdpa *v;

    assert(dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_VDPA);
    v = dev->opaque;
    trace_vhost_vdpa_cleanup(dev, v);
    vhost_vdpa_host_notifiers_uninit(dev, dev->nvqs);
    memory_listener_unregister(&v->listener);

    dev->opaque = NULL;
    return 0;
}

static int vhost_vdpa_memslots_limit(struct vhost_dev *dev)
{
    trace_vhost_vdpa_memslots_limit(dev, INT_MAX);
    return INT_MAX;
}

static int vhost_vdpa_set_mem_table(struct vhost_dev *dev,
                                    struct vhost_memory *mem)
{
    trace_vhost_vdpa_set_mem_table(dev, mem->nregions, mem->padding);
    if (trace_event_get_state_backends(TRACE_VHOST_VDPA_SET_MEM_TABLE) &&
        trace_event_get_state_backends(TRACE_VHOST_VDPA_DUMP_REGIONS)) {
        int i;
        for (i = 0; i < mem->nregions; i++) {
            trace_vhost_vdpa_dump_regions(dev, i,
                                          mem->regions[i].guest_phys_addr,
                                          mem->regions[i].memory_size,
                                          mem->regions[i].userspace_addr,
                                          mem->regions[i].flags_padding);
        }
    }
    if (mem->padding) {
        return -1;
    }

    return 0;
}

static int vhost_vdpa_set_features(struct vhost_dev *dev,
                                   uint64_t features)
{
    uint8_t status = 0;
    int ret;

    trace_vhost_vdpa_set_features(dev, features);
    ret = vhost_vdpa_call(dev, VHOST_SET_FEATURES, &features);
    if (ret) {
        return ret;
    }
    vhost_vdpa_add_status(dev, VIRTIO_CONFIG_S_FEATURES_OK);
    vhost_vdpa_call(dev, VHOST_VDPA_GET_STATUS, &status);

    return !(status & VIRTIO_CONFIG_S_FEATURES_OK);
}

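/*
 * Backend capability negotiation: mask the bits the kernel advertises
 * down to what this file can use (IOTLB v2 messages and IOTLB batching)
 * and push the result back.  For example, a kernel advertising both bits
 * leaves features == (0x1ULL << VHOST_BACKEND_F_IOTLB_MSG_V2) |
 * (0x1ULL << VHOST_BACKEND_F_IOTLB_BATCH).
 */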
static int vhost_vdpa_set_backend_cap(struct vhost_dev *dev)
{
    uint64_t features;
    uint64_t f = 0x1ULL << VHOST_BACKEND_F_IOTLB_MSG_V2 |
        0x1ULL << VHOST_BACKEND_F_IOTLB_BATCH;
    int r;

    if (vhost_vdpa_call(dev, VHOST_GET_BACKEND_FEATURES, &features)) {
        return 0;
    }

    features &= f;
    r = vhost_vdpa_call(dev, VHOST_SET_BACKEND_FEATURES, &features);
    if (r) {
        return 0;
    }

    dev->backend_cap = features;

    return 0;
}

static int vhost_vdpa_get_device_id(struct vhost_dev *dev,
                                    uint32_t *device_id)
{
    int ret;

    ret = vhost_vdpa_call(dev, VHOST_VDPA_GET_DEVICE_ID, device_id);
    trace_vhost_vdpa_get_device_id(dev, *device_id);
    return ret;
}

static int vhost_vdpa_reset_device(struct vhost_dev *dev)
{
    int ret;
    uint8_t status = 0;

    ret = vhost_vdpa_call(dev, VHOST_VDPA_SET_STATUS, &status);
    trace_vhost_vdpa_reset_device(dev, status);
    return ret;
}

static int vhost_vdpa_get_vq_index(struct vhost_dev *dev, int idx)
{
    assert(idx >= dev->vq_index && idx < dev->vq_index + dev->nvqs);

    trace_vhost_vdpa_get_vq_index(dev, idx, idx - dev->vq_index);
    return idx - dev->vq_index;
}

static int vhost_vdpa_set_vring_ready(struct vhost_dev *dev)
{
    int i;

    trace_vhost_vdpa_set_vring_ready(dev);
    for (i = 0; i < dev->nvqs; ++i) {
        struct vhost_vring_state state = {
            .index = dev->vq_index + i,
            .num = 1,
        };
        vhost_vdpa_call(dev, VHOST_VDPA_SET_VRING_ENABLE, &state);
    }
    return 0;
}

static void vhost_vdpa_dump_config(struct vhost_dev *dev, const uint8_t *config,
                                   uint32_t config_len)
{
    int b, len;
    char line[QEMU_HEXDUMP_LINE_LEN];

    for (b = 0; b < config_len; b += 16) {
        len = config_len - b;
        qemu_hexdump_line(line, b, config, len, false);
        trace_vhost_vdpa_dump_config(dev, line);
    }
}

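/*
 * struct vhost_vdpa_config (kernel UAPI) ends in a flexible array member
 * 'buf', so both config accessors allocate header plus payload:
 *
 *     config = g_malloc(size + offsetof(struct vhost_vdpa_config, buf));
 */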
static int vhost_vdpa_set_config(struct vhost_dev *dev, const uint8_t *data,
                                 uint32_t offset, uint32_t size,
                                 uint32_t flags)
{
    struct vhost_vdpa_config *config;
    int ret;
    unsigned long config_size = offsetof(struct vhost_vdpa_config, buf);

    trace_vhost_vdpa_set_config(dev, offset, size, flags);
    config = g_malloc(size + config_size);
    config->off = offset;
    config->len = size;
    memcpy(config->buf, data, size);
    if (trace_event_get_state_backends(TRACE_VHOST_VDPA_SET_CONFIG) &&
        trace_event_get_state_backends(TRACE_VHOST_VDPA_DUMP_CONFIG)) {
        vhost_vdpa_dump_config(dev, data, size);
    }
    ret = vhost_vdpa_call(dev, VHOST_VDPA_SET_CONFIG, config);
    g_free(config);
    return ret;
}

static int vhost_vdpa_get_config(struct vhost_dev *dev, uint8_t *config,
                                 uint32_t config_len)
{
    struct vhost_vdpa_config *v_config;
    unsigned long config_size = offsetof(struct vhost_vdpa_config, buf);
    int ret;

    trace_vhost_vdpa_get_config(dev, config, config_len);
    v_config = g_malloc(config_len + config_size);
    v_config->len = config_len;
    v_config->off = 0;
    ret = vhost_vdpa_call(dev, VHOST_VDPA_GET_CONFIG, v_config);
    memcpy(config, v_config->buf, config_len);
    g_free(v_config);
    if (trace_event_get_state_backends(TRACE_VHOST_VDPA_GET_CONFIG) &&
        trace_event_get_state_backends(TRACE_VHOST_VDPA_DUMP_CONFIG)) {
        vhost_vdpa_dump_config(dev, config, config_len);
    }
    return ret;
}

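/*
 * Start: register the memory listener (which replays the current memory
 * map), set up host notifiers, enable the rings, then announce DRIVER_OK
 * and check that the device accepted it.  Stop mirrors this: reset the
 * device (clearing the status byte), re-acknowledge it, and tear down the
 * notifiers and the listener.
 */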
static int vhost_vdpa_dev_start(struct vhost_dev *dev, bool started)
{
    struct vhost_vdpa *v = dev->opaque;

    trace_vhost_vdpa_dev_start(dev, started);
    if (started) {
        uint8_t status = 0;

        memory_listener_register(&v->listener, &address_space_memory);
        vhost_vdpa_host_notifiers_init(dev);
        vhost_vdpa_set_vring_ready(dev);
        vhost_vdpa_add_status(dev, VIRTIO_CONFIG_S_DRIVER_OK);
        vhost_vdpa_call(dev, VHOST_VDPA_GET_STATUS, &status);

        return !(status & VIRTIO_CONFIG_S_DRIVER_OK);
    } else {
        vhost_vdpa_reset_device(dev);
        vhost_vdpa_add_status(dev, VIRTIO_CONFIG_S_ACKNOWLEDGE |
                                   VIRTIO_CONFIG_S_DRIVER);
        vhost_vdpa_host_notifiers_uninit(dev, dev->nvqs);
        memory_listener_unregister(&v->listener);

        return 0;
    }
}

static int vhost_vdpa_set_log_base(struct vhost_dev *dev, uint64_t base,
                                   struct vhost_log *log)
{
    trace_vhost_vdpa_set_log_base(dev, base, log->size, log->refcnt, log->fd,
                                  log->log);
    return vhost_vdpa_call(dev, VHOST_SET_LOG_BASE, &base);
}

static int vhost_vdpa_set_vring_addr(struct vhost_dev *dev,
                                     struct vhost_vring_addr *addr)
{
    trace_vhost_vdpa_set_vring_addr(dev, addr->index, addr->flags,
                                    addr->desc_user_addr, addr->used_user_addr,
                                    addr->avail_user_addr,
                                    addr->log_guest_addr);
    return vhost_vdpa_call(dev, VHOST_SET_VRING_ADDR, addr);
}

static int vhost_vdpa_set_vring_num(struct vhost_dev *dev,
                                    struct vhost_vring_state *ring)
{
    trace_vhost_vdpa_set_vring_num(dev, ring->index, ring->num);
    return vhost_vdpa_call(dev, VHOST_SET_VRING_NUM, ring);
}

static int vhost_vdpa_set_vring_base(struct vhost_dev *dev,
                                     struct vhost_vring_state *ring)
{
    trace_vhost_vdpa_set_vring_base(dev, ring->index, ring->num);
    return vhost_vdpa_call(dev, VHOST_SET_VRING_BASE, ring);
}

static int vhost_vdpa_get_vring_base(struct vhost_dev *dev,
                                     struct vhost_vring_state *ring)
{
    int ret;

    ret = vhost_vdpa_call(dev, VHOST_GET_VRING_BASE, ring);
    trace_vhost_vdpa_get_vring_base(dev, ring->index, ring->num);
    return ret;
}

static int vhost_vdpa_set_vring_kick(struct vhost_dev *dev,
                                     struct vhost_vring_file *file)
{
    trace_vhost_vdpa_set_vring_kick(dev, file->index, file->fd);
    return vhost_vdpa_call(dev, VHOST_SET_VRING_KICK, file);
}

static int vhost_vdpa_set_vring_call(struct vhost_dev *dev,
                                     struct vhost_vring_file *file)
{
    trace_vhost_vdpa_set_vring_call(dev, file->index, file->fd);
    return vhost_vdpa_call(dev, VHOST_SET_VRING_CALL, file);
}

static int vhost_vdpa_get_features(struct vhost_dev *dev,
                                   uint64_t *features)
{
    int ret;

    ret = vhost_vdpa_call(dev, VHOST_GET_FEATURES, features);
    trace_vhost_vdpa_get_features(dev, *features);
    return ret;
}

static int vhost_vdpa_set_owner(struct vhost_dev *dev)
{
    trace_vhost_vdpa_set_owner(dev);
    return vhost_vdpa_call(dev, VHOST_SET_OWNER, NULL);
}

static int vhost_vdpa_vq_get_addr(struct vhost_dev *dev,
                    struct vhost_vring_addr *addr, struct vhost_virtqueue *vq)
{
    assert(dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_VDPA);
    addr->desc_user_addr = (uint64_t)(unsigned long)vq->desc_phys;
    addr->avail_user_addr = (uint64_t)(unsigned long)vq->avail_phys;
    addr->used_user_addr = (uint64_t)(unsigned long)vq->used_phys;
    trace_vhost_vdpa_vq_get_addr(dev, vq, addr->desc_user_addr,
                                 addr->avail_user_addr, addr->used_user_addr);
    return 0;
}

static bool vhost_vdpa_force_iommu(struct vhost_dev *dev)
{
    return true;
}

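/*
 * NULL entries below are optional callbacks this backend does not
 * implement; the generic vhost code is expected to check for NULL before
 * invoking them.
 */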
const VhostOps vdpa_ops = {
    .backend_type = VHOST_BACKEND_TYPE_VDPA,
    .vhost_backend_init = vhost_vdpa_init,
    .vhost_backend_cleanup = vhost_vdpa_cleanup,
    .vhost_set_log_base = vhost_vdpa_set_log_base,
    .vhost_set_vring_addr = vhost_vdpa_set_vring_addr,
    .vhost_set_vring_num = vhost_vdpa_set_vring_num,
    .vhost_set_vring_base = vhost_vdpa_set_vring_base,
    .vhost_get_vring_base = vhost_vdpa_get_vring_base,
    .vhost_set_vring_kick = vhost_vdpa_set_vring_kick,
    .vhost_set_vring_call = vhost_vdpa_set_vring_call,
    .vhost_get_features = vhost_vdpa_get_features,
    .vhost_set_backend_cap = vhost_vdpa_set_backend_cap,
    .vhost_set_owner = vhost_vdpa_set_owner,
    .vhost_set_vring_endian = NULL,
    .vhost_backend_memslots_limit = vhost_vdpa_memslots_limit,
    .vhost_set_mem_table = vhost_vdpa_set_mem_table,
    .vhost_set_features = vhost_vdpa_set_features,
    .vhost_reset_device = vhost_vdpa_reset_device,
    .vhost_get_vq_index = vhost_vdpa_get_vq_index,
    .vhost_get_config = vhost_vdpa_get_config,
    .vhost_set_config = vhost_vdpa_set_config,
    .vhost_requires_shm_log = NULL,
    .vhost_migration_done = NULL,
    .vhost_backend_can_merge = NULL,
    .vhost_net_set_mtu = NULL,
    .vhost_set_iotlb_callback = NULL,
    .vhost_send_device_iotlb_msg = NULL,
    .vhost_dev_start = vhost_vdpa_dev_start,
    .vhost_get_device_id = vhost_vdpa_get_device_id,
    .vhost_vq_get_addr = vhost_vdpa_vq_get_addr,
    .vhost_force_iommu = vhost_vdpa_force_iommu,
};