// SPDX-License-Identifier: GPL-2.0-only
/*
 * VDPA device simulator core.
 *
 * Copyright (c) 2020, Red Hat Inc. All rights reserved.
 *     Author: Jason Wang <jasowang@redhat.com>
 *
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/dma-map-ops.h>
#include <linux/vringh.h>
#include <linux/vdpa.h>
#include <linux/vhost_iotlb.h>
#include <uapi/linux/vdpa.h>

#include "vdpa_sim.h"

#define DRV_VERSION  "0.1"
#define DRV_AUTHOR   "Jason Wang <jasowang@redhat.com>"
#define DRV_DESC     "vDPA Device Simulator core"
#define DRV_LICENSE  "GPL v2"

static int batch_mapping = 1;
module_param(batch_mapping, int, 0444);
MODULE_PARM_DESC(batch_mapping, "Batched mapping: 1 - Enable; 0 - Disable");
32 
33 static int max_iotlb_entries = 2048;
34 module_param(max_iotlb_entries, int, 0444);
35 MODULE_PARM_DESC(max_iotlb_entries,
36 		 "Maximum number of iotlb entries for each address space. 0 means unlimited. (default: 2048)");
37 
38 #define VDPASIM_QUEUE_ALIGN PAGE_SIZE
39 #define VDPASIM_QUEUE_MAX 256
40 #define VDPASIM_VENDOR_ID 0
41 
static struct vdpasim *vdpa_to_sim(struct vdpa_device *vdpa)
{
	return container_of(vdpa, struct vdpasim, vdpa);
}

static void vdpasim_vq_notify(struct vringh *vring)
{
	struct vdpasim_virtqueue *vq =
		container_of(vring, struct vdpasim_virtqueue, vring);

	if (!vq->cb)
		return;

	vq->cb(vq->private);
}

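/*
 * (Re)initialize the vringh instance backing a virtqueue once the driver
 * marks it ready, preserving last_avail_idx across the re-init.
 */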
static void vdpasim_queue_ready(struct vdpasim *vdpasim, unsigned int idx)
{
	struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx];
	uint16_t last_avail_idx = vq->vring.last_avail_idx;

	vringh_init_iotlb(&vq->vring, vdpasim->features, vq->num, true,
			  (struct vring_desc *)(uintptr_t)vq->desc_addr,
			  (struct vring_avail *)
			  (uintptr_t)vq->driver_addr,
			  (struct vring_used *)
			  (uintptr_t)vq->device_addr);

	vq->vring.last_avail_idx = last_avail_idx;

	/*
	 * Since vdpa_sim does not support receiving inflight descriptors as
	 * the destination of a migration, set avail_idx and used_idx to the
	 * same value at vq start.  This is how vhost-user behaves in a
	 * VHOST_SET_VRING_BASE call.
	 *
	 * Although the simple fix would be to set last_used_idx in
	 * vdpasim_set_vq_state, it would be reset here in
	 * vdpasim_queue_ready.
	 */
	vq->vring.last_used_idx = last_avail_idx;
	vq->vring.notify = vdpasim_vq_notify;
}

static void vdpasim_vq_reset(struct vdpasim *vdpasim,
			     struct vdpasim_virtqueue *vq)
{
	vq->ready = false;
	vq->desc_addr = 0;
	vq->driver_addr = 0;
	vq->device_addr = 0;
	vq->cb = NULL;
	vq->private = NULL;
	vringh_init_iotlb(&vq->vring, vdpasim->dev_attr.supported_features,
			  VDPASIM_QUEUE_MAX, false, NULL, NULL, NULL);

	vq->vring.notify = NULL;
}

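/*
 * Reset every virtqueue and rebind it to address space 0, then restore each
 * address space to an identity (pass-through) mapping over the whole IOVA
 * range.
 */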
static void vdpasim_do_reset(struct vdpasim *vdpasim)
{
	int i;

	spin_lock(&vdpasim->iommu_lock);

	for (i = 0; i < vdpasim->dev_attr.nvqs; i++) {
		vdpasim_vq_reset(vdpasim, &vdpasim->vqs[i]);
		vringh_set_iotlb(&vdpasim->vqs[i].vring, &vdpasim->iommu[0],
				 &vdpasim->iommu_lock);
	}

	for (i = 0; i < vdpasim->dev_attr.nas; i++) {
		vhost_iotlb_reset(&vdpasim->iommu[i]);
		vhost_iotlb_add_range(&vdpasim->iommu[i], 0, ULONG_MAX,
				      0, VHOST_MAP_RW);
		vdpasim->iommu_pt[i] = true;
	}

	vdpasim->running = true;
	spin_unlock(&vdpasim->iommu_lock);

	vdpasim->features = 0;
	vdpasim->status = 0;
	++vdpasim->generation;
}

static const struct vdpa_config_ops vdpasim_config_ops;
static const struct vdpa_config_ops vdpasim_batch_config_ops;

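/**
 * vdpasim_create - allocate and initialize a vDPA device simulator
 * @dev_attr: device attributes supplied by the device-specific simulator;
 *            copied into the new instance (supported_features may be
 *            narrowed by @config)
 * @config: initial device configuration from the vdpa management layer
 *
 * Return: the new vdpasim instance on success, an ERR_PTR() on failure.
 */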
struct vdpasim *vdpasim_create(struct vdpasim_dev_attr *dev_attr,
			       const struct vdpa_dev_set_config *config)
{
	const struct vdpa_config_ops *ops;
	struct vdpa_device *vdpa;
	struct vdpasim *vdpasim;
	struct device *dev;
	int i, ret = -ENOMEM;

	if (!dev_attr->alloc_size)
		return ERR_PTR(-EINVAL);

	if (config->mask & BIT_ULL(VDPA_ATTR_DEV_FEATURES)) {
		if (config->device_features &
		    ~dev_attr->supported_features)
			return ERR_PTR(-EINVAL);
		dev_attr->supported_features =
			config->device_features;
	}

	if (batch_mapping)
		ops = &vdpasim_batch_config_ops;
	else
		ops = &vdpasim_config_ops;

	vdpa = __vdpa_alloc_device(NULL, ops,
				   dev_attr->ngroups, dev_attr->nas,
				   dev_attr->alloc_size,
				   dev_attr->name, false);
	if (IS_ERR(vdpa)) {
		ret = PTR_ERR(vdpa);
		goto err_alloc;
	}

	vdpasim = vdpa_to_sim(vdpa);
	vdpasim->dev_attr = *dev_attr;
	INIT_WORK(&vdpasim->work, dev_attr->work_fn);
	spin_lock_init(&vdpasim->lock);
	spin_lock_init(&vdpasim->iommu_lock);

	dev = &vdpasim->vdpa.dev;
	dev->dma_mask = &dev->coherent_dma_mask;
	ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
	if (ret)
		goto err_iommu;
	ret = -ENOMEM;
	vdpasim->vdpa.mdev = dev_attr->mgmt_dev;

	vdpasim->config = kzalloc(dev_attr->config_size, GFP_KERNEL);
	if (!vdpasim->config)
		goto err_iommu;

	vdpasim->vqs = kcalloc(dev_attr->nvqs, sizeof(struct vdpasim_virtqueue),
			       GFP_KERNEL);
	if (!vdpasim->vqs)
		goto err_iommu;

	vdpasim->iommu = kmalloc_array(vdpasim->dev_attr.nas,
				       sizeof(*vdpasim->iommu), GFP_KERNEL);
	if (!vdpasim->iommu)
		goto err_iommu;

	vdpasim->iommu_pt = kmalloc_array(vdpasim->dev_attr.nas,
					  sizeof(*vdpasim->iommu_pt), GFP_KERNEL);
	if (!vdpasim->iommu_pt)
		goto err_iommu;

	for (i = 0; i < vdpasim->dev_attr.nas; i++)
		vhost_iotlb_init(&vdpasim->iommu[i], max_iotlb_entries, 0);

	vdpasim->buffer = kvmalloc(dev_attr->buffer_size, GFP_KERNEL);
	if (!vdpasim->buffer)
		goto err_iommu;

	for (i = 0; i < dev_attr->nvqs; i++)
		vringh_set_iotlb(&vdpasim->vqs[i].vring, &vdpasim->iommu[0],
				 &vdpasim->iommu_lock);

	vdpasim->vdpa.dma_dev = dev;

	return vdpasim;

err_iommu:
	put_device(dev);
err_alloc:
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(vdpasim_create);
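
/*
 * A minimal usage sketch for a device-specific simulator.  The "foo" names
 * below are illustrative only and not part of this file:
 *
 *	struct vdpasim_dev_attr dev_attr = {
 *		.name = "foo",
 *		.alloc_size = sizeof(struct vdpasim_foo),
 *		.config_size = sizeof(struct virtio_foo_config),
 *		...
 *	};
 *	struct vdpasim *simdev = vdpasim_create(&dev_attr, config);
 *
 *	if (IS_ERR(simdev))
 *		return PTR_ERR(simdev);
 */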

static int vdpasim_set_vq_address(struct vdpa_device *vdpa, u16 idx,
				  u64 desc_area, u64 driver_area,
				  u64 device_area)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
	struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx];

	vq->desc_addr = desc_area;
	vq->driver_addr = driver_area;
	vq->device_addr = device_area;

	return 0;
}

static void vdpasim_set_vq_num(struct vdpa_device *vdpa, u16 idx, u32 num)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
	struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx];

	vq->num = num;
}

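/*
 * While the device is suspended with DRIVER_OK set, a kick is not serviced
 * immediately; it is latched in pending_kick and replayed by
 * vdpasim_resume().
 */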
static void vdpasim_kick_vq(struct vdpa_device *vdpa, u16 idx)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
	struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx];

	if (!vdpasim->running &&
	    (vdpasim->status & VIRTIO_CONFIG_S_DRIVER_OK)) {
		vdpasim->pending_kick = true;
		return;
	}

	if (vq->ready)
		schedule_work(&vdpasim->work);
}

static void vdpasim_set_vq_cb(struct vdpa_device *vdpa, u16 idx,
			      struct vdpa_callback *cb)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
	struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx];

	vq->cb = cb->callback;
	vq->private = cb->private;
}

static void vdpasim_set_vq_ready(struct vdpa_device *vdpa, u16 idx, bool ready)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
	struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx];
	bool old_ready;

	spin_lock(&vdpasim->lock);
	old_ready = vq->ready;
	vq->ready = ready;
	if (vq->ready && !old_ready)
		vdpasim_queue_ready(vdpasim, idx);
	spin_unlock(&vdpasim->lock);
}

static bool vdpasim_get_vq_ready(struct vdpa_device *vdpa, u16 idx)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
	struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx];

	return vq->ready;
}

static int vdpasim_set_vq_state(struct vdpa_device *vdpa, u16 idx,
				const struct vdpa_vq_state *state)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
	struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx];
	struct vringh *vrh = &vq->vring;

	spin_lock(&vdpasim->lock);
	vrh->last_avail_idx = state->split.avail_index;
	spin_unlock(&vdpasim->lock);

	return 0;
}

static int vdpasim_get_vq_state(struct vdpa_device *vdpa, u16 idx,
				struct vdpa_vq_state *state)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
	struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx];
	struct vringh *vrh = &vq->vring;

	state->split.avail_index = vrh->last_avail_idx;
	return 0;
}

static int vdpasim_get_vq_stats(struct vdpa_device *vdpa, u16 idx,
				struct sk_buff *msg,
				struct netlink_ext_ack *extack)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);

	if (vdpasim->dev_attr.get_stats)
		return vdpasim->dev_attr.get_stats(vdpasim, idx,
						   msg, extack);
	return -EOPNOTSUPP;
}

static u32 vdpasim_get_vq_align(struct vdpa_device *vdpa)
{
	return VDPASIM_QUEUE_ALIGN;
}

static u32 vdpasim_get_vq_group(struct vdpa_device *vdpa, u16 idx)
{
	/* RX and TX queues belong to group 0; the CVQ belongs to group 1 */
	if (idx == 2)
		return 1;
	else
		return 0;
}

static u64 vdpasim_get_device_features(struct vdpa_device *vdpa)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);

	return vdpasim->dev_attr.supported_features;
}

static int vdpasim_set_driver_features(struct vdpa_device *vdpa, u64 features)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);

	/* DMA mapping must be done by the driver */
	if (!(features & (1ULL << VIRTIO_F_ACCESS_PLATFORM)))
		return -EINVAL;

	vdpasim->features = features & vdpasim->dev_attr.supported_features;

	return 0;
}

static u64 vdpasim_get_driver_features(struct vdpa_device *vdpa)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);

	return vdpasim->features;
}

static void vdpasim_set_config_cb(struct vdpa_device *vdpa,
				  struct vdpa_callback *cb)
{
	/* We don't support config interrupts */
}

static u16 vdpasim_get_vq_num_max(struct vdpa_device *vdpa)
{
	return VDPASIM_QUEUE_MAX;
}

static u32 vdpasim_get_device_id(struct vdpa_device *vdpa)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);

	return vdpasim->dev_attr.id;
}

static u32 vdpasim_get_vendor_id(struct vdpa_device *vdpa)
{
	return VDPASIM_VENDOR_ID;
}

static u8 vdpasim_get_status(struct vdpa_device *vdpa)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
	u8 status;

	spin_lock(&vdpasim->lock);
	status = vdpasim->status;
	spin_unlock(&vdpasim->lock);

	return status;
}

static void vdpasim_set_status(struct vdpa_device *vdpa, u8 status)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);

	spin_lock(&vdpasim->lock);
	vdpasim->status = status;
	spin_unlock(&vdpasim->lock);
}

static int vdpasim_reset(struct vdpa_device *vdpa)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);

	spin_lock(&vdpasim->lock);
	vdpasim->status = 0;
	vdpasim_do_reset(vdpasim);
	spin_unlock(&vdpasim->lock);

	return 0;
}

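/*
 * Suspend stops datapath processing by clearing 'running'; kicks that arrive
 * while suspended are latched via pending_kick and serviced once
 * vdpasim_resume() sets 'running' again.
 */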
static int vdpasim_suspend(struct vdpa_device *vdpa)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);

	spin_lock(&vdpasim->lock);
	vdpasim->running = false;
	spin_unlock(&vdpasim->lock);

	return 0;
}

static int vdpasim_resume(struct vdpa_device *vdpa)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
	int i;

	spin_lock(&vdpasim->lock);
	vdpasim->running = true;

	if (vdpasim->pending_kick) {
		/* Process pending descriptors */
		for (i = 0; i < vdpasim->dev_attr.nvqs; ++i)
			vdpasim_kick_vq(vdpa, i);

		vdpasim->pending_kick = false;
	}

	spin_unlock(&vdpasim->lock);

	return 0;
}

static size_t vdpasim_get_config_size(struct vdpa_device *vdpa)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);

	return vdpasim->dev_attr.config_size;
}

static void vdpasim_get_config(struct vdpa_device *vdpa, unsigned int offset,
			       void *buf, unsigned int len)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);

	if (offset + len > vdpasim->dev_attr.config_size)
		return;

	if (vdpasim->dev_attr.get_config)
		vdpasim->dev_attr.get_config(vdpasim, vdpasim->config);

	memcpy(buf, vdpasim->config + offset, len);
}

static void vdpasim_set_config(struct vdpa_device *vdpa, unsigned int offset,
			       const void *buf, unsigned int len)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);

	if (offset + len > vdpasim->dev_attr.config_size)
		return;

	memcpy(vdpasim->config + offset, buf, len);

	if (vdpasim->dev_attr.set_config)
		vdpasim->dev_attr.set_config(vdpasim, vdpasim->config);
}

static u32 vdpasim_get_generation(struct vdpa_device *vdpa)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);

	return vdpasim->generation;
}

static struct vdpa_iova_range vdpasim_get_iova_range(struct vdpa_device *vdpa)
{
	struct vdpa_iova_range range = {
		.first = 0ULL,
		.last = ULLONG_MAX,
	};

	return range;
}

static int vdpasim_set_group_asid(struct vdpa_device *vdpa, unsigned int group,
				  unsigned int asid)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
	struct vhost_iotlb *iommu;
	int i;

	if (group >= vdpasim->dev_attr.ngroups)
		return -EINVAL;

	if (asid >= vdpasim->dev_attr.nas)
		return -EINVAL;

	iommu = &vdpasim->iommu[asid];

	spin_lock(&vdpasim->lock);

	for (i = 0; i < vdpasim->dev_attr.nvqs; i++)
		if (vdpasim_get_vq_group(vdpa, i) == group)
			vringh_set_iotlb(&vdpasim->vqs[i].vring, iommu,
					 &vdpasim->iommu_lock);

	spin_unlock(&vdpasim->lock);

	return 0;
}

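/*
 * Replace the whole IOTLB of an address space with the entries in @iotlb
 * (the batched-mapping path); on any failure the partially built IOTLB is
 * dropped rather than left half-populated.
 */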
static int vdpasim_set_map(struct vdpa_device *vdpa, unsigned int asid,
			   struct vhost_iotlb *iotlb)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
	struct vhost_iotlb_map *map;
	struct vhost_iotlb *iommu;
	u64 start = 0ULL, last = ULLONG_MAX;
	int ret;

	if (asid >= vdpasim->dev_attr.nas)
		return -EINVAL;

	spin_lock(&vdpasim->iommu_lock);

	iommu = &vdpasim->iommu[asid];
	vhost_iotlb_reset(iommu);
	vdpasim->iommu_pt[asid] = false;

	for (map = vhost_iotlb_itree_first(iotlb, start, last); map;
	     map = vhost_iotlb_itree_next(map, start, last)) {
		ret = vhost_iotlb_add_range(iommu, map->start,
					    map->last, map->addr, map->perm);
		if (ret)
			goto err;
	}
	spin_unlock(&vdpasim->iommu_lock);
	return 0;

err:
	vhost_iotlb_reset(iommu);
	spin_unlock(&vdpasim->iommu_lock);
	return ret;
}

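/*
 * dma_map()/dma_unmap() apply incremental IOTLB updates (the non-batched
 * path); the first update on an address space drops the identity mapping
 * installed at reset before touching the requested range.
 */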
static int vdpasim_dma_map(struct vdpa_device *vdpa, unsigned int asid,
			   u64 iova, u64 size,
			   u64 pa, u32 perm, void *opaque)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
	int ret;

	if (asid >= vdpasim->dev_attr.nas)
		return -EINVAL;

	spin_lock(&vdpasim->iommu_lock);
	if (vdpasim->iommu_pt[asid]) {
		vhost_iotlb_reset(&vdpasim->iommu[asid]);
		vdpasim->iommu_pt[asid] = false;
	}
	ret = vhost_iotlb_add_range_ctx(&vdpasim->iommu[asid], iova,
					iova + size - 1, pa, perm, opaque);
	spin_unlock(&vdpasim->iommu_lock);

	return ret;
}

static int vdpasim_dma_unmap(struct vdpa_device *vdpa, unsigned int asid,
			     u64 iova, u64 size)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);

	if (asid >= vdpasim->dev_attr.nas)
		return -EINVAL;

	spin_lock(&vdpasim->iommu_lock);
	/* Drop the identity map under iommu_lock, as vdpasim_dma_map() does */
	if (vdpasim->iommu_pt[asid]) {
		vhost_iotlb_reset(&vdpasim->iommu[asid]);
		vdpasim->iommu_pt[asid] = false;
	}
	vhost_iotlb_del_range(&vdpasim->iommu[asid], iova, iova + size - 1);
	spin_unlock(&vdpasim->iommu_lock);

	return 0;
}

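/*
 * Release callback invoked by the vdpa core when the device is unregistered;
 * the simulator work must be flushed before its resources go away.
 */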
static void vdpasim_free(struct vdpa_device *vdpa)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
	int i;

	cancel_work_sync(&vdpasim->work);

	for (i = 0; i < vdpasim->dev_attr.nvqs; i++) {
		vringh_kiov_cleanup(&vdpasim->vqs[i].out_iov);
		vringh_kiov_cleanup(&vdpasim->vqs[i].in_iov);
	}

	kvfree(vdpasim->buffer);
	for (i = 0; i < vdpasim->dev_attr.nas; i++)
		vhost_iotlb_reset(&vdpasim->iommu[i]);
	kfree(vdpasim->iommu);
	kfree(vdpasim->iommu_pt);
	kfree(vdpasim->vqs);
	kfree(vdpasim->config);
}

static const struct vdpa_config_ops vdpasim_config_ops = {
	.set_vq_address         = vdpasim_set_vq_address,
	.set_vq_num             = vdpasim_set_vq_num,
	.kick_vq                = vdpasim_kick_vq,
	.set_vq_cb              = vdpasim_set_vq_cb,
	.set_vq_ready           = vdpasim_set_vq_ready,
	.get_vq_ready           = vdpasim_get_vq_ready,
	.set_vq_state           = vdpasim_set_vq_state,
	.get_vendor_vq_stats    = vdpasim_get_vq_stats,
	.get_vq_state           = vdpasim_get_vq_state,
	.get_vq_align           = vdpasim_get_vq_align,
	.get_vq_group           = vdpasim_get_vq_group,
	.get_device_features    = vdpasim_get_device_features,
	.set_driver_features    = vdpasim_set_driver_features,
	.get_driver_features    = vdpasim_get_driver_features,
	.set_config_cb          = vdpasim_set_config_cb,
	.get_vq_num_max         = vdpasim_get_vq_num_max,
	.get_device_id          = vdpasim_get_device_id,
	.get_vendor_id          = vdpasim_get_vendor_id,
	.get_status             = vdpasim_get_status,
	.set_status             = vdpasim_set_status,
	.reset			= vdpasim_reset,
	.suspend		= vdpasim_suspend,
	.resume			= vdpasim_resume,
	.get_config_size        = vdpasim_get_config_size,
	.get_config             = vdpasim_get_config,
	.set_config             = vdpasim_set_config,
	.get_generation         = vdpasim_get_generation,
	.get_iova_range         = vdpasim_get_iova_range,
	.set_group_asid         = vdpasim_set_group_asid,
	.dma_map                = vdpasim_dma_map,
	.dma_unmap              = vdpasim_dma_unmap,
	.free                   = vdpasim_free,
};

static const struct vdpa_config_ops vdpasim_batch_config_ops = {
	.set_vq_address         = vdpasim_set_vq_address,
	.set_vq_num             = vdpasim_set_vq_num,
	.kick_vq                = vdpasim_kick_vq,
	.set_vq_cb              = vdpasim_set_vq_cb,
	.set_vq_ready           = vdpasim_set_vq_ready,
	.get_vq_ready           = vdpasim_get_vq_ready,
	.set_vq_state           = vdpasim_set_vq_state,
	.get_vendor_vq_stats    = vdpasim_get_vq_stats,
	.get_vq_state           = vdpasim_get_vq_state,
	.get_vq_align           = vdpasim_get_vq_align,
	.get_vq_group           = vdpasim_get_vq_group,
	.get_device_features    = vdpasim_get_device_features,
	.set_driver_features    = vdpasim_set_driver_features,
	.get_driver_features    = vdpasim_get_driver_features,
	.set_config_cb          = vdpasim_set_config_cb,
	.get_vq_num_max         = vdpasim_get_vq_num_max,
	.get_device_id          = vdpasim_get_device_id,
	.get_vendor_id          = vdpasim_get_vendor_id,
	.get_status             = vdpasim_get_status,
	.set_status             = vdpasim_set_status,
	.reset			= vdpasim_reset,
	.suspend		= vdpasim_suspend,
	.resume			= vdpasim_resume,
	.get_config_size        = vdpasim_get_config_size,
	.get_config             = vdpasim_get_config,
	.set_config             = vdpasim_set_config,
	.get_generation         = vdpasim_get_generation,
	.get_iova_range         = vdpasim_get_iova_range,
	.set_group_asid         = vdpasim_set_group_asid,
	.set_map                = vdpasim_set_map,
	.free                   = vdpasim_free,
};

MODULE_VERSION(DRV_VERSION);
MODULE_LICENSE(DRV_LICENSE);
MODULE_AUTHOR(DRV_AUTHOR);
MODULE_DESCRIPTION(DRV_DESC);