// SPDX-License-Identifier: GPL-2.0-only
/*
 * VDPA device simulator core.
 *
 * Copyright (c) 2020, Red Hat Inc. All rights reserved.
 *     Author: Jason Wang <jasowang@redhat.com>
 *
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/dma-map-ops.h>
#include <linux/vringh.h>
#include <linux/vdpa.h>
#include <linux/vhost_iotlb.h>
#include <uapi/linux/vdpa.h>

#include "vdpa_sim.h"

#define DRV_VERSION  "0.1"
#define DRV_AUTHOR   "Jason Wang <jasowang@redhat.com>"
#define DRV_DESC     "vDPA Device Simulator core"
#define DRV_LICENSE  "GPL v2"

static int batch_mapping = 1;
module_param(batch_mapping, int, 0444);
MODULE_PARM_DESC(batch_mapping, "Batched mapping: 1 - Enable; 0 - Disable");

static int max_iotlb_entries = 2048;
module_param(max_iotlb_entries, int, 0444);
MODULE_PARM_DESC(max_iotlb_entries,
		 "Maximum number of iotlb entries for each address space. 0 means unlimited. (default: 2048)");

#define VDPASIM_QUEUE_ALIGN PAGE_SIZE
#define VDPASIM_QUEUE_MAX 256
#define VDPASIM_VENDOR_ID 0

static struct vdpasim *vdpa_to_sim(struct vdpa_device *vdpa)
{
	return container_of(vdpa, struct vdpasim, vdpa);
}

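/*
 * vringh notification callback: forward a used-buffer notification from
 * the simulated ring to the driver's virtqueue callback, if one is
 * registered.
 */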
static void vdpasim_vq_notify(struct vringh *vring)
{
	struct vdpasim_virtqueue *vq =
		container_of(vring, struct vdpasim_virtqueue, vring);

	if (!vq->cb)
		return;

	vq->cb(vq->private);
}

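/*
 * (Re)initialize the vringh instance for a virtqueue using the ring
 * addresses the driver programmed via .set_vq_address, preserving
 * last_avail_idx across the re-initialization (it may have been set
 * earlier through .set_vq_state).
 */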
static void vdpasim_queue_ready(struct vdpasim *vdpasim, unsigned int idx)
{
	struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx];
	uint16_t last_avail_idx = vq->vring.last_avail_idx;

	vringh_init_iotlb(&vq->vring, vdpasim->features, vq->num, true,
			  (struct vring_desc *)(uintptr_t)vq->desc_addr,
			  (struct vring_avail *)
			  (uintptr_t)vq->driver_addr,
			  (struct vring_used *)
			  (uintptr_t)vq->device_addr);

	vq->vring.last_avail_idx = last_avail_idx;
	vq->vring.notify = vdpasim_vq_notify;
}

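/*
 * Return a virtqueue to its post-reset state: not ready, no ring
 * addresses, no callback, and a vringh re-initialized to defaults.
 */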
static void vdpasim_vq_reset(struct vdpasim *vdpasim,
			     struct vdpasim_virtqueue *vq)
{
	vq->ready = false;
	vq->desc_addr = 0;
	vq->driver_addr = 0;
	vq->device_addr = 0;
	vq->cb = NULL;
	vq->private = NULL;
	vringh_init_iotlb(&vq->vring, vdpasim->dev_attr.supported_features,
			  VDPASIM_QUEUE_MAX, false, NULL, NULL, NULL);

	vq->vring.notify = NULL;
}

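/*
 * Device-level reset: reset every virtqueue, point all rings back at
 * address space 0, and rebuild each IOTLB as an identity (passthrough)
 * mapping covering the whole address range.
 */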
static void vdpasim_do_reset(struct vdpasim *vdpasim)
{
	int i;

	spin_lock(&vdpasim->iommu_lock);

	for (i = 0; i < vdpasim->dev_attr.nvqs; i++) {
		vdpasim_vq_reset(vdpasim, &vdpasim->vqs[i]);
		vringh_set_iotlb(&vdpasim->vqs[i].vring, &vdpasim->iommu[0],
				 &vdpasim->iommu_lock);
	}

	for (i = 0; i < vdpasim->dev_attr.nas; i++) {
		vhost_iotlb_reset(&vdpasim->iommu[i]);
		vhost_iotlb_add_range(&vdpasim->iommu[i], 0, ULONG_MAX,
				      0, VHOST_MAP_RW);
		vdpasim->iommu_pt[i] = true;
	}

	vdpasim->running = true;
	spin_unlock(&vdpasim->iommu_lock);

	vdpasim->features = 0;
	vdpasim->status = 0;
	++vdpasim->generation;
}

static const struct vdpa_config_ops vdpasim_config_ops;
static const struct vdpa_config_ops vdpasim_batch_config_ops;

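/**
 * vdpasim_create - allocate and initialize a simulator instance
 * @dev_attr: device attributes supplied by the bus-specific simulator
 *            (net, block, ...); @dev_attr->alloc_size must be set
 * @config: initial configuration from the management interface; only
 *          %VDPA_ATTR_DEV_FEATURES is honoured here
 *
 * Returns the new instance on success or an ERR_PTR() on failure.
 *
 * A rough usage sketch, loosely modeled on the device simulators that
 * call this helper; the my_sim_* names are hypothetical and the field
 * values illustrative, not normative:
 *
 *	struct vdpasim_dev_attr attr = {};
 *	struct vdpasim *simdev;
 *
 *	attr.mgmt_dev = mdev;
 *	attr.name = name;
 *	attr.alloc_size = sizeof(struct my_sim_dev);
 *	attr.supported_features = MY_SIM_FEATURES;
 *	attr.nvqs = MY_SIM_VQ_NUM;
 *	attr.ngroups = MY_SIM_GROUP_NUM;
 *	attr.nas = MY_SIM_AS_NUM;
 *	attr.config_size = sizeof(struct my_sim_config);
 *	attr.buffer_size = PAGE_SIZE;
 *	attr.work_fn = my_sim_work;
 *
 *	simdev = vdpasim_create(&attr, config);
 *	if (IS_ERR(simdev))
 *		return PTR_ERR(simdev);
 */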
struct vdpasim *vdpasim_create(struct vdpasim_dev_attr *dev_attr,
			       const struct vdpa_dev_set_config *config)
{
	const struct vdpa_config_ops *ops;
	struct vdpa_device *vdpa;
	struct vdpasim *vdpasim;
	struct device *dev;
	int i, ret = -ENOMEM;

	if (!dev_attr->alloc_size)
		return ERR_PTR(-EINVAL);

	if (config->mask & BIT_ULL(VDPA_ATTR_DEV_FEATURES)) {
		if (config->device_features &
		    ~dev_attr->supported_features)
			return ERR_PTR(-EINVAL);
		dev_attr->supported_features =
			config->device_features;
	}

	if (batch_mapping)
		ops = &vdpasim_batch_config_ops;
	else
		ops = &vdpasim_config_ops;

	vdpa = __vdpa_alloc_device(NULL, ops,
				   dev_attr->ngroups, dev_attr->nas,
				   dev_attr->alloc_size,
				   dev_attr->name, false);
	if (IS_ERR(vdpa)) {
		ret = PTR_ERR(vdpa);
		goto err_alloc;
	}

	vdpasim = vdpa_to_sim(vdpa);
	vdpasim->dev_attr = *dev_attr;
	INIT_WORK(&vdpasim->work, dev_attr->work_fn);
	spin_lock_init(&vdpasim->lock);
	spin_lock_init(&vdpasim->iommu_lock);

	dev = &vdpasim->vdpa.dev;
	dev->dma_mask = &dev->coherent_dma_mask;
	if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)))
		goto err_iommu;
	vdpasim->vdpa.mdev = dev_attr->mgmt_dev;

	vdpasim->config = kzalloc(dev_attr->config_size, GFP_KERNEL);
	if (!vdpasim->config)
		goto err_iommu;

	vdpasim->vqs = kcalloc(dev_attr->nvqs, sizeof(struct vdpasim_virtqueue),
			       GFP_KERNEL);
	if (!vdpasim->vqs)
		goto err_iommu;

	vdpasim->iommu = kmalloc_array(vdpasim->dev_attr.nas,
				       sizeof(*vdpasim->iommu), GFP_KERNEL);
	if (!vdpasim->iommu)
		goto err_iommu;

	vdpasim->iommu_pt = kmalloc_array(vdpasim->dev_attr.nas,
					  sizeof(*vdpasim->iommu_pt), GFP_KERNEL);
	if (!vdpasim->iommu_pt)
		goto err_iommu;

	for (i = 0; i < vdpasim->dev_attr.nas; i++)
		vhost_iotlb_init(&vdpasim->iommu[i], max_iotlb_entries, 0);

	vdpasim->buffer = kvmalloc(dev_attr->buffer_size, GFP_KERNEL);
	if (!vdpasim->buffer)
		goto err_iommu;

	for (i = 0; i < dev_attr->nvqs; i++)
		vringh_set_iotlb(&vdpasim->vqs[i].vring, &vdpasim->iommu[0],
				 &vdpasim->iommu_lock);

	vdpasim->vdpa.dma_dev = dev;

	return vdpasim;

err_iommu:
	put_device(dev);
err_alloc:
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(vdpasim_create);

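/*
 * vdpa_config_ops implementation. Unless noted otherwise, these callbacks
 * simply record driver-programmed state in the corresponding
 * vdpasim_virtqueue.
 */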
static int vdpasim_set_vq_address(struct vdpa_device *vdpa, u16 idx,
				  u64 desc_area, u64 driver_area,
				  u64 device_area)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
	struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx];

	vq->desc_addr = desc_area;
	vq->driver_addr = driver_area;
	vq->device_addr = device_area;

	return 0;
}

static void vdpasim_set_vq_num(struct vdpa_device *vdpa, u16 idx, u32 num)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
	struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx];

	vq->num = num;
}

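/*
 * A kick while the device is suspended (but DRIVER_OK) is remembered in
 * pending_kick and replayed by vdpasim_resume(); otherwise schedule the
 * device-specific work function to process the ring.
 */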
static void vdpasim_kick_vq(struct vdpa_device *vdpa, u16 idx)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
	struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx];

	if (!vdpasim->running &&
	    (vdpasim->status & VIRTIO_CONFIG_S_DRIVER_OK)) {
		vdpasim->pending_kick = true;
		return;
	}

	if (vq->ready)
		schedule_work(&vdpasim->work);
}

static void vdpasim_set_vq_cb(struct vdpa_device *vdpa, u16 idx,
			      struct vdpa_callback *cb)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
	struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx];

	vq->cb = cb->callback;
	vq->private = cb->private;
}

static void vdpasim_set_vq_ready(struct vdpa_device *vdpa, u16 idx, bool ready)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
	struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx];
	bool old_ready;

	spin_lock(&vdpasim->lock);
	old_ready = vq->ready;
	vq->ready = ready;
	if (vq->ready && !old_ready)
		vdpasim_queue_ready(vdpasim, idx);
	spin_unlock(&vdpasim->lock);
}

static bool vdpasim_get_vq_ready(struct vdpa_device *vdpa, u16 idx)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
	struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx];

	return vq->ready;
}

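/*
 * Only split virtqueues are simulated here, so virtqueue state is just
 * the split ring's available index.
 */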
static int vdpasim_set_vq_state(struct vdpa_device *vdpa, u16 idx,
				const struct vdpa_vq_state *state)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
	struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx];
	struct vringh *vrh = &vq->vring;

	spin_lock(&vdpasim->lock);
	vrh->last_avail_idx = state->split.avail_index;
	spin_unlock(&vdpasim->lock);

	return 0;
}

static int vdpasim_get_vq_state(struct vdpa_device *vdpa, u16 idx,
				struct vdpa_vq_state *state)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
	struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx];
	struct vringh *vrh = &vq->vring;

	state->split.avail_index = vrh->last_avail_idx;
	return 0;
}

static int vdpasim_get_vq_stats(struct vdpa_device *vdpa, u16 idx,
				struct sk_buff *msg,
				struct netlink_ext_ack *extack)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);

	if (vdpasim->dev_attr.get_stats)
		return vdpasim->dev_attr.get_stats(vdpasim, idx,
						   msg, extack);
	return -EOPNOTSUPP;
}

static u32 vdpasim_get_vq_align(struct vdpa_device *vdpa)
{
	return VDPASIM_QUEUE_ALIGN;
}

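/*
 * Note: this mapping is hard-coded for the networking simulator's queue
 * layout (two data queues plus a control queue).
 */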
static u32 vdpasim_get_vq_group(struct vdpa_device *vdpa, u16 idx)
{
	/* RX and TX belong to group 0; the CVQ belongs to group 1 */
	if (idx == 2)
		return 1;
	else
		return 0;
}

static u64 vdpasim_get_device_features(struct vdpa_device *vdpa)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);

	return vdpasim->dev_attr.supported_features;
}

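/*
 * VIRTIO_F_ACCESS_PLATFORM is mandatory: the simulator accesses ring and
 * buffer memory through the IOTLB, so the driver must negotiate platform
 * DMA access.
 */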
static int vdpasim_set_driver_features(struct vdpa_device *vdpa, u64 features)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);

	/* DMA mapping must be done by the driver */
	if (!(features & (1ULL << VIRTIO_F_ACCESS_PLATFORM)))
		return -EINVAL;

	vdpasim->features = features & vdpasim->dev_attr.supported_features;

	return 0;
}

static u64 vdpasim_get_driver_features(struct vdpa_device *vdpa)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);

	return vdpasim->features;
}

static void vdpasim_set_config_cb(struct vdpa_device *vdpa,
				  struct vdpa_callback *cb)
{
	/* We don't support config interrupt */
}

static u16 vdpasim_get_vq_num_max(struct vdpa_device *vdpa)
{
	return VDPASIM_QUEUE_MAX;
}

static u32 vdpasim_get_device_id(struct vdpa_device *vdpa)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);

	return vdpasim->dev_attr.id;
}

static u32 vdpasim_get_vendor_id(struct vdpa_device *vdpa)
{
	return VDPASIM_VENDOR_ID;
}

static u8 vdpasim_get_status(struct vdpa_device *vdpa)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
	u8 status;

	spin_lock(&vdpasim->lock);
	status = vdpasim->status;
	spin_unlock(&vdpasim->lock);

	return status;
}

static void vdpasim_set_status(struct vdpa_device *vdpa, u8 status)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);

	spin_lock(&vdpasim->lock);
	vdpasim->status = status;
	spin_unlock(&vdpasim->lock);
}

static int vdpasim_reset(struct vdpa_device *vdpa)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);

	spin_lock(&vdpasim->lock);
	vdpasim->status = 0;
	vdpasim_do_reset(vdpasim);
	spin_unlock(&vdpasim->lock);

	return 0;
}

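/*
 * Suspend stops ring processing without losing state; resume restarts it
 * and replays any kick that arrived while the device was suspended.
 */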
static int vdpasim_suspend(struct vdpa_device *vdpa)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);

	spin_lock(&vdpasim->lock);
	vdpasim->running = false;
	spin_unlock(&vdpasim->lock);

	return 0;
}

static int vdpasim_resume(struct vdpa_device *vdpa)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
	int i;

	spin_lock(&vdpasim->lock);
	vdpasim->running = true;

	if (vdpasim->pending_kick) {
		/* Process pending descriptors */
		for (i = 0; i < vdpasim->dev_attr.nvqs; ++i)
			vdpasim_kick_vq(vdpa, i);

		vdpasim->pending_kick = false;
	}

	spin_unlock(&vdpasim->lock);

	return 0;
}

static size_t vdpasim_get_config_size(struct vdpa_device *vdpa)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);

	return vdpasim->dev_attr.config_size;
}

static void vdpasim_get_config(struct vdpa_device *vdpa, unsigned int offset,
			       void *buf, unsigned int len)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);

	if (offset + len > vdpasim->dev_attr.config_size)
		return;

	if (vdpasim->dev_attr.get_config)
		vdpasim->dev_attr.get_config(vdpasim, vdpasim->config);

	memcpy(buf, vdpasim->config + offset, len);
}

static void vdpasim_set_config(struct vdpa_device *vdpa, unsigned int offset,
			       const void *buf, unsigned int len)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);

	if (offset + len > vdpasim->dev_attr.config_size)
		return;

	memcpy(vdpasim->config + offset, buf, len);

	if (vdpasim->dev_attr.set_config)
		vdpasim->dev_attr.set_config(vdpasim, vdpasim->config);
}

static u32 vdpasim_get_generation(struct vdpa_device *vdpa)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);

	return vdpasim->generation;
}

static struct vdpa_iova_range vdpasim_get_iova_range(struct vdpa_device *vdpa)
{
	struct vdpa_iova_range range = {
		.first = 0ULL,
		.last = ULLONG_MAX,
	};

	return range;
}

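/*
 * Bind a virtqueue group to an address space: every ring belonging to
 * @group is switched to the IOTLB of @asid.
 */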
static int vdpasim_set_group_asid(struct vdpa_device *vdpa, unsigned int group,
				  unsigned int asid)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
	struct vhost_iotlb *iommu;
	int i;

	if (group > vdpasim->dev_attr.ngroups)
		return -EINVAL;

	if (asid >= vdpasim->dev_attr.nas)
		return -EINVAL;

	iommu = &vdpasim->iommu[asid];

	spin_lock(&vdpasim->lock);

	for (i = 0; i < vdpasim->dev_attr.nvqs; i++)
		if (vdpasim_get_vq_group(vdpa, i) == group)
			vringh_set_iotlb(&vdpasim->vqs[i].vring, iommu,
					 &vdpasim->iommu_lock);

	spin_unlock(&vdpasim->lock);

	return 0;
}

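/*
 * Batched mapping (used when the batch_mapping module parameter is set):
 * replace the whole IOTLB of @asid with the contents of @iotlb in one
 * operation.
 */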
static int vdpasim_set_map(struct vdpa_device *vdpa, unsigned int asid,
			   struct vhost_iotlb *iotlb)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
	struct vhost_iotlb_map *map;
	struct vhost_iotlb *iommu;
	u64 start = 0ULL, last = 0ULL - 1;
	int ret;

	if (asid >= vdpasim->dev_attr.nas)
		return -EINVAL;

	spin_lock(&vdpasim->iommu_lock);

	iommu = &vdpasim->iommu[asid];
	vhost_iotlb_reset(iommu);
	vdpasim->iommu_pt[asid] = false;

	for (map = vhost_iotlb_itree_first(iotlb, start, last); map;
	     map = vhost_iotlb_itree_next(map, start, last)) {
		ret = vhost_iotlb_add_range(iommu, map->start,
					    map->last, map->addr, map->perm);
		if (ret)
			goto err;
	}
	spin_unlock(&vdpasim->iommu_lock);
	return 0;

err:
	vhost_iotlb_reset(iommu);
	spin_unlock(&vdpasim->iommu_lock);
	return ret;
}

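/*
 * Incremental mapping interface, used instead of .set_map when batched
 * mapping is disabled. The first explicit map/unmap on an address space
 * drops the identity mapping installed at reset time.
 */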
static int vdpasim_dma_map(struct vdpa_device *vdpa, unsigned int asid,
			   u64 iova, u64 size,
			   u64 pa, u32 perm, void *opaque)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
	int ret;

	if (asid >= vdpasim->dev_attr.nas)
		return -EINVAL;

	spin_lock(&vdpasim->iommu_lock);
	if (vdpasim->iommu_pt[asid]) {
		vhost_iotlb_reset(&vdpasim->iommu[asid]);
		vdpasim->iommu_pt[asid] = false;
	}
	ret = vhost_iotlb_add_range_ctx(&vdpasim->iommu[asid], iova,
					iova + size - 1, pa, perm, opaque);
	spin_unlock(&vdpasim->iommu_lock);

	return ret;
}

static int vdpasim_dma_unmap(struct vdpa_device *vdpa, unsigned int asid,
			     u64 iova, u64 size)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);

	if (asid >= vdpasim->dev_attr.nas)
		return -EINVAL;

	if (vdpasim->iommu_pt[asid]) {
		vhost_iotlb_reset(&vdpasim->iommu[asid]);
		vdpasim->iommu_pt[asid] = false;
	}

	spin_lock(&vdpasim->iommu_lock);
	vhost_iotlb_del_range(&vdpasim->iommu[asid], iova, iova + size - 1);
	spin_unlock(&vdpasim->iommu_lock);

	return 0;
}

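/*
 * Release everything vdpasim_create() allocated; called by the vDPA core
 * when the device's last reference is dropped.
 */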
static void vdpasim_free(struct vdpa_device *vdpa)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
	int i;

	cancel_work_sync(&vdpasim->work);

	for (i = 0; i < vdpasim->dev_attr.nvqs; i++) {
		vringh_kiov_cleanup(&vdpasim->vqs[i].out_iov);
		vringh_kiov_cleanup(&vdpasim->vqs[i].in_iov);
	}

	kvfree(vdpasim->buffer);
	for (i = 0; i < vdpasim->dev_attr.nas; i++)
		vhost_iotlb_reset(&vdpasim->iommu[i]);
	kfree(vdpasim->iommu);
	kfree(vdpasim->iommu_pt);
	kfree(vdpasim->vqs);
	kfree(vdpasim->config);
}

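/* Config ops for the non-batching case: incremental .dma_map/.dma_unmap. */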
static const struct vdpa_config_ops vdpasim_config_ops = {
	.set_vq_address         = vdpasim_set_vq_address,
	.set_vq_num             = vdpasim_set_vq_num,
	.kick_vq                = vdpasim_kick_vq,
	.set_vq_cb              = vdpasim_set_vq_cb,
	.set_vq_ready           = vdpasim_set_vq_ready,
	.get_vq_ready           = vdpasim_get_vq_ready,
	.set_vq_state           = vdpasim_set_vq_state,
	.get_vendor_vq_stats    = vdpasim_get_vq_stats,
	.get_vq_state           = vdpasim_get_vq_state,
	.get_vq_align           = vdpasim_get_vq_align,
	.get_vq_group           = vdpasim_get_vq_group,
	.get_device_features    = vdpasim_get_device_features,
	.set_driver_features    = vdpasim_set_driver_features,
	.get_driver_features    = vdpasim_get_driver_features,
	.set_config_cb          = vdpasim_set_config_cb,
	.get_vq_num_max         = vdpasim_get_vq_num_max,
	.get_device_id          = vdpasim_get_device_id,
	.get_vendor_id          = vdpasim_get_vendor_id,
	.get_status             = vdpasim_get_status,
	.set_status             = vdpasim_set_status,
	.reset                  = vdpasim_reset,
	.suspend                = vdpasim_suspend,
	.resume                 = vdpasim_resume,
	.get_config_size        = vdpasim_get_config_size,
	.get_config             = vdpasim_get_config,
	.set_config             = vdpasim_set_config,
	.get_generation         = vdpasim_get_generation,
	.get_iova_range         = vdpasim_get_iova_range,
	.set_group_asid         = vdpasim_set_group_asid,
	.dma_map                = vdpasim_dma_map,
	.dma_unmap              = vdpasim_dma_unmap,
	.free                   = vdpasim_free,
};

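/*
 * Identical to vdpasim_config_ops except that mappings are updated in a
 * batch through .set_map instead of per-range .dma_map/.dma_unmap.
 */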
static const struct vdpa_config_ops vdpasim_batch_config_ops = {
	.set_vq_address         = vdpasim_set_vq_address,
	.set_vq_num             = vdpasim_set_vq_num,
	.kick_vq                = vdpasim_kick_vq,
	.set_vq_cb              = vdpasim_set_vq_cb,
	.set_vq_ready           = vdpasim_set_vq_ready,
	.get_vq_ready           = vdpasim_get_vq_ready,
	.set_vq_state           = vdpasim_set_vq_state,
	.get_vendor_vq_stats    = vdpasim_get_vq_stats,
	.get_vq_state           = vdpasim_get_vq_state,
	.get_vq_align           = vdpasim_get_vq_align,
	.get_vq_group           = vdpasim_get_vq_group,
	.get_device_features    = vdpasim_get_device_features,
	.set_driver_features    = vdpasim_set_driver_features,
	.get_driver_features    = vdpasim_get_driver_features,
	.set_config_cb          = vdpasim_set_config_cb,
	.get_vq_num_max         = vdpasim_get_vq_num_max,
	.get_device_id          = vdpasim_get_device_id,
	.get_vendor_id          = vdpasim_get_vendor_id,
	.get_status             = vdpasim_get_status,
	.set_status             = vdpasim_set_status,
	.reset                  = vdpasim_reset,
	.suspend                = vdpasim_suspend,
	.resume                 = vdpasim_resume,
	.get_config_size        = vdpasim_get_config_size,
	.get_config             = vdpasim_get_config,
	.set_config             = vdpasim_set_config,
	.get_generation         = vdpasim_get_generation,
	.get_iova_range         = vdpasim_get_iova_range,
	.set_group_asid         = vdpasim_set_group_asid,
	.set_map                = vdpasim_set_map,
	.free                   = vdpasim_free,
};

MODULE_VERSION(DRV_VERSION);
MODULE_LICENSE(DRV_LICENSE);
MODULE_AUTHOR(DRV_AUTHOR);
MODULE_DESCRIPTION(DRV_DESC);