// SPDX-License-Identifier: GPL-2.0-only
/*
 * VDPA device simulator core.
 *
 * Copyright (c) 2020, Red Hat Inc. All rights reserved.
 *     Author: Jason Wang <jasowang@redhat.com>
 *
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/slab.h>
#include <linux/dma-map-ops.h>
#include <linux/vringh.h>
#include <linux/vdpa.h>
#include <linux/vhost_iotlb.h>
#include <uapi/linux/vdpa.h>

#include "vdpa_sim.h"

#define DRV_VERSION  "0.1"
#define DRV_AUTHOR   "Jason Wang <jasowang@redhat.com>"
#define DRV_DESC     "vDPA Device Simulator core"
#define DRV_LICENSE  "GPL v2"

static int batch_mapping = 1;
module_param(batch_mapping, int, 0444);
MODULE_PARM_DESC(batch_mapping, "Batched mapping: 1 - Enable; 0 - Disable");

static int max_iotlb_entries = 2048;
module_param(max_iotlb_entries, int, 0444);
MODULE_PARM_DESC(max_iotlb_entries,
		 "Maximum number of iotlb entries for each address space. 0 means unlimited. (default: 2048)");

static bool use_va = true;
module_param(use_va, bool, 0444);
MODULE_PARM_DESC(use_va, "Enable/disable the device's ability to use VA");

#define VDPASIM_QUEUE_ALIGN PAGE_SIZE
#define VDPASIM_QUEUE_MAX 256
#define VDPASIM_VENDOR_ID 0

struct vdpasim_mm_work {
	struct kthread_work work;
	struct vdpasim *vdpasim;
	struct mm_struct *mm_to_bind;
	int ret;
};

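/*
 * Runs on the simulator's kthread worker: record the mm the datapath should
 * use from now on (or NULL to drop the binding).
 */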
static void vdpasim_mm_work_fn(struct kthread_work *work)
{
	struct vdpasim_mm_work *mm_work =
		container_of(work, struct vdpasim_mm_work, work);
	struct vdpasim *vdpasim = mm_work->vdpasim;

	mm_work->ret = 0;

	//TODO: should we attach the cgroup of the mm owner?
	vdpasim->mm_bound = mm_work->mm_to_bind;
}

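/*
 * Queue an mm-change request on the worker and wait for it to complete, so
 * the new binding is visible to the worker before returning.
 */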
static void vdpasim_worker_change_mm_sync(struct vdpasim *vdpasim,
					  struct vdpasim_mm_work *mm_work)
{
	struct kthread_work *work = &mm_work->work;

	kthread_init_work(work, vdpasim_mm_work_fn);
	kthread_queue_work(vdpasim->worker, work);

	kthread_flush_work(work);
}

static struct vdpasim *vdpa_to_sim(struct vdpa_device *vdpa)
{
	return container_of(vdpa, struct vdpasim, vdpa);
}

static void vdpasim_vq_notify(struct vringh *vring)
{
	struct vdpasim_virtqueue *vq =
		container_of(vring, struct vdpasim_virtqueue, vring);

	if (!vq->cb)
		return;

	vq->cb(vq->private);
}

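/*
 * (Re)build the vringh instance for a queue from the addresses programmed by
 * the driver, preserving last_avail_idx.  The VA variant is used when
 * userspace virtual addressing is enabled and an mm is bound; otherwise the
 * addresses are translated through the iotlb.
 */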
static void vdpasim_queue_ready(struct vdpasim *vdpasim, unsigned int idx)
{
	struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx];
	uint16_t last_avail_idx = vq->vring.last_avail_idx;
	struct vring_desc *desc = (struct vring_desc *)
				  (uintptr_t)vq->desc_addr;
	struct vring_avail *avail = (struct vring_avail *)
				    (uintptr_t)vq->driver_addr;
	struct vring_used *used = (struct vring_used *)
				  (uintptr_t)vq->device_addr;

	if (use_va && vdpasim->mm_bound) {
		vringh_init_iotlb_va(&vq->vring, vdpasim->features, vq->num,
				     true, desc, avail, used);
	} else {
		vringh_init_iotlb(&vq->vring, vdpasim->features, vq->num,
				  true, desc, avail, used);
	}

	vq->vring.last_avail_idx = last_avail_idx;

	/*
	 * Since vdpa_sim does not support receiving inflight descriptors
	 * as the destination of a migration, set both avail_idx and
	 * used_idx to the same value at vq start.  This is how vhost-user
	 * behaves on a VHOST_SET_VRING_BASE call.
	 *
	 * Although the simple fix would be to set last_used_idx in
	 * vdpasim_set_vq_state(), it would be reset here in
	 * vdpasim_queue_ready() anyway.
	 */
	vq->vring.last_used_idx = last_avail_idx;
	vq->vring.notify = vdpasim_vq_notify;
}

static void vdpasim_vq_reset(struct vdpasim *vdpasim,
			     struct vdpasim_virtqueue *vq)
{
	vq->ready = false;
	vq->desc_addr = 0;
	vq->driver_addr = 0;
	vq->device_addr = 0;
	vq->cb = NULL;
	vq->private = NULL;
	vringh_init_iotlb(&vq->vring, vdpasim->dev_attr.supported_features,
			  VDPASIM_QUEUE_MAX, false, NULL, NULL, NULL);

	vq->vring.notify = NULL;
}

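/*
 * Device-level reset: return every virtqueue to its initial state and point
 * it at address space 0, restore the 1:1 passthrough mapping in each address
 * space, clear the negotiated features and status, and bump the config
 * generation.
 */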
static void vdpasim_do_reset(struct vdpasim *vdpasim)
{
	int i;

	spin_lock(&vdpasim->iommu_lock);

	for (i = 0; i < vdpasim->dev_attr.nvqs; i++) {
		vdpasim_vq_reset(vdpasim, &vdpasim->vqs[i]);
		vringh_set_iotlb(&vdpasim->vqs[i].vring, &vdpasim->iommu[0],
				 &vdpasim->iommu_lock);
	}

	for (i = 0; i < vdpasim->dev_attr.nas; i++) {
		vhost_iotlb_reset(&vdpasim->iommu[i]);
		vhost_iotlb_add_range(&vdpasim->iommu[i], 0, ULONG_MAX,
				      0, VHOST_MAP_RW);
		vdpasim->iommu_pt[i] = true;
	}

	vdpasim->running = true;
	spin_unlock(&vdpasim->iommu_lock);

	vdpasim->features = 0;
	vdpasim->status = 0;
	++vdpasim->generation;
}

static const struct vdpa_config_ops vdpasim_config_ops;
static const struct vdpa_config_ops vdpasim_batch_config_ops;

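/*
 * Generic worker entry point: temporarily adopt the bound mm (when VA is in
 * use) so the device-specific work_fn can access userspace buffers, then
 * drop it again.
 */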
static void vdpasim_work_fn(struct kthread_work *work)
{
	struct vdpasim *vdpasim = container_of(work, struct vdpasim, work);
	struct mm_struct *mm = vdpasim->mm_bound;

	if (use_va && mm) {
		if (!mmget_not_zero(mm))
			return;
		kthread_use_mm(mm);
	}

	vdpasim->dev_attr.work_fn(vdpasim);

	if (use_va && mm) {
		kthread_unuse_mm(mm);
		mmput(mm);
	}
}

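/*
 * vdpasim_create() - allocate and initialize a simulated vDPA device from
 * the attributes supplied by a device-specific simulator (e.g. net or blk).
 *
 * Validates the requested device features against the supported set, picks
 * the batched or incremental mapping ops, allocates config space, virtqueues
 * and one iotlb per address space, and spawns the per-device kthread worker.
 *
 * Returns the new vdpasim on success or an ERR_PTR() on failure.
 */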
struct vdpasim *vdpasim_create(struct vdpasim_dev_attr *dev_attr,
			       const struct vdpa_dev_set_config *config)
{
	const struct vdpa_config_ops *ops;
	struct vdpa_device *vdpa;
	struct vdpasim *vdpasim;
	struct device *dev;
	int i, ret = -ENOMEM;

	if (!dev_attr->alloc_size)
		return ERR_PTR(-EINVAL);

	if (config->mask & BIT_ULL(VDPA_ATTR_DEV_FEATURES)) {
		if (config->device_features &
		    ~dev_attr->supported_features)
			return ERR_PTR(-EINVAL);
		dev_attr->supported_features =
			config->device_features;
	}

	if (batch_mapping)
		ops = &vdpasim_batch_config_ops;
	else
		ops = &vdpasim_config_ops;

	vdpa = __vdpa_alloc_device(NULL, ops,
				   dev_attr->ngroups, dev_attr->nas,
				   dev_attr->alloc_size,
				   dev_attr->name, use_va);
	if (IS_ERR(vdpa)) {
		ret = PTR_ERR(vdpa);
		goto err_alloc;
	}

	vdpasim = vdpa_to_sim(vdpa);
	vdpasim->dev_attr = *dev_attr;
	dev = &vdpasim->vdpa.dev;

	kthread_init_work(&vdpasim->work, vdpasim_work_fn);
	vdpasim->worker = kthread_create_worker(0, "vDPA sim worker: %s",
						dev_attr->name);
	if (IS_ERR(vdpasim->worker))
		goto err_iommu;

	mutex_init(&vdpasim->mutex);
	spin_lock_init(&vdpasim->iommu_lock);

	dev->dma_mask = &dev->coherent_dma_mask;
	if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)))
		goto err_iommu;
	vdpasim->vdpa.mdev = dev_attr->mgmt_dev;

	vdpasim->config = kzalloc(dev_attr->config_size, GFP_KERNEL);
	if (!vdpasim->config)
		goto err_iommu;

	vdpasim->vqs = kcalloc(dev_attr->nvqs, sizeof(struct vdpasim_virtqueue),
			       GFP_KERNEL);
	if (!vdpasim->vqs)
		goto err_iommu;

	vdpasim->iommu = kmalloc_array(vdpasim->dev_attr.nas,
				       sizeof(*vdpasim->iommu), GFP_KERNEL);
	if (!vdpasim->iommu)
		goto err_iommu;

	vdpasim->iommu_pt = kmalloc_array(vdpasim->dev_attr.nas,
					  sizeof(*vdpasim->iommu_pt), GFP_KERNEL);
	if (!vdpasim->iommu_pt)
		goto err_iommu;

	for (i = 0; i < vdpasim->dev_attr.nas; i++)
		vhost_iotlb_init(&vdpasim->iommu[i], max_iotlb_entries, 0);

	for (i = 0; i < dev_attr->nvqs; i++)
		vringh_set_iotlb(&vdpasim->vqs[i].vring, &vdpasim->iommu[0],
				 &vdpasim->iommu_lock);

	vdpasim->vdpa.dma_dev = dev;

	return vdpasim;

err_iommu:
	put_device(dev);
err_alloc:
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(vdpasim_create);

void vdpasim_schedule_work(struct vdpasim *vdpasim)
{
	kthread_queue_work(vdpasim->worker, &vdpasim->work);
}
EXPORT_SYMBOL_GPL(vdpasim_schedule_work);

static int vdpasim_set_vq_address(struct vdpa_device *vdpa, u16 idx,
				  u64 desc_area, u64 driver_area,
				  u64 device_area)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
	struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx];

	vq->desc_addr = desc_area;
	vq->driver_addr = driver_area;
	vq->device_addr = device_area;

	return 0;
}

static void vdpasim_set_vq_num(struct vdpa_device *vdpa, u16 idx, u32 num)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
	struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx];

	vq->num = num;
}

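/*
 * If the device is suspended (not running) while DRIVER_OK is set, only
 * record the kick so it can be replayed on resume; otherwise schedule the
 * worker for a ready queue.
 */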
static void vdpasim_kick_vq(struct vdpa_device *vdpa, u16 idx)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
	struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx];

	if (!vdpasim->running &&
	    (vdpasim->status & VIRTIO_CONFIG_S_DRIVER_OK)) {
		vdpasim->pending_kick = true;
		return;
	}

	if (vq->ready)
		vdpasim_schedule_work(vdpasim);
}

static void vdpasim_set_vq_cb(struct vdpa_device *vdpa, u16 idx,
			      struct vdpa_callback *cb)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
	struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx];

	vq->cb = cb->callback;
	vq->private = cb->private;
}

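/*
 * Bring the ring up on a false->true transition of the ready flag; the
 * vringh state is rebuilt from the currently programmed addresses.
 */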
static void vdpasim_set_vq_ready(struct vdpa_device *vdpa, u16 idx, bool ready)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
	struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx];
	bool old_ready;

	mutex_lock(&vdpasim->mutex);
	old_ready = vq->ready;
	vq->ready = ready;
	if (vq->ready && !old_ready) {
		vdpasim_queue_ready(vdpasim, idx);
	}
	mutex_unlock(&vdpasim->mutex);
}

static bool vdpasim_get_vq_ready(struct vdpa_device *vdpa, u16 idx)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
	struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx];

	return vq->ready;
}

static int vdpasim_set_vq_state(struct vdpa_device *vdpa, u16 idx,
				const struct vdpa_vq_state *state)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
	struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx];
	struct vringh *vrh = &vq->vring;

	mutex_lock(&vdpasim->mutex);
	vrh->last_avail_idx = state->split.avail_index;
	mutex_unlock(&vdpasim->mutex);

	return 0;
}

static int vdpasim_get_vq_state(struct vdpa_device *vdpa, u16 idx,
				struct vdpa_vq_state *state)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
	struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx];
	struct vringh *vrh = &vq->vring;

	state->split.avail_index = vrh->last_avail_idx;
	return 0;
}

static int vdpasim_get_vq_stats(struct vdpa_device *vdpa, u16 idx,
				struct sk_buff *msg,
				struct netlink_ext_ack *extack)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);

	if (vdpasim->dev_attr.get_stats)
		return vdpasim->dev_attr.get_stats(vdpasim, idx,
						   msg, extack);
	return -EOPNOTSUPP;
}

static u32 vdpasim_get_vq_align(struct vdpa_device *vdpa)
{
	return VDPASIM_QUEUE_ALIGN;
}

static u32 vdpasim_get_vq_group(struct vdpa_device *vdpa, u16 idx)
{
	/* RX and TX belong to group 0; the CVQ belongs to group 1 */
	if (idx == 2)
		return 1;
	else
		return 0;
}

static u64 vdpasim_get_device_features(struct vdpa_device *vdpa)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);

	return vdpasim->dev_attr.supported_features;
}

static int vdpasim_set_driver_features(struct vdpa_device *vdpa, u64 features)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);

	/* DMA mapping must be done by driver */
	if (!(features & (1ULL << VIRTIO_F_ACCESS_PLATFORM)))
		return -EINVAL;

	vdpasim->features = features & vdpasim->dev_attr.supported_features;

	return 0;
}

static u64 vdpasim_get_driver_features(struct vdpa_device *vdpa)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);

	return vdpasim->features;
}

static void vdpasim_set_config_cb(struct vdpa_device *vdpa,
				  struct vdpa_callback *cb)
{
	/* We don't support config interrupts */
}

static u16 vdpasim_get_vq_num_max(struct vdpa_device *vdpa)
{
	return VDPASIM_QUEUE_MAX;
}

static u32 vdpasim_get_device_id(struct vdpa_device *vdpa)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);

	return vdpasim->dev_attr.id;
}

static u32 vdpasim_get_vendor_id(struct vdpa_device *vdpa)
{
	return VDPASIM_VENDOR_ID;
}

static u8 vdpasim_get_status(struct vdpa_device *vdpa)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
	u8 status;

	mutex_lock(&vdpasim->mutex);
	status = vdpasim->status;
	mutex_unlock(&vdpasim->mutex);

	return status;
}

static void vdpasim_set_status(struct vdpa_device *vdpa, u8 status)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);

	mutex_lock(&vdpasim->mutex);
	vdpasim->status = status;
	mutex_unlock(&vdpasim->mutex);
}

static int vdpasim_reset(struct vdpa_device *vdpa)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);

	mutex_lock(&vdpasim->mutex);
	vdpasim->status = 0;
	vdpasim_do_reset(vdpasim);
	mutex_unlock(&vdpasim->mutex);

	return 0;
}

static int vdpasim_suspend(struct vdpa_device *vdpa)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);

	mutex_lock(&vdpasim->mutex);
	vdpasim->running = false;
	mutex_unlock(&vdpasim->mutex);

	return 0;
}

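/* Resume processing and replay any kick recorded while suspended. */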
static int vdpasim_resume(struct vdpa_device *vdpa)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
	int i;

	mutex_lock(&vdpasim->mutex);
	vdpasim->running = true;

	if (vdpasim->pending_kick) {
		/* Process pending descriptors */
		for (i = 0; i < vdpasim->dev_attr.nvqs; ++i)
			vdpasim_kick_vq(vdpa, i);

		vdpasim->pending_kick = false;
	}

	mutex_unlock(&vdpasim->mutex);

	return 0;
}

static size_t vdpasim_get_config_size(struct vdpa_device *vdpa)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);

	return vdpasim->dev_attr.config_size;
}

static void vdpasim_get_config(struct vdpa_device *vdpa, unsigned int offset,
			     void *buf, unsigned int len)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);

	if (offset + len > vdpasim->dev_attr.config_size)
		return;

	if (vdpasim->dev_attr.get_config)
		vdpasim->dev_attr.get_config(vdpasim, vdpasim->config);

	memcpy(buf, vdpasim->config + offset, len);
}

static void vdpasim_set_config(struct vdpa_device *vdpa, unsigned int offset,
			     const void *buf, unsigned int len)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);

	if (offset + len > vdpasim->dev_attr.config_size)
		return;

	memcpy(vdpasim->config + offset, buf, len);

	if (vdpasim->dev_attr.set_config)
		vdpasim->dev_attr.set_config(vdpasim, vdpasim->config);
}

static u32 vdpasim_get_generation(struct vdpa_device *vdpa)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);

	return vdpasim->generation;
}

static struct vdpa_iova_range vdpasim_get_iova_range(struct vdpa_device *vdpa)
{
	struct vdpa_iova_range range = {
		.first = 0ULL,
		.last = ULLONG_MAX,
	};

	return range;
}

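/*
 * Attach every virtqueue that belongs to the given group to the iotlb of
 * the given address space.
 */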
static int vdpasim_set_group_asid(struct vdpa_device *vdpa, unsigned int group,
				  unsigned int asid)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
	struct vhost_iotlb *iommu;
	int i;

	if (group > vdpasim->dev_attr.ngroups)
		return -EINVAL;

	if (asid >= vdpasim->dev_attr.nas)
		return -EINVAL;

	iommu = &vdpasim->iommu[asid];

	mutex_lock(&vdpasim->mutex);

	for (i = 0; i < vdpasim->dev_attr.nvqs; i++)
		if (vdpasim_get_vq_group(vdpa, i) == group)
			vringh_set_iotlb(&vdpasim->vqs[i].vring, iommu,
					 &vdpasim->iommu_lock);

	mutex_unlock(&vdpasim->mutex);

	return 0;
}

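/*
 * Batched mapping op (.set_map): replace all mappings of an address space
 * with the entries carried in the supplied iotlb.
 */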
static int vdpasim_set_map(struct vdpa_device *vdpa, unsigned int asid,
			   struct vhost_iotlb *iotlb)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
	struct vhost_iotlb_map *map;
	struct vhost_iotlb *iommu;
	u64 start = 0ULL, last = 0ULL - 1;
	int ret;

	if (asid >= vdpasim->dev_attr.nas)
		return -EINVAL;

	spin_lock(&vdpasim->iommu_lock);

	iommu = &vdpasim->iommu[asid];
	vhost_iotlb_reset(iommu);
	vdpasim->iommu_pt[asid] = false;

	for (map = vhost_iotlb_itree_first(iotlb, start, last); map;
	     map = vhost_iotlb_itree_next(map, start, last)) {
		ret = vhost_iotlb_add_range(iommu, map->start,
					    map->last, map->addr, map->perm);
		if (ret)
			goto err;
	}
	spin_unlock(&vdpasim->iommu_lock);
	return 0;

err:
	vhost_iotlb_reset(iommu);
	spin_unlock(&vdpasim->iommu_lock);
	return ret;
}

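/*
 * .bind_mm/.unbind_mm: switch the mm the worker uses for VA accesses.  The
 * update is executed on the worker thread itself, so it is serialized with
 * the datapath work.
 */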
static int vdpasim_bind_mm(struct vdpa_device *vdpa, struct mm_struct *mm)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
	struct vdpasim_mm_work mm_work;

	mm_work.vdpasim = vdpasim;
	mm_work.mm_to_bind = mm;

	vdpasim_worker_change_mm_sync(vdpasim, &mm_work);

	return mm_work.ret;
}

static void vdpasim_unbind_mm(struct vdpa_device *vdpa)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
	struct vdpasim_mm_work mm_work;

	mm_work.vdpasim = vdpasim;
	mm_work.mm_to_bind = NULL;

	vdpasim_worker_change_mm_sync(vdpasim, &mm_work);
}

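/*
 * Incremental mapping ops (.dma_map/.dma_unmap): the first explicit update
 * drops the 1:1 passthrough mapping installed at reset, then individual
 * ranges are added to or removed from the address space's iotlb.
 */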
static int vdpasim_dma_map(struct vdpa_device *vdpa, unsigned int asid,
			   u64 iova, u64 size,
			   u64 pa, u32 perm, void *opaque)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
	int ret;

	if (asid >= vdpasim->dev_attr.nas)
		return -EINVAL;

	spin_lock(&vdpasim->iommu_lock);
	if (vdpasim->iommu_pt[asid]) {
		vhost_iotlb_reset(&vdpasim->iommu[asid]);
		vdpasim->iommu_pt[asid] = false;
	}
	ret = vhost_iotlb_add_range_ctx(&vdpasim->iommu[asid], iova,
					iova + size - 1, pa, perm, opaque);
	spin_unlock(&vdpasim->iommu_lock);

	return ret;
}

static int vdpasim_dma_unmap(struct vdpa_device *vdpa, unsigned int asid,
			     u64 iova, u64 size)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);

	if (asid >= vdpasim->dev_attr.nas)
		return -EINVAL;

	if (vdpasim->iommu_pt[asid]) {
		vhost_iotlb_reset(&vdpasim->iommu[asid]);
		vdpasim->iommu_pt[asid] = false;
	}

	spin_lock(&vdpasim->iommu_lock);
	vhost_iotlb_del_range(&vdpasim->iommu[asid], iova, iova + size - 1);
	spin_unlock(&vdpasim->iommu_lock);

	return 0;
}

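/*
 * Final teardown: stop the worker, clean up the per-queue kiovs, run the
 * device-specific free callback, then release the iotlbs and the memory
 * allocated in vdpasim_create().
 */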
static void vdpasim_free(struct vdpa_device *vdpa)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
	int i;

	kthread_cancel_work_sync(&vdpasim->work);
	kthread_destroy_worker(vdpasim->worker);

	for (i = 0; i < vdpasim->dev_attr.nvqs; i++) {
		vringh_kiov_cleanup(&vdpasim->vqs[i].out_iov);
		vringh_kiov_cleanup(&vdpasim->vqs[i].in_iov);
	}

	vdpasim->dev_attr.free(vdpasim);

	for (i = 0; i < vdpasim->dev_attr.nas; i++)
		vhost_iotlb_reset(&vdpasim->iommu[i]);
	kfree(vdpasim->iommu);
	kfree(vdpasim->iommu_pt);
	kfree(vdpasim->vqs);
	kfree(vdpasim->config);
}

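/*
 * Ops used when batched mapping is disabled: the bus updates mappings one
 * range at a time through .dma_map/.dma_unmap.
 */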
static const struct vdpa_config_ops vdpasim_config_ops = {
	.set_vq_address         = vdpasim_set_vq_address,
	.set_vq_num             = vdpasim_set_vq_num,
	.kick_vq                = vdpasim_kick_vq,
	.set_vq_cb              = vdpasim_set_vq_cb,
	.set_vq_ready           = vdpasim_set_vq_ready,
	.get_vq_ready           = vdpasim_get_vq_ready,
	.set_vq_state           = vdpasim_set_vq_state,
	.get_vendor_vq_stats    = vdpasim_get_vq_stats,
	.get_vq_state           = vdpasim_get_vq_state,
	.get_vq_align           = vdpasim_get_vq_align,
	.get_vq_group           = vdpasim_get_vq_group,
	.get_device_features    = vdpasim_get_device_features,
	.set_driver_features    = vdpasim_set_driver_features,
	.get_driver_features    = vdpasim_get_driver_features,
	.set_config_cb          = vdpasim_set_config_cb,
	.get_vq_num_max         = vdpasim_get_vq_num_max,
	.get_device_id          = vdpasim_get_device_id,
	.get_vendor_id          = vdpasim_get_vendor_id,
	.get_status             = vdpasim_get_status,
	.set_status             = vdpasim_set_status,
	.reset			= vdpasim_reset,
	.suspend		= vdpasim_suspend,
	.resume			= vdpasim_resume,
	.get_config_size        = vdpasim_get_config_size,
	.get_config             = vdpasim_get_config,
	.set_config             = vdpasim_set_config,
	.get_generation         = vdpasim_get_generation,
	.get_iova_range         = vdpasim_get_iova_range,
	.set_group_asid         = vdpasim_set_group_asid,
	.dma_map                = vdpasim_dma_map,
	.dma_unmap              = vdpasim_dma_unmap,
	.bind_mm		= vdpasim_bind_mm,
	.unbind_mm		= vdpasim_unbind_mm,
	.free                   = vdpasim_free,
};

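/*
 * Ops used when batch_mapping is enabled: the whole iotlb of an address
 * space is replaced in a single .set_map call.
 */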
static const struct vdpa_config_ops vdpasim_batch_config_ops = {
	.set_vq_address         = vdpasim_set_vq_address,
	.set_vq_num             = vdpasim_set_vq_num,
	.kick_vq                = vdpasim_kick_vq,
	.set_vq_cb              = vdpasim_set_vq_cb,
	.set_vq_ready           = vdpasim_set_vq_ready,
	.get_vq_ready           = vdpasim_get_vq_ready,
	.set_vq_state           = vdpasim_set_vq_state,
	.get_vendor_vq_stats    = vdpasim_get_vq_stats,
	.get_vq_state           = vdpasim_get_vq_state,
	.get_vq_align           = vdpasim_get_vq_align,
	.get_vq_group           = vdpasim_get_vq_group,
	.get_device_features    = vdpasim_get_device_features,
	.set_driver_features    = vdpasim_set_driver_features,
	.get_driver_features    = vdpasim_get_driver_features,
	.set_config_cb          = vdpasim_set_config_cb,
	.get_vq_num_max         = vdpasim_get_vq_num_max,
	.get_device_id          = vdpasim_get_device_id,
	.get_vendor_id          = vdpasim_get_vendor_id,
	.get_status             = vdpasim_get_status,
	.set_status             = vdpasim_set_status,
	.reset			= vdpasim_reset,
	.suspend		= vdpasim_suspend,
	.resume			= vdpasim_resume,
	.get_config_size        = vdpasim_get_config_size,
	.get_config             = vdpasim_get_config,
	.set_config             = vdpasim_set_config,
	.get_generation         = vdpasim_get_generation,
	.get_iova_range         = vdpasim_get_iova_range,
	.set_group_asid         = vdpasim_set_group_asid,
	.set_map                = vdpasim_set_map,
	.bind_mm		= vdpasim_bind_mm,
	.unbind_mm		= vdpasim_unbind_mm,
	.free                   = vdpasim_free,
};

MODULE_VERSION(DRV_VERSION);
MODULE_LICENSE(DRV_LICENSE);
MODULE_AUTHOR(DRV_AUTHOR);
MODULE_DESCRIPTION(DRV_DESC);