// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
/*
 * Copyright 2013-2016 Freescale Semiconductor Inc.
 * Copyright 2016-2017,2019-2020 NXP
 */

#include <linux/device.h>
#include <linux/iommu.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/vfio.h>
#include <linux/fsl/mc.h>
#include <linux/delay.h>
#include <linux/io-64-nonatomic-hi-lo.h>

#include "vfio_fsl_mc_private.h"

static struct fsl_mc_driver vfio_fsl_mc_driver;

static DEFINE_MUTEX(reflck_lock);

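/*
 * A reflck is a reference-counted lock shared between a DPRC (container)
 * device and all of the fsl-mc objects inside it, so that open/release
 * state for the whole group is serialized under a single mutex.
 * reflck_lock guards creation and lookup of the shared object.
 */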
static void vfio_fsl_mc_reflck_get(struct vfio_fsl_mc_reflck *reflck)
{
	kref_get(&reflck->kref);
}

static void vfio_fsl_mc_reflck_release(struct kref *kref)
{
	struct vfio_fsl_mc_reflck *reflck = container_of(kref,
						      struct vfio_fsl_mc_reflck,
						      kref);

	mutex_destroy(&reflck->lock);
	kfree(reflck);
	mutex_unlock(&reflck_lock);
}

static void vfio_fsl_mc_reflck_put(struct vfio_fsl_mc_reflck *reflck)
{
	kref_put_mutex(&reflck->kref, vfio_fsl_mc_reflck_release, &reflck_lock);
}

static struct vfio_fsl_mc_reflck *vfio_fsl_mc_reflck_alloc(void)
{
	struct vfio_fsl_mc_reflck *reflck;

	reflck = kzalloc(sizeof(*reflck), GFP_KERNEL);
	if (!reflck)
		return ERR_PTR(-ENOMEM);

	kref_init(&reflck->kref);
	mutex_init(&reflck->lock);

	return reflck;
}

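/*
 * A DPRC gets a freshly allocated reflck; any other object looks up the
 * vfio device of its parent container and takes a reference on the
 * parent's reflck instead.
 */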
static int vfio_fsl_mc_reflck_attach(struct vfio_fsl_mc_device *vdev)
{
	int ret = 0;

	mutex_lock(&reflck_lock);
	if (is_fsl_mc_bus_dprc(vdev->mc_dev)) {
		vdev->reflck = vfio_fsl_mc_reflck_alloc();
		ret = PTR_ERR_OR_ZERO(vdev->reflck);
	} else {
		struct device *mc_cont_dev = vdev->mc_dev->dev.parent;
		struct vfio_device *device;
		struct vfio_fsl_mc_device *cont_vdev;

		device = vfio_device_get_from_dev(mc_cont_dev);
		if (!device) {
			ret = -ENODEV;
			goto unlock;
		}

		cont_vdev = vfio_device_data(device);
		if (!cont_vdev || !cont_vdev->reflck) {
			vfio_device_put(device);
			ret = -ENODEV;
			goto unlock;
		}
		vfio_fsl_mc_reflck_get(cont_vdev->reflck);
		vdev->reflck = cont_vdev->reflck;
		vfio_device_put(device);
	}

unlock:
	mutex_unlock(&reflck_lock);
	return ret;
}

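/*
 * Describe each MC resource region to VFIO. A region is eligible for
 * mmap only if it is page-aligned and a multiple of the page size, and
 * never for a DPRC, whose command portal is instead mediated through
 * the read/write handlers below.
 */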
static int vfio_fsl_mc_regions_init(struct vfio_fsl_mc_device *vdev)
{
	struct fsl_mc_device *mc_dev = vdev->mc_dev;
	int count = mc_dev->obj_desc.region_count;
	int i;

	vdev->regions = kcalloc(count, sizeof(struct vfio_fsl_mc_region),
				GFP_KERNEL);
	if (!vdev->regions)
		return -ENOMEM;

	for (i = 0; i < count; i++) {
		struct resource *res = &mc_dev->regions[i];
		int no_mmap = is_fsl_mc_bus_dprc(mc_dev);

		vdev->regions[i].addr = res->start;
		vdev->regions[i].size = resource_size(res);
		vdev->regions[i].type = mc_dev->regions[i].flags & IORESOURCE_BITS;
		/*
		 * Only regions addressed with PAGE granularity may be
		 * MMAPed securely.
		 */
		if (!no_mmap && !(vdev->regions[i].addr & ~PAGE_MASK) &&
				!(vdev->regions[i].size & ~PAGE_MASK))
			vdev->regions[i].flags |=
					VFIO_REGION_INFO_FLAG_MMAP;
		vdev->regions[i].flags |= VFIO_REGION_INFO_FLAG_READ;
		if (!(mc_dev->regions[i].flags & IORESOURCE_READONLY))
			vdev->regions[i].flags |= VFIO_REGION_INFO_FLAG_WRITE;
	}

	return 0;
}

static void vfio_fsl_mc_regions_cleanup(struct vfio_fsl_mc_device *vdev)
{
	struct fsl_mc_device *mc_dev = vdev->mc_dev;
	int i;

	for (i = 0; i < mc_dev->obj_desc.region_count; i++)
		iounmap(vdev->regions[i].ioaddr);
	kfree(vdev->regions);
}

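/*
 * Region state is set up on the first open of the device; vdev->refcnt
 * counts outstanding opens and is serialized by the shared reflck lock.
 */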
static int vfio_fsl_mc_open(void *device_data)
{
	struct vfio_fsl_mc_device *vdev = device_data;
	int ret;

	if (!try_module_get(THIS_MODULE))
		return -ENODEV;

	mutex_lock(&vdev->reflck->lock);
	if (!vdev->refcnt) {
		ret = vfio_fsl_mc_regions_init(vdev);
		if (ret)
			goto err_reg_init;
	}
	vdev->refcnt++;

	mutex_unlock(&vdev->reflck->lock);

	return 0;

err_reg_init:
	mutex_unlock(&vdev->reflck->lock);
	module_put(THIS_MODULE);
	return ret;
}

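/*
 * On the last close, unmap the regions, reset the container
 * (non-recursively) to quiesce the device, and tear down interrupts.
 */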
static void vfio_fsl_mc_release(void *device_data)
{
	struct vfio_fsl_mc_device *vdev = device_data;
	int ret;

	mutex_lock(&vdev->reflck->lock);

	if (!(--vdev->refcnt)) {
		struct fsl_mc_device *mc_dev = vdev->mc_dev;
		struct device *cont_dev = fsl_mc_cont_dev(&mc_dev->dev);
		struct fsl_mc_device *mc_cont = to_fsl_mc_device(cont_dev);

		vfio_fsl_mc_regions_cleanup(vdev);

		/* reset the device before cleaning up the interrupts */
		ret = dprc_reset_container(mc_cont->mc_io, 0,
					   mc_cont->mc_handle,
					   mc_cont->obj_desc.id,
					   DPRC_RESET_OPTION_NON_RECURSIVE);
		if (ret) {
			dev_warn(&mc_cont->dev,
				 "VFIO_FSL_MC: reset device failed (%d)\n",
				 ret);
			WARN_ON(1);
		}

		vfio_fsl_mc_irqs_cleanup(vdev);

		fsl_mc_cleanup_irq_pool(mc_cont);
	}

	mutex_unlock(&vdev->reflck->lock);

	module_put(THIS_MODULE);
}

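/*
 * ioctl entry point: device/region/IRQ queries, IRQ setup, and a reset
 * that is only supported for DPRC objects.
 */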
static long vfio_fsl_mc_ioctl(void *device_data, unsigned int cmd,
			      unsigned long arg)
{
	unsigned long minsz;
	struct vfio_fsl_mc_device *vdev = device_data;
	struct fsl_mc_device *mc_dev = vdev->mc_dev;

	switch (cmd) {
	case VFIO_DEVICE_GET_INFO:
	{
		struct vfio_device_info info;

		minsz = offsetofend(struct vfio_device_info, num_irqs);

		if (copy_from_user(&info, (void __user *)arg, minsz))
			return -EFAULT;

		if (info.argsz < minsz)
			return -EINVAL;

		info.flags = VFIO_DEVICE_FLAGS_FSL_MC;

		if (is_fsl_mc_bus_dprc(mc_dev))
			info.flags |= VFIO_DEVICE_FLAGS_RESET;

		info.num_regions = mc_dev->obj_desc.region_count;
		info.num_irqs = mc_dev->obj_desc.irq_count;

		return copy_to_user((void __user *)arg, &info, minsz) ?
			-EFAULT : 0;
	}
	case VFIO_DEVICE_GET_REGION_INFO:
	{
		struct vfio_region_info info;

		minsz = offsetofend(struct vfio_region_info, offset);

		if (copy_from_user(&info, (void __user *)arg, minsz))
			return -EFAULT;

		if (info.argsz < minsz)
			return -EINVAL;

		if (info.index >= mc_dev->obj_desc.region_count)
			return -EINVAL;

		/* map offset to the physical address */
		info.offset = VFIO_FSL_MC_INDEX_TO_OFFSET(info.index);
		info.size = vdev->regions[info.index].size;
		info.flags = vdev->regions[info.index].flags;

		/* copy_to_user() returns bytes not copied; map to -EFAULT */
		return copy_to_user((void __user *)arg, &info, minsz) ?
			-EFAULT : 0;
	}
	case VFIO_DEVICE_GET_IRQ_INFO:
	{
		struct vfio_irq_info info;

		minsz = offsetofend(struct vfio_irq_info, count);
		if (copy_from_user(&info, (void __user *)arg, minsz))
			return -EFAULT;

		if (info.argsz < minsz)
			return -EINVAL;

		if (info.index >= mc_dev->obj_desc.irq_count)
			return -EINVAL;

		info.flags = VFIO_IRQ_INFO_EVENTFD;
		info.count = 1;

		return copy_to_user((void __user *)arg, &info, minsz) ?
			-EFAULT : 0;
	}
	case VFIO_DEVICE_SET_IRQS:
	{
		struct vfio_irq_set hdr;
		u8 *data = NULL;
		int ret = 0;
		size_t data_size = 0;

		minsz = offsetofend(struct vfio_irq_set, count);

		if (copy_from_user(&hdr, (void __user *)arg, minsz))
			return -EFAULT;

		ret = vfio_set_irqs_validate_and_prepare(&hdr, mc_dev->obj_desc.irq_count,
					mc_dev->obj_desc.irq_count, &data_size);
		if (ret)
			return ret;

		if (data_size) {
			data = memdup_user((void __user *)(arg + minsz),
					   data_size);
			if (IS_ERR(data))
				return PTR_ERR(data);
		}

		mutex_lock(&vdev->igate);
		ret = vfio_fsl_mc_set_irqs_ioctl(vdev, hdr.flags,
						 hdr.index, hdr.start,
						 hdr.count, data);
		mutex_unlock(&vdev->igate);
		kfree(data);

		return ret;
	}
	case VFIO_DEVICE_RESET:
	{
		int ret;
		struct fsl_mc_device *mc_dev = vdev->mc_dev;

		/* reset is supported only for the DPRC */
		if (!is_fsl_mc_bus_dprc(mc_dev))
			return -ENOTTY;

		ret = dprc_reset_container(mc_dev->mc_io, 0,
					   mc_dev->mc_handle,
					   mc_dev->obj_desc.id,
					   DPRC_RESET_OPTION_NON_RECURSIVE);
		return ret;
	}
	default:
		return -ENOTTY;
	}
}

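/*
 * Reads are only supported on an MC command portal region: exactly
 * 64 bytes at offset 0, returning all eight 64-bit words of the portal
 * with the command header (word 0) read last.
 */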
static ssize_t vfio_fsl_mc_read(void *device_data, char __user *buf,
				size_t count, loff_t *ppos)
{
	struct vfio_fsl_mc_device *vdev = device_data;
	unsigned int index = VFIO_FSL_MC_OFFSET_TO_INDEX(*ppos);
	loff_t off = *ppos & VFIO_FSL_MC_OFFSET_MASK;
	struct fsl_mc_device *mc_dev = vdev->mc_dev;
	struct vfio_fsl_mc_region *region;
	u64 data[8];
	int i;

	if (index >= mc_dev->obj_desc.region_count)
		return -EINVAL;

	region = &vdev->regions[index];

	if (!(region->flags & VFIO_REGION_INFO_FLAG_READ))
		return -EINVAL;

	if (!region->ioaddr) {
		region->ioaddr = ioremap(region->addr, region->size);
		if (!region->ioaddr)
			return -ENOMEM;
	}

	if (count != 64 || off != 0)
		return -EINVAL;

	for (i = 7; i >= 0; i--)
		data[i] = readq(region->ioaddr + i * sizeof(uint64_t));

	if (copy_to_user(buf, data, 64))
		return -EFAULT;

	return count;
}

#define MC_CMD_COMPLETION_TIMEOUT_MS    5000
#define MC_CMD_COMPLETION_POLLING_MAX_SLEEP_USECS    500

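/*
 * MC command portal protocol: parameter words 7..1 are written first,
 * then the command header (word 0), which hands the command to the MC.
 * Completion is detected by polling the status byte in the header until
 * it leaves the READY state.
 */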
static int vfio_fsl_mc_send_command(void __iomem *ioaddr, uint64_t *cmd_data)
{
	int i;
	enum mc_cmd_status status;
	unsigned long timeout_usecs = MC_CMD_COMPLETION_TIMEOUT_MS * 1000;

	/* Write the command parameters into the portal */
	for (i = 7; i >= 1; i--)
		writeq_relaxed(cmd_data[i], ioaddr + i * sizeof(uint64_t));

	/* Write the command header last; this submits the command */
	writeq(cmd_data[0], ioaddr);

	/*
	 * Wait for the response before returning to user-space.
	 * This could be optimized in the future to prepare the response
	 * before returning to user-space, avoiding the read ioctl.
	 */
	for (;;) {
		u64 header;
		struct mc_cmd_header *resp_hdr;

		header = cpu_to_le64(readq_relaxed(ioaddr));

		resp_hdr = (struct mc_cmd_header *)&header;
		status = (enum mc_cmd_status)resp_hdr->status;
		if (status != MC_CMD_STATUS_READY)
			break;

		udelay(MC_CMD_COMPLETION_POLLING_MAX_SLEEP_USECS);
		timeout_usecs -= MC_CMD_COMPLETION_POLLING_MAX_SLEEP_USECS;
		if (timeout_usecs == 0)
			return -ETIMEDOUT;
	}

	return 0;
}

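/*
 * Writes are likewise restricted to a full 64-byte MC command at
 * offset 0; the command is forwarded to the hardware portal and this
 * handler waits for it to complete before returning.
 */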
static ssize_t vfio_fsl_mc_write(void *device_data, const char __user *buf,
				 size_t count, loff_t *ppos)
{
	struct vfio_fsl_mc_device *vdev = device_data;
	unsigned int index = VFIO_FSL_MC_OFFSET_TO_INDEX(*ppos);
	loff_t off = *ppos & VFIO_FSL_MC_OFFSET_MASK;
	struct fsl_mc_device *mc_dev = vdev->mc_dev;
	struct vfio_fsl_mc_region *region;
	u64 data[8];
	int ret;

	if (index >= mc_dev->obj_desc.region_count)
		return -EINVAL;

	region = &vdev->regions[index];

	if (!(region->flags & VFIO_REGION_INFO_FLAG_WRITE))
		return -EINVAL;

	if (!region->ioaddr) {
		region->ioaddr = ioremap(region->addr, region->size);
		if (!region->ioaddr)
			return -ENOMEM;
	}

	if (count != 64 || off != 0)
		return -EINVAL;

	if (copy_from_user(&data, buf, 64))
		return -EFAULT;

	ret = vfio_fsl_mc_send_command(region->ioaddr, data);
	if (ret)
		return ret;

	return count;
}

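/*
 * Map a single region into the vma. Only regions the MC marks both
 * cacheable and shareable keep a cacheable mapping; all others are
 * mapped non-cached.
 */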
static int vfio_fsl_mc_mmap_mmio(struct vfio_fsl_mc_region region,
				 struct vm_area_struct *vma)
{
	u64 size = vma->vm_end - vma->vm_start;
	u64 pgoff, base;
	u8 region_cacheable;

	pgoff = vma->vm_pgoff &
		((1U << (VFIO_FSL_MC_OFFSET_SHIFT - PAGE_SHIFT)) - 1);
	base = pgoff << PAGE_SHIFT;

	if (region.size < PAGE_SIZE || base + size > region.size)
		return -EINVAL;

	region_cacheable = (region.type & FSL_MC_REGION_CACHEABLE) &&
			   (region.type & FSL_MC_REGION_SHAREABLE);
	if (!region_cacheable)
		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

	vma->vm_pgoff = (region.addr >> PAGE_SHIFT) + pgoff;

	return remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
			       size, vma->vm_page_prot);
}

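/* Validate the vma against the region's size and access flags. */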
static int vfio_fsl_mc_mmap(void *device_data, struct vm_area_struct *vma)
{
	struct vfio_fsl_mc_device *vdev = device_data;
	struct fsl_mc_device *mc_dev = vdev->mc_dev;
	int index;

	index = vma->vm_pgoff >> (VFIO_FSL_MC_OFFSET_SHIFT - PAGE_SHIFT);

	if (vma->vm_end < vma->vm_start)
		return -EINVAL;
	if (vma->vm_start & ~PAGE_MASK)
		return -EINVAL;
	if (vma->vm_end & ~PAGE_MASK)
		return -EINVAL;
	if (!(vma->vm_flags & VM_SHARED))
		return -EINVAL;
	if (index >= mc_dev->obj_desc.region_count)
		return -EINVAL;

	if (!(vdev->regions[index].flags & VFIO_REGION_INFO_FLAG_MMAP))
		return -EINVAL;

	if (!(vdev->regions[index].flags & VFIO_REGION_INFO_FLAG_READ)
			&& (vma->vm_flags & VM_READ))
		return -EINVAL;

	if (!(vdev->regions[index].flags & VFIO_REGION_INFO_FLAG_WRITE)
			&& (vma->vm_flags & VM_WRITE))
		return -EINVAL;

	vma->vm_private_data = mc_dev;

	return vfio_fsl_mc_mmap_mmio(vdev->regions[index], vma);
}

static const struct vfio_device_ops vfio_fsl_mc_ops = {
	.name		= "vfio-fsl-mc",
	.open		= vfio_fsl_mc_open,
	.release	= vfio_fsl_mc_release,
	.ioctl		= vfio_fsl_mc_ioctl,
	.read		= vfio_fsl_mc_read,
	.write		= vfio_fsl_mc_write,
	.mmap		= vfio_fsl_mc_mmap,
};

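/*
 * A hypothetical user-space sketch (not part of this driver): once a
 * device fd has been obtained through the usual VFIO group setup
 * (VFIO_GROUP_GET_DEVICE_FD), it is queried like any other VFIO device:
 *
 *	struct vfio_device_info info = { .argsz = sizeof(info) };
 *
 *	ioctl(device_fd, VFIO_DEVICE_GET_INFO, &info);
 *	// info.flags carries VFIO_DEVICE_FLAGS_FSL_MC, plus
 *	// VFIO_DEVICE_FLAGS_RESET when the device is a DPRC.
 */

/*
 * When an object is added inside a DPRC that is bound to vfio-fsl-mc,
 * set its driver_override so the child binds to vfio-fsl-mc as well,
 * and warn if a child nonetheless binds to some other driver.
 */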
static int vfio_fsl_mc_bus_notifier(struct notifier_block *nb,
				    unsigned long action, void *data)
{
	struct vfio_fsl_mc_device *vdev = container_of(nb,
					struct vfio_fsl_mc_device, nb);
	struct device *dev = data;
	struct fsl_mc_device *mc_dev = to_fsl_mc_device(dev);
	struct fsl_mc_device *mc_cont = to_fsl_mc_device(mc_dev->dev.parent);

	if (action == BUS_NOTIFY_ADD_DEVICE &&
	    vdev->mc_dev == mc_cont) {
		mc_dev->driver_override = kasprintf(GFP_KERNEL, "%s",
						    vfio_fsl_mc_ops.name);
		if (!mc_dev->driver_override)
			dev_warn(dev, "VFIO_FSL_MC: Setting driver override for device in dprc %s failed\n",
				 dev_name(&mc_cont->dev));
		else
			dev_info(dev, "VFIO_FSL_MC: Setting driver override for device in dprc %s\n",
				 dev_name(&mc_cont->dev));
	} else if (action == BUS_NOTIFY_BOUND_DRIVER &&
		vdev->mc_dev == mc_cont) {
		struct fsl_mc_driver *mc_drv = to_fsl_mc_driver(dev->driver);

		if (mc_drv && mc_drv != &vfio_fsl_mc_driver)
			dev_warn(dev, "VFIO_FSL_MC: Object %s bound to driver %s while DPRC bound to vfio-fsl-mc\n",
				 dev_name(dev), mc_drv->driver.name);
	}

	return 0;
}

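/*
 * DPRC-specific setup: register the bus notifier, open the DPRC and
 * allocate an MC portal, then scan the container for child objects.
 * Non-DPRC objects simply borrow mc_io from their parent container.
 */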
static int vfio_fsl_mc_init_device(struct vfio_fsl_mc_device *vdev)
{
	struct fsl_mc_device *mc_dev = vdev->mc_dev;
	int ret;

	/* Non-dprc devices share mc_io from parent */
	if (!is_fsl_mc_bus_dprc(mc_dev)) {
		struct fsl_mc_device *mc_cont = to_fsl_mc_device(mc_dev->dev.parent);

		mc_dev->mc_io = mc_cont->mc_io;
		return 0;
	}

	vdev->nb.notifier_call = vfio_fsl_mc_bus_notifier;
	ret = bus_register_notifier(&fsl_mc_bus_type, &vdev->nb);
	if (ret)
		return ret;

	/* open DPRC, allocate a MC portal */
	ret = dprc_setup(mc_dev);
	if (ret) {
		dev_err(&mc_dev->dev, "VFIO_FSL_MC: Failed to setup DPRC (%d)\n", ret);
		goto out_nc_unreg;
	}

	ret = dprc_scan_container(mc_dev, false);
	if (ret) {
		dev_err(&mc_dev->dev, "VFIO_FSL_MC: Container scanning failed (%d)\n", ret);
		goto out_dprc_cleanup;
	}

	return 0;

out_dprc_cleanup:
	dprc_remove_devices(mc_dev, NULL, 0);
	dprc_cleanup(mc_dev);
out_nc_unreg:
	bus_unregister_notifier(&fsl_mc_bus_type, &vdev->nb);
	vdev->nb.notifier_call = NULL;

	return ret;
}

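/*
 * probe: the device must already belong to an IOMMU group. Register it
 * with VFIO, attach (or allocate) the reflck, then perform object-type
 * specific initialization.
 */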
static int vfio_fsl_mc_probe(struct fsl_mc_device *mc_dev)
{
	struct iommu_group *group;
	struct vfio_fsl_mc_device *vdev;
	struct device *dev = &mc_dev->dev;
	int ret;

	group = vfio_iommu_group_get(dev);
	if (!group) {
		dev_err(dev, "VFIO_FSL_MC: No IOMMU group\n");
		return -EINVAL;
	}

	vdev = devm_kzalloc(dev, sizeof(*vdev), GFP_KERNEL);
	if (!vdev) {
		ret = -ENOMEM;
		goto out_group_put;
	}

	vdev->mc_dev = mc_dev;

	ret = vfio_add_group_dev(dev, &vfio_fsl_mc_ops, vdev);
	if (ret) {
		dev_err(dev, "VFIO_FSL_MC: Failed to add to vfio group\n");
		goto out_group_put;
	}

	ret = vfio_fsl_mc_reflck_attach(vdev);
	if (ret)
		goto out_group_dev;

	ret = vfio_fsl_mc_init_device(vdev);
	if (ret)
		goto out_reflck;

	mutex_init(&vdev->igate);

	return 0;

out_reflck:
	vfio_fsl_mc_reflck_put(vdev->reflck);
out_group_dev:
	vfio_del_group_dev(dev);
out_group_put:
	vfio_iommu_group_put(group, dev);
	return ret;
}

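/*
 * remove: unregister from VFIO first so no new opens race with the
 * teardown, then release the reflck and DPRC resources.
 */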
static int vfio_fsl_mc_remove(struct fsl_mc_device *mc_dev)
{
	struct vfio_fsl_mc_device *vdev;
	struct device *dev = &mc_dev->dev;

	vdev = vfio_del_group_dev(dev);
	if (!vdev)
		return -EINVAL;

	mutex_destroy(&vdev->igate);

	vfio_fsl_mc_reflck_put(vdev->reflck);

	if (is_fsl_mc_bus_dprc(mc_dev)) {
		dprc_remove_devices(mc_dev, NULL, 0);
		dprc_cleanup(mc_dev);
	}

	if (vdev->nb.notifier_call)
		bus_unregister_notifier(&fsl_mc_bus_type, &vdev->nb);

	vfio_iommu_group_put(mc_dev->dev.iommu_group, dev);

	return 0;
}

659 
660 static struct fsl_mc_driver vfio_fsl_mc_driver = {
661 	.probe		= vfio_fsl_mc_probe,
662 	.remove		= vfio_fsl_mc_remove,
663 	.driver	= {
664 		.name	= "vfio-fsl-mc",
665 		.owner	= THIS_MODULE,
666 	},
667 };
668 
669 static int __init vfio_fsl_mc_driver_init(void)
670 {
671 	return fsl_mc_driver_register(&vfio_fsl_mc_driver);
672 }
673 
674 static void __exit vfio_fsl_mc_driver_exit(void)
675 {
676 	fsl_mc_driver_unregister(&vfio_fsl_mc_driver);
677 }
678 
679 module_init(vfio_fsl_mc_driver_init);
680 module_exit(vfio_fsl_mc_driver_exit);
681 
682 MODULE_LICENSE("Dual BSD/GPL");
683 MODULE_DESCRIPTION("VFIO for FSL-MC devices - User Level meta-driver");
684