xref: /openbmc/linux/samples/vfio-mdev/mdpy.c (revision 89345d51)
// SPDX-License-Identifier: GPL-2.0
/*
 * Mediated virtual PCI display host device driver
 *
 * See mdpy-defs.h for device specs
 *
 *   (c) Gerd Hoffmann <kraxel@redhat.com>
 *
 * based on mtty driver which is:
 *   Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
 *	 Author: Neo Jia <cjia@nvidia.com>
 *		 Kirti Wankhede <kwankhede@nvidia.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/init.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/cdev.h>
#include <linux/vfio.h>
#include <linux/iommu.h>
#include <linux/sysfs.h>
#include <linux/mdev.h>
#include <linux/pci.h>
#include <drm/drm_fourcc.h>
#include "mdpy-defs.h"

#define MDPY_NAME		"mdpy"
#define MDPY_CLASS_NAME		"mdpy"

#define MDPY_CONFIG_SPACE_SIZE	0xff
#define MDPY_MEMORY_BAR_OFFSET	PAGE_SIZE
#define MDPY_DISPLAY_REGION	16

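/*
 * Store 16/32-bit values into the emulated config space.  These are plain
 * CPU stores, so they match the PCI little-endian layout only on
 * little-endian hosts.
 */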
#define STORE_LE16(addr, val)	(*(u16 *)addr = val)
#define STORE_LE32(addr, val)	(*(u32 *)addr = val)


MODULE_LICENSE("GPL v2");

static int max_devices = 4;
module_param_named(count, max_devices, int, 0444);
MODULE_PARM_DESC(count, "number of " MDPY_NAME " devices");

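/*
 * Instances are created through the standard mdev sysfs interface: write a
 * UUID to the "create" file of one of the types published by the parent
 * device registered in mdpy_dev_init().  A typical session looks roughly
 * like the following (exact sysfs paths and type directory names may vary
 * with the kernel version, so verify them on the running system):
 *
 *   modprobe mdpy count=4
 *   UUID=$(uuidgen)
 *   echo $UUID > /sys/devices/virtual/mdpy/mdpy/mdev_supported_types/mdpy-vga/create
 *
 * The resulting mediated device can then be opened through VFIO or assigned
 * to a virtual machine like any other vfio-mdev device.
 */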

#define MDPY_TYPE_1 "vga"
#define MDPY_TYPE_2 "xga"
#define MDPY_TYPE_3 "hd"

static const struct mdpy_type {
	const char *name;
	u32 format;
	u32 bytepp;
	u32 width;
	u32 height;
} mdpy_types[] = {
	{
		.name	= MDPY_CLASS_NAME "-" MDPY_TYPE_1,
		.format = DRM_FORMAT_XRGB8888,
		.bytepp = 4,
		.width	= 640,
		.height = 480,
	}, {
		.name	= MDPY_CLASS_NAME "-" MDPY_TYPE_2,
		.format = DRM_FORMAT_XRGB8888,
		.bytepp = 4,
		.width	= 1024,
		.height = 768,
	}, {
		.name	= MDPY_CLASS_NAME "-" MDPY_TYPE_3,
		.format = DRM_FORMAT_XRGB8888,
		.bytepp = 4,
		.width	= 1920,
		.height = 1080,
	},
};

static dev_t		mdpy_devt;
static struct class	*mdpy_class;
static struct cdev	mdpy_cdev;
static struct device	mdpy_dev;
static struct mdev_parent mdpy_parent;
static u32		mdpy_count;
static const struct vfio_device_ops mdpy_dev_ops;

/* State of each mdev device */
struct mdev_state {
	struct vfio_device vdev;
	u8 *vconfig;
	u32 bar_mask;
	struct mutex ops_lock;
	struct mdev_device *mdev;
	struct vfio_device_info dev_info;

	const struct mdpy_type *type;
	u32 memsize;
	void *memblk;
};

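/*
 * Build the virtual PCI config space: vendor/device IDs, a 32-bit
 * prefetchable memory BAR0 sized to the framebuffer, and a vendor
 * capability that tells the guest the framebuffer format and resolution
 * (see mdpy-defs.h for the register layout).
 */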
static void mdpy_create_config_space(struct mdev_state *mdev_state)
{
	STORE_LE16((u16 *) &mdev_state->vconfig[PCI_VENDOR_ID],
		   MDPY_PCI_VENDOR_ID);
	STORE_LE16((u16 *) &mdev_state->vconfig[PCI_DEVICE_ID],
		   MDPY_PCI_DEVICE_ID);
	STORE_LE16((u16 *) &mdev_state->vconfig[PCI_SUBSYSTEM_VENDOR_ID],
		   MDPY_PCI_SUBVENDOR_ID);
	STORE_LE16((u16 *) &mdev_state->vconfig[PCI_SUBSYSTEM_ID],
		   MDPY_PCI_SUBDEVICE_ID);

	STORE_LE16((u16 *) &mdev_state->vconfig[PCI_COMMAND],
		   PCI_COMMAND_IO | PCI_COMMAND_MEMORY);
	STORE_LE16((u16 *) &mdev_state->vconfig[PCI_STATUS],
		   PCI_STATUS_CAP_LIST);
	STORE_LE16((u16 *) &mdev_state->vconfig[PCI_CLASS_DEVICE],
		   PCI_CLASS_DISPLAY_OTHER);
	mdev_state->vconfig[PCI_CLASS_REVISION] =  0x01;

	STORE_LE32((u32 *) &mdev_state->vconfig[PCI_BASE_ADDRESS_0],
		   PCI_BASE_ADDRESS_SPACE_MEMORY |
		   PCI_BASE_ADDRESS_MEM_TYPE_32	 |
		   PCI_BASE_ADDRESS_MEM_PREFETCH);
	mdev_state->bar_mask = ~(mdev_state->memsize) + 1;

	/* vendor specific capability for the config registers */
	mdev_state->vconfig[PCI_CAPABILITY_LIST]       = MDPY_VENDORCAP_OFFSET;
	mdev_state->vconfig[MDPY_VENDORCAP_OFFSET + 0] = 0x09; /* vendor cap */
	mdev_state->vconfig[MDPY_VENDORCAP_OFFSET + 1] = 0x00; /* next ptr */
	mdev_state->vconfig[MDPY_VENDORCAP_OFFSET + 2] = MDPY_VENDORCAP_SIZE;
	STORE_LE32((u32 *) &mdev_state->vconfig[MDPY_FORMAT_OFFSET],
		   mdev_state->type->format);
	STORE_LE32((u32 *) &mdev_state->vconfig[MDPY_WIDTH_OFFSET],
		   mdev_state->type->width);
	STORE_LE32((u32 *) &mdev_state->vconfig[MDPY_HEIGHT_OFFSET],
		   mdev_state->type->height);
}

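/*
 * Config space writes: only BAR0 is handled.  A write of 0xffffffff
 * implements the usual BAR sizing protocol by returning the size mask;
 * any other value programs the BAR address.
 */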
static void handle_pci_cfg_write(struct mdev_state *mdev_state, u16 offset,
				 char *buf, u32 count)
{
	struct device *dev = mdev_dev(mdev_state->mdev);
	u32 cfg_addr;

	switch (offset) {
	case PCI_BASE_ADDRESS_0:
		cfg_addr = *(u32 *)buf;

		if (cfg_addr == 0xffffffff) {
			cfg_addr = (cfg_addr & mdev_state->bar_mask);
		} else {
			cfg_addr &= PCI_BASE_ADDRESS_MEM_MASK;
			if (cfg_addr)
				dev_info(dev, "BAR0 @ 0x%x\n", cfg_addr);
		}

		cfg_addr |= (mdev_state->vconfig[offset] &
			     ~PCI_BASE_ADDRESS_MEM_MASK);
		STORE_LE32(&mdev_state->vconfig[offset], cfg_addr);
		break;
	}
}

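/*
 * Common backend for region accesses: offsets below MDPY_CONFIG_SPACE_SIZE
 * hit the emulated config space, offsets within
 * [MDPY_MEMORY_BAR_OFFSET, MDPY_MEMORY_BAR_OFFSET + memsize) hit the
 * framebuffer, and everything else is rejected.
 */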
static ssize_t mdev_access(struct mdev_state *mdev_state, char *buf,
			   size_t count, loff_t pos, bool is_write)
{
	int ret = 0;

	mutex_lock(&mdev_state->ops_lock);

	if (pos < MDPY_CONFIG_SPACE_SIZE) {
		if (is_write)
			handle_pci_cfg_write(mdev_state, pos, buf, count);
		else
			memcpy(buf, (mdev_state->vconfig + pos), count);

	} else if ((pos >= MDPY_MEMORY_BAR_OFFSET) &&
		   (pos + count <=
		    MDPY_MEMORY_BAR_OFFSET + mdev_state->memsize)) {
		/* access the framebuffer at the requested BAR offset */
		pos -= MDPY_MEMORY_BAR_OFFSET;
		if (is_write)
			memcpy(mdev_state->memblk + pos, buf, count);
		else
			memcpy(buf, mdev_state->memblk + pos, count);

	} else {
		dev_info(mdev_state->vdev.dev,
			 "%s: %s @0x%llx (unhandled)\n", __func__,
			 is_write ? "WR" : "RD", pos);
		ret = -1;
		goto accessfailed;
	}

	ret = count;


accessfailed:
	mutex_unlock(&mdev_state->ops_lock);

	return ret;
}

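/* Device reset (also backs VFIO_DEVICE_RESET): repaint the framebuffer. */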
static int mdpy_reset(struct mdev_state *mdev_state)
{
	u32 stride, i;

	/* initialize with gray gradient */
	stride = mdev_state->type->width * mdev_state->type->bytepp;
	for (i = 0; i < mdev_state->type->height; i++)
		memset(mdev_state->memblk + i * stride,
		       i * 255 / mdev_state->type->height,
		       stride);
	return 0;
}

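/*
 * vfio_device init callback: allocate the emulated config space and the
 * framebuffer (rounded up to a power of two, as PCI BAR sizes must be),
 * then initialize both.
 */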
static int mdpy_init_dev(struct vfio_device *vdev)
{
	struct mdev_state *mdev_state =
		container_of(vdev, struct mdev_state, vdev);
	struct mdev_device *mdev = to_mdev_device(vdev->dev);
	const struct mdpy_type *type =
		&mdpy_types[mdev_get_type_group_id(mdev)];
	u32 fbsize;
	int ret = -ENOMEM;

	if (mdpy_count >= max_devices)
		return ret;

	mdev_state->vconfig = kzalloc(MDPY_CONFIG_SPACE_SIZE, GFP_KERNEL);
	if (!mdev_state->vconfig)
		return ret;

	fbsize = roundup_pow_of_two(type->width * type->height * type->bytepp);

	mdev_state->memblk = vmalloc_user(fbsize);
	if (!mdev_state->memblk)
		goto out_vconfig;

	mutex_init(&mdev_state->ops_lock);
	mdev_state->mdev = mdev;
	mdev_state->type = type;
	mdev_state->memsize = fbsize;
	mdpy_create_config_space(mdev_state);
	mdpy_reset(mdev_state);

	dev_info(vdev->dev, "%s: %s (%dx%d)\n", __func__, type->name, type->width,
		 type->height);

	mdpy_count++;
	return 0;

out_vconfig:
	kfree(mdev_state->vconfig);
	return ret;
}

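/*
 * mdev probe: vfio_alloc_device() allocates struct mdev_state and runs the
 * .init callback (mdpy_init_dev) before the device is registered with the
 * VFIO core.
 */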
static int mdpy_probe(struct mdev_device *mdev)
{
	struct mdev_state *mdev_state;
	int ret;

	mdev_state = vfio_alloc_device(mdev_state, vdev, &mdev->dev,
				       &mdpy_dev_ops);
	if (IS_ERR(mdev_state))
		return PTR_ERR(mdev_state);

	ret = vfio_register_emulated_iommu_dev(&mdev_state->vdev);
	if (ret)
		goto err_put_vdev;
	dev_set_drvdata(&mdev->dev, mdev_state);
	return 0;

err_put_vdev:
	vfio_put_device(&mdev_state->vdev);
	return ret;
}

static void mdpy_release_dev(struct vfio_device *vdev)
{
	struct mdev_state *mdev_state =
		container_of(vdev, struct mdev_state, vdev);

	mdpy_count--;
	vfree(mdev_state->memblk);
	kfree(mdev_state->vconfig);
	vfio_free_device(vdev);
}

static void mdpy_remove(struct mdev_device *mdev)
{
	struct mdev_state *mdev_state = dev_get_drvdata(&mdev->dev);

	dev_info(&mdev->dev, "%s\n", __func__);

	vfio_unregister_group_dev(&mdev_state->vdev);
	vfio_put_device(&mdev_state->vdev);
}

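/*
 * read()/write() handlers copy data in naturally aligned 4/2/1 byte chunks
 * between userspace and mdev_access().
 */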
static ssize_t mdpy_read(struct vfio_device *vdev, char __user *buf,
			 size_t count, loff_t *ppos)
{
	struct mdev_state *mdev_state =
		container_of(vdev, struct mdev_state, vdev);
	unsigned int done = 0;
	int ret;

	while (count) {
		size_t filled;

		if (count >= 4 && !(*ppos % 4)) {
			u32 val;

			ret = mdev_access(mdev_state, (char *)&val, sizeof(val),
					  *ppos, false);
			if (ret <= 0)
				goto read_err;

			if (copy_to_user(buf, &val, sizeof(val)))
				goto read_err;

			filled = 4;
		} else if (count >= 2 && !(*ppos % 2)) {
			u16 val;

			ret = mdev_access(mdev_state, (char *)&val, sizeof(val),
					  *ppos, false);
			if (ret <= 0)
				goto read_err;

			if (copy_to_user(buf, &val, sizeof(val)))
				goto read_err;

			filled = 2;
		} else {
			u8 val;

			ret = mdev_access(mdev_state, (char *)&val, sizeof(val),
					  *ppos, false);
			if (ret <= 0)
				goto read_err;

			if (copy_to_user(buf, &val, sizeof(val)))
				goto read_err;

			filled = 1;
		}

		count -= filled;
		done += filled;
		*ppos += filled;
		buf += filled;
	}

	return done;

read_err:
	return -EFAULT;
}

static ssize_t mdpy_write(struct vfio_device *vdev, const char __user *buf,
			  size_t count, loff_t *ppos)
{
	struct mdev_state *mdev_state =
		container_of(vdev, struct mdev_state, vdev);
	unsigned int done = 0;
	int ret;

	while (count) {
		size_t filled;

		if (count >= 4 && !(*ppos % 4)) {
			u32 val;

			if (copy_from_user(&val, buf, sizeof(val)))
				goto write_err;

			ret = mdev_access(mdev_state, (char *)&val, sizeof(val),
					  *ppos, true);
			if (ret <= 0)
				goto write_err;

			filled = 4;
		} else if (count >= 2 && !(*ppos % 2)) {
			u16 val;

			if (copy_from_user(&val, buf, sizeof(val)))
				goto write_err;

			ret = mdev_access(mdev_state, (char *)&val, sizeof(val),
					  *ppos, true);
			if (ret <= 0)
				goto write_err;

			filled = 2;
		} else {
			u8 val;

			if (copy_from_user(&val, buf, sizeof(val)))
				goto write_err;

			ret = mdev_access(mdev_state, (char *)&val, sizeof(val),
					  *ppos, true);
			if (ret <= 0)
				goto write_err;

			filled = 1;
		}
		count -= filled;
		done += filled;
		*ppos += filled;
		buf += filled;
	}

	return done;
write_err:
	return -EFAULT;
}

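/*
 * mmap() exposes the vmalloc'ed framebuffer; only shared mappings of the
 * memory BAR offset, no larger than the framebuffer, are allowed.
 */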
static int mdpy_mmap(struct vfio_device *vdev, struct vm_area_struct *vma)
{
	struct mdev_state *mdev_state =
		container_of(vdev, struct mdev_state, vdev);

	if (vma->vm_pgoff != MDPY_MEMORY_BAR_OFFSET >> PAGE_SHIFT)
		return -EINVAL;
	if (vma->vm_end < vma->vm_start)
		return -EINVAL;
	if (vma->vm_end - vma->vm_start > mdev_state->memsize)
		return -EINVAL;
	if ((vma->vm_flags & VM_SHARED) == 0)
		return -EINVAL;

	return remap_vmalloc_range(vma, mdev_state->memblk, 0);
}

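/*
 * Region info: the framebuffer is reported both as BAR0 and as the extra
 * MDPY_DISPLAY_REGION index referenced by VFIO_DEVICE_QUERY_GFX_PLANE.
 */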
static int mdpy_get_region_info(struct mdev_state *mdev_state,
				struct vfio_region_info *region_info,
				u16 *cap_type_id, void **cap_type)
{
	if (region_info->index >= VFIO_PCI_NUM_REGIONS &&
	    region_info->index != MDPY_DISPLAY_REGION)
		return -EINVAL;

	switch (region_info->index) {
	case VFIO_PCI_CONFIG_REGION_INDEX:
		region_info->offset = 0;
		region_info->size   = MDPY_CONFIG_SPACE_SIZE;
		region_info->flags  = (VFIO_REGION_INFO_FLAG_READ |
				       VFIO_REGION_INFO_FLAG_WRITE);
		break;
	case VFIO_PCI_BAR0_REGION_INDEX:
	case MDPY_DISPLAY_REGION:
		region_info->offset = MDPY_MEMORY_BAR_OFFSET;
		region_info->size   = mdev_state->memsize;
		region_info->flags  = (VFIO_REGION_INFO_FLAG_READ  |
				       VFIO_REGION_INFO_FLAG_WRITE |
				       VFIO_REGION_INFO_FLAG_MMAP);
		break;
	default:
		region_info->size   = 0;
		region_info->offset = 0;
		region_info->flags  = 0;
	}

	return 0;
}

static int mdpy_get_irq_info(struct vfio_irq_info *irq_info)
{
	irq_info->count = 0;
	return 0;
}

static int mdpy_get_device_info(struct vfio_device_info *dev_info)
{
	dev_info->flags = VFIO_DEVICE_FLAGS_PCI;
	dev_info->num_regions = VFIO_PCI_NUM_REGIONS;
	dev_info->num_irqs = VFIO_PCI_NUM_IRQS;
	return 0;
}

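/*
 * QUERY_GFX_PLANE: only region-type planes (the framebuffer region above)
 * are supported, not dma-buf planes.
 */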
static int mdpy_query_gfx_plane(struct mdev_state *mdev_state,
				struct vfio_device_gfx_plane_info *plane)
{
	if (plane->flags & VFIO_GFX_PLANE_TYPE_PROBE) {
		if (plane->flags == (VFIO_GFX_PLANE_TYPE_PROBE |
				     VFIO_GFX_PLANE_TYPE_REGION))
			return 0;
		return -EINVAL;
	}

	if (plane->flags != VFIO_GFX_PLANE_TYPE_REGION)
		return -EINVAL;

	plane->drm_format     = mdev_state->type->format;
	plane->width	      = mdev_state->type->width;
	plane->height	      = mdev_state->type->height;
	plane->stride	      = (mdev_state->type->width *
				 mdev_state->type->bytepp);
	plane->size	      = mdev_state->memsize;
	plane->region_index   = MDPY_DISPLAY_REGION;

	/* unused */
	plane->drm_format_mod = 0;
	plane->x_pos	      = 0;
	plane->y_pos	      = 0;
	plane->x_hot	      = 0;
	plane->y_hot	      = 0;

	return 0;
}

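/*
 * VFIO ioctl dispatch: device/region/irq info, gfx plane query and reset.
 * SET_IRQS is rejected since the device has no interrupts.
 */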
static long mdpy_ioctl(struct vfio_device *vdev, unsigned int cmd,
		       unsigned long arg)
{
	int ret = 0;
	unsigned long minsz;
	struct mdev_state *mdev_state =
		container_of(vdev, struct mdev_state, vdev);

	switch (cmd) {
	case VFIO_DEVICE_GET_INFO:
	{
		struct vfio_device_info info;

		minsz = offsetofend(struct vfio_device_info, num_irqs);

		if (copy_from_user(&info, (void __user *)arg, minsz))
			return -EFAULT;

		if (info.argsz < minsz)
			return -EINVAL;

		ret = mdpy_get_device_info(&info);
		if (ret)
			return ret;

		memcpy(&mdev_state->dev_info, &info, sizeof(info));

		if (copy_to_user((void __user *)arg, &info, minsz))
			return -EFAULT;

		return 0;
	}
	case VFIO_DEVICE_GET_REGION_INFO:
	{
		struct vfio_region_info info;
		u16 cap_type_id = 0;
		void *cap_type = NULL;

		minsz = offsetofend(struct vfio_region_info, offset);

		if (copy_from_user(&info, (void __user *)arg, minsz))
			return -EFAULT;

		if (info.argsz < minsz)
			return -EINVAL;

		ret = mdpy_get_region_info(mdev_state, &info, &cap_type_id,
					   &cap_type);
		if (ret)
			return ret;

		if (copy_to_user((void __user *)arg, &info, minsz))
			return -EFAULT;

		return 0;
	}

	case VFIO_DEVICE_GET_IRQ_INFO:
	{
		struct vfio_irq_info info;

		minsz = offsetofend(struct vfio_irq_info, count);

		if (copy_from_user(&info, (void __user *)arg, minsz))
			return -EFAULT;

		if ((info.argsz < minsz) ||
		    (info.index >= mdev_state->dev_info.num_irqs))
			return -EINVAL;

		ret = mdpy_get_irq_info(&info);
		if (ret)
			return ret;

		if (copy_to_user((void __user *)arg, &info, minsz))
			return -EFAULT;

		return 0;
	}

	case VFIO_DEVICE_QUERY_GFX_PLANE:
	{
		struct vfio_device_gfx_plane_info plane;

		minsz = offsetofend(struct vfio_device_gfx_plane_info,
				    region_index);

		if (copy_from_user(&plane, (void __user *)arg, minsz))
			return -EFAULT;

		if (plane.argsz < minsz)
			return -EINVAL;

		ret = mdpy_query_gfx_plane(mdev_state, &plane);
		if (ret)
			return ret;

		if (copy_to_user((void __user *)arg, &plane, minsz))
			return -EFAULT;

		return 0;
	}

	case VFIO_DEVICE_SET_IRQS:
		return -EINVAL;

	case VFIO_DEVICE_RESET:
		return mdpy_reset(mdev_state);
	}
	return -ENOTTY;
}

static ssize_t
resolution_show(struct device *dev, struct device_attribute *attr,
		char *buf)
{
	struct mdev_state *mdev_state = dev_get_drvdata(dev);

	return sprintf(buf, "%dx%d\n",
		       mdev_state->type->width,
		       mdev_state->type->height);
}
static DEVICE_ATTR_RO(resolution);

static struct attribute *mdev_dev_attrs[] = {
	&dev_attr_resolution.attr,
	NULL,
};

static const struct attribute_group mdev_dev_group = {
	.name  = "vendor",
	.attrs = mdev_dev_attrs,
};

static const struct attribute_group *mdev_dev_groups[] = {
	&mdev_dev_group,
	NULL,
};

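/*
 * Attributes shown under mdev_supported_types/<type>/ in sysfs:
 * name, description, available_instances and device_api.
 */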
static ssize_t name_show(struct mdev_type *mtype,
			 struct mdev_type_attribute *attr, char *buf)
{
	const struct mdpy_type *type =
		&mdpy_types[mtype_get_type_group_id(mtype)];

	return sprintf(buf, "%s\n", type->name);
}
static MDEV_TYPE_ATTR_RO(name);

static ssize_t description_show(struct mdev_type *mtype,
				struct mdev_type_attribute *attr, char *buf)
{
	const struct mdpy_type *type =
		&mdpy_types[mtype_get_type_group_id(mtype)];

	return sprintf(buf, "virtual display, %dx%d framebuffer\n",
		       type->width, type->height);
}
static MDEV_TYPE_ATTR_RO(description);

static ssize_t available_instances_show(struct mdev_type *mtype,
					struct mdev_type_attribute *attr,
					char *buf)
{
	return sprintf(buf, "%d\n", max_devices - mdpy_count);
}
static MDEV_TYPE_ATTR_RO(available_instances);

static ssize_t device_api_show(struct mdev_type *mtype,
			       struct mdev_type_attribute *attr, char *buf)
{
	return sprintf(buf, "%s\n", VFIO_DEVICE_API_PCI_STRING);
}
static MDEV_TYPE_ATTR_RO(device_api);

static struct attribute *mdev_types_attrs[] = {
	&mdev_type_attr_name.attr,
	&mdev_type_attr_description.attr,
	&mdev_type_attr_device_api.attr,
	&mdev_type_attr_available_instances.attr,
	NULL,
};

static struct attribute_group mdev_type_group1 = {
	.name  = MDPY_TYPE_1,
	.attrs = mdev_types_attrs,
};

static struct attribute_group mdev_type_group2 = {
	.name  = MDPY_TYPE_2,
	.attrs = mdev_types_attrs,
};

static struct attribute_group mdev_type_group3 = {
	.name  = MDPY_TYPE_3,
	.attrs = mdev_types_attrs,
};

static struct attribute_group *mdev_type_groups[] = {
	&mdev_type_group1,
	&mdev_type_group2,
	&mdev_type_group3,
	NULL,
};

static const struct vfio_device_ops mdpy_dev_ops = {
	.init = mdpy_init_dev,
	.release = mdpy_release_dev,
	.read = mdpy_read,
	.write = mdpy_write,
	.ioctl = mdpy_ioctl,
	.mmap = mdpy_mmap,
};

static struct mdev_driver mdpy_driver = {
	.driver = {
		.name = "mdpy",
		.owner = THIS_MODULE,
		.mod_name = KBUILD_MODNAME,
		.dev_groups = mdev_dev_groups,
	},
	.probe = mdpy_probe,
	.remove	= mdpy_remove,
	.supported_type_groups = mdev_type_groups,
};

static const struct file_operations vd_fops = {
	.owner		= THIS_MODULE,
};

static void mdpy_device_release(struct device *dev)
{
	/* nothing */
}

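/*
 * Module init: reserve a char dev region and register an (empty) cdev, the
 * mdev driver, the "mdpy" class, the parent device and finally the mdev
 * parent that exposes the supported types.
 */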
static int __init mdpy_dev_init(void)
{
	int ret = 0;

	ret = alloc_chrdev_region(&mdpy_devt, 0, MINORMASK + 1, MDPY_NAME);
	if (ret < 0) {
		pr_err("Error: failed to register mdpy_dev, err: %d\n", ret);
		return ret;
	}
	cdev_init(&mdpy_cdev, &vd_fops);
	cdev_add(&mdpy_cdev, mdpy_devt, MINORMASK + 1);
	pr_info("%s: major %d\n", __func__, MAJOR(mdpy_devt));

	ret = mdev_register_driver(&mdpy_driver);
	if (ret)
		goto err_cdev;

	mdpy_class = class_create(THIS_MODULE, MDPY_CLASS_NAME);
	if (IS_ERR(mdpy_class)) {
		pr_err("Error: failed to register mdpy_dev class\n");
		ret = PTR_ERR(mdpy_class);
		goto err_driver;
	}
	mdpy_dev.class = mdpy_class;
	mdpy_dev.release = mdpy_device_release;
	dev_set_name(&mdpy_dev, "%s", MDPY_NAME);

	ret = device_register(&mdpy_dev);
	if (ret)
		goto err_class;

	ret = mdev_register_parent(&mdpy_parent, &mdpy_dev, &mdpy_driver);
	if (ret)
		goto err_device;

	return 0;

err_device:
	device_unregister(&mdpy_dev);
err_class:
	class_destroy(mdpy_class);
err_driver:
	mdev_unregister_driver(&mdpy_driver);
err_cdev:
	cdev_del(&mdpy_cdev);
	unregister_chrdev_region(mdpy_devt, MINORMASK + 1);
	return ret;
}

static void __exit mdpy_dev_exit(void)
{
	mdpy_dev.bus = NULL;
	mdev_unregister_parent(&mdpy_parent);

	device_unregister(&mdpy_dev);
	mdev_unregister_driver(&mdpy_driver);
	cdev_del(&mdpy_cdev);
	unregister_chrdev_region(mdpy_devt, MINORMASK + 1);
	class_destroy(mdpy_class);
	mdpy_class = NULL;
}

module_init(mdpy_dev_init)
module_exit(mdpy_dev_exit)