// SPDX-License-Identifier: GPL-2.0
/*
 * Mediated virtual PCI display host device driver
 *
 * See mdpy-defs.h for device specs
 *
 *   (c) Gerd Hoffmann <kraxel@redhat.com>
 *
 * based on mtty driver which is:
 *   Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
 *   Author: Neo Jia <cjia@nvidia.com>
 *           Kirti Wankhede <kwankhede@nvidia.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/init.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/cdev.h>
#include <linux/vfio.h>
#include <linux/iommu.h>
#include <linux/sysfs.h>
#include <linux/mdev.h>
#include <linux/pci.h>
#include <drm/drm_fourcc.h>
#include "mdpy-defs.h"

#define MDPY_NAME		"mdpy"
#define MDPY_CLASS_NAME		"mdpy"

#define MDPY_CONFIG_SPACE_SIZE	0xff
#define MDPY_MEMORY_BAR_OFFSET	PAGE_SIZE
#define MDPY_DISPLAY_REGION	16

#define STORE_LE16(addr, val)	(*(u16 *)addr = val)
#define STORE_LE32(addr, val)	(*(u32 *)addr = val)

MODULE_LICENSE("GPL v2");

static int max_devices = 4;
module_param_named(count, max_devices, int, 0444);
MODULE_PARM_DESC(count, "number of " MDPY_NAME " devices");

#define MDPY_TYPE_1 "vga"
#define MDPY_TYPE_2 "xga"
#define MDPY_TYPE_3 "hd"

static const struct mdpy_type {
	const char *name;
	u32 format;
	u32 bytepp;
	u32 width;
	u32 height;
} mdpy_types[] = {
	{
		.name	= MDPY_CLASS_NAME "-" MDPY_TYPE_1,
		.format	= DRM_FORMAT_XRGB8888,
		.bytepp	= 4,
		.width	= 640,
		.height	= 480,
	}, {
		.name	= MDPY_CLASS_NAME "-" MDPY_TYPE_2,
		.format	= DRM_FORMAT_XRGB8888,
		.bytepp	= 4,
		.width	= 1024,
		.height	= 768,
	}, {
		.name	= MDPY_CLASS_NAME "-" MDPY_TYPE_3,
		.format	= DRM_FORMAT_XRGB8888,
		.bytepp	= 4,
		.width	= 1920,
		.height	= 1080,
	},
};

static dev_t		mdpy_devt;
static struct class	*mdpy_class;
static struct cdev	mdpy_cdev;
static struct device	mdpy_dev;
static u32		mdpy_count;

/* State of each mdev device */
struct mdev_state {
	u8 *vconfig;
	u32 bar_mask;
	struct mutex ops_lock;
	struct mdev_device *mdev;
	struct vfio_device_info dev_info;

	const struct mdpy_type *type;
	u32 memsize;
	void *memblk;
};

static void mdpy_create_config_space(struct mdev_state *mdev_state)
{
	STORE_LE16((u16 *) &mdev_state->vconfig[PCI_VENDOR_ID],
		   MDPY_PCI_VENDOR_ID);
	STORE_LE16((u16 *) &mdev_state->vconfig[PCI_DEVICE_ID],
		   MDPY_PCI_DEVICE_ID);
	STORE_LE16((u16 *) &mdev_state->vconfig[PCI_SUBSYSTEM_VENDOR_ID],
		   MDPY_PCI_SUBVENDOR_ID);
	STORE_LE16((u16 *) &mdev_state->vconfig[PCI_SUBSYSTEM_ID],
		   MDPY_PCI_SUBDEVICE_ID);

	STORE_LE16((u16 *) &mdev_state->vconfig[PCI_COMMAND],
		   PCI_COMMAND_IO | PCI_COMMAND_MEMORY);
	STORE_LE16((u16 *) &mdev_state->vconfig[PCI_STATUS],
		   PCI_STATUS_CAP_LIST);
	STORE_LE16((u16 *) &mdev_state->vconfig[PCI_CLASS_DEVICE],
		   PCI_CLASS_DISPLAY_OTHER);
	mdev_state->vconfig[PCI_CLASS_REVISION] = 0x01;

	STORE_LE32((u32 *) &mdev_state->vconfig[PCI_BASE_ADDRESS_0],
		   PCI_BASE_ADDRESS_SPACE_MEMORY |
		   PCI_BASE_ADDRESS_MEM_TYPE_32  |
		   PCI_BASE_ADDRESS_MEM_PREFETCH);
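	/*
	 * memsize is a power of two (see mdpy_create), so ~memsize + 1 is
	 * the usual BAR size mask: it keeps only the address bits above the
	 * BAR size and is returned when the guest probes the BAR size by
	 * writing 0xffffffff (see handle_pci_cfg_write below).
	 */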
	mdev_state->bar_mask = ~(mdev_state->memsize) + 1;

	/* vendor specific capability for the config registers */
	mdev_state->vconfig[PCI_CAPABILITY_LIST]       = MDPY_VENDORCAP_OFFSET;
	mdev_state->vconfig[MDPY_VENDORCAP_OFFSET + 0] = 0x09; /* vendor cap */
	mdev_state->vconfig[MDPY_VENDORCAP_OFFSET + 1] = 0x00; /* next ptr */
	mdev_state->vconfig[MDPY_VENDORCAP_OFFSET + 2] = MDPY_VENDORCAP_SIZE;
	STORE_LE32((u32 *) &mdev_state->vconfig[MDPY_FORMAT_OFFSET],
		   mdev_state->type->format);
	STORE_LE32((u32 *) &mdev_state->vconfig[MDPY_WIDTH_OFFSET],
		   mdev_state->type->width);
	STORE_LE32((u32 *) &mdev_state->vconfig[MDPY_HEIGHT_OFFSET],
		   mdev_state->type->height);
}

static void handle_pci_cfg_write(struct mdev_state *mdev_state, u16 offset,
				 char *buf, u32 count)
{
	struct device *dev = mdev_dev(mdev_state->mdev);
	u32 cfg_addr;

	switch (offset) {
	case PCI_BASE_ADDRESS_0:
		cfg_addr = *(u32 *)buf;

		if (cfg_addr == 0xffffffff) {
			cfg_addr = (cfg_addr & mdev_state->bar_mask);
		} else {
			cfg_addr &= PCI_BASE_ADDRESS_MEM_MASK;
			if (cfg_addr)
				dev_info(dev, "BAR0 @ 0x%x\n", cfg_addr);
		}

		cfg_addr |= (mdev_state->vconfig[offset] &
			     ~PCI_BASE_ADDRESS_MEM_MASK);
		STORE_LE32(&mdev_state->vconfig[offset], cfg_addr);
		break;
	}
}

/*
 * Dispatch a single device access: offsets below MDPY_CONFIG_SPACE_SIZE go
 * to the virtual PCI config space, offsets inside the memory BAR window go
 * to the framebuffer, everything else is rejected.
 */
static ssize_t mdev_access(struct mdev_device *mdev, char *buf, size_t count,
			   loff_t pos, bool is_write)
{
	struct mdev_state *mdev_state = mdev_get_drvdata(mdev);
	struct device *dev = mdev_dev(mdev);
	int ret = 0;

	mutex_lock(&mdev_state->ops_lock);

	if (pos < MDPY_CONFIG_SPACE_SIZE) {
		if (is_write)
			handle_pci_cfg_write(mdev_state, pos, buf, count);
		else
			memcpy(buf, (mdev_state->vconfig + pos), count);

	} else if ((pos >= MDPY_MEMORY_BAR_OFFSET) &&
		   (pos + count <=
		    MDPY_MEMORY_BAR_OFFSET + mdev_state->memsize)) {
		/* make pos relative to the start of the framebuffer */
		pos -= MDPY_MEMORY_BAR_OFFSET;
		if (is_write)
			memcpy(mdev_state->memblk + pos, buf, count);
		else
			memcpy(buf, mdev_state->memblk + pos, count);

	} else {
		dev_info(dev, "%s: %s @0x%llx (unhandled)\n",
			 __func__, is_write ? "WR" : "RD", pos);
		ret = -1;
		goto accessfailed;
	}

	ret = count;

accessfailed:
	mutex_unlock(&mdev_state->ops_lock);

	return ret;
}

static int mdpy_reset(struct mdev_device *mdev)
{
	struct mdev_state *mdev_state = mdev_get_drvdata(mdev);
	u32 stride, i;

	/* initialize with gray gradient */
	stride = mdev_state->type->width * mdev_state->type->bytepp;
	for (i = 0; i < mdev_state->type->height; i++)
		memset(mdev_state->memblk + i * stride,
		       i * 255 / mdev_state->type->height,
		       stride);
	return 0;
}

static int mdpy_create(struct mdev_device *mdev)
{
	const struct mdpy_type *type =
		&mdpy_types[mdev_get_type_group_id(mdev)];
	struct device *dev = mdev_dev(mdev);
	struct mdev_state *mdev_state;
	u32 fbsize;

	if (mdpy_count >= max_devices)
		return -ENOMEM;

	mdev_state = kzalloc(sizeof(struct mdev_state), GFP_KERNEL);
	if (mdev_state == NULL)
		return -ENOMEM;

	mdev_state->vconfig = kzalloc(MDPY_CONFIG_SPACE_SIZE, GFP_KERNEL);
	if (mdev_state->vconfig == NULL) {
		kfree(mdev_state);
		return -ENOMEM;
	}

	fbsize = roundup_pow_of_two(type->width * type->height * type->bytepp);

	mdev_state->memblk = vmalloc_user(fbsize);
	if (!mdev_state->memblk) {
		kfree(mdev_state->vconfig);
		kfree(mdev_state);
		return -ENOMEM;
	}
	dev_info(dev, "%s: %s (%dx%d)\n", __func__, type->name, type->width,
		 type->height);

	mutex_init(&mdev_state->ops_lock);
	mdev_state->mdev = mdev;
	mdev_set_drvdata(mdev, mdev_state);

	mdev_state->type    = type;
	mdev_state->memsize = fbsize;
	mdpy_create_config_space(mdev_state);
	mdpy_reset(mdev);

	mdpy_count++;
	return 0;
}

static int mdpy_remove(struct mdev_device *mdev)
{
	struct mdev_state *mdev_state = mdev_get_drvdata(mdev);
	struct device *dev = mdev_dev(mdev);

	dev_info(dev, "%s\n", __func__);

	mdev_set_drvdata(mdev, NULL);
	vfree(mdev_state->memblk);
	kfree(mdev_state->vconfig);
	kfree(mdev_state);

	mdpy_count--;
	return 0;
}

static ssize_t mdpy_read(struct mdev_device *mdev, char __user *buf,
			 size_t count, loff_t *ppos)
{
	unsigned int done = 0;
	int ret;

	while (count) {
		size_t filled;

		if (count >= 4 && !(*ppos % 4)) {
			u32 val;

			ret = mdev_access(mdev, (char *)&val, sizeof(val),
					  *ppos, false);
			if (ret <= 0)
				goto read_err;

			if (copy_to_user(buf, &val, sizeof(val)))
				goto read_err;

			filled = 4;
		} else if (count >= 2 && !(*ppos % 2)) {
			u16 val;

			ret = mdev_access(mdev, (char *)&val, sizeof(val),
					  *ppos, false);
			if (ret <= 0)
				goto read_err;

			if (copy_to_user(buf, &val, sizeof(val)))
				goto read_err;

			filled = 2;
		} else {
			u8 val;

			ret = mdev_access(mdev, (char *)&val, sizeof(val),
					  *ppos, false);
			if (ret <= 0)
				goto read_err;

			if (copy_to_user(buf, &val, sizeof(val)))
				goto read_err;

			filled = 1;
		}

		count -= filled;
		done += filled;
		*ppos += filled;
		buf += filled;
	}

	return done;

read_err:
	return -EFAULT;
}
/*
 * Like mdpy_read() above, split the user buffer into naturally aligned
 * 4/2/1 byte chunks and forward each chunk to mdev_access().
 */
static ssize_t mdpy_write(struct mdev_device *mdev, const char __user *buf,
			  size_t count, loff_t *ppos)
{
	unsigned int done = 0;
	int ret;

	while (count) {
		size_t filled;

		if (count >= 4 && !(*ppos % 4)) {
			u32 val;

			if (copy_from_user(&val, buf, sizeof(val)))
				goto write_err;

			ret = mdev_access(mdev, (char *)&val, sizeof(val),
					  *ppos, true);
			if (ret <= 0)
				goto write_err;

			filled = 4;
		} else if (count >= 2 && !(*ppos % 2)) {
			u16 val;

			if (copy_from_user(&val, buf, sizeof(val)))
				goto write_err;

			ret = mdev_access(mdev, (char *)&val, sizeof(val),
					  *ppos, true);
			if (ret <= 0)
				goto write_err;

			filled = 2;
		} else {
			u8 val;

			if (copy_from_user(&val, buf, sizeof(val)))
				goto write_err;

			ret = mdev_access(mdev, (char *)&val, sizeof(val),
					  *ppos, true);
			if (ret <= 0)
				goto write_err;

			filled = 1;
		}
		count -= filled;
		done += filled;
		*ppos += filled;
		buf += filled;
	}

	return done;
write_err:
	return -EFAULT;
}

static int mdpy_mmap(struct mdev_device *mdev, struct vm_area_struct *vma)
{
	struct mdev_state *mdev_state = mdev_get_drvdata(mdev);

	if (vma->vm_pgoff != MDPY_MEMORY_BAR_OFFSET >> PAGE_SHIFT)
		return -EINVAL;
	if (vma->vm_end < vma->vm_start)
		return -EINVAL;
	if (vma->vm_end - vma->vm_start > mdev_state->memsize)
		return -EINVAL;
	if ((vma->vm_flags & VM_SHARED) == 0)
		return -EINVAL;

	return remap_vmalloc_range(vma, mdev_state->memblk, 0);
}

static int mdpy_get_region_info(struct mdev_device *mdev,
				struct vfio_region_info *region_info,
				u16 *cap_type_id, void **cap_type)
{
	struct mdev_state *mdev_state;

	mdev_state = mdev_get_drvdata(mdev);
	if (!mdev_state)
		return -EINVAL;

	if (region_info->index >= VFIO_PCI_NUM_REGIONS &&
	    region_info->index != MDPY_DISPLAY_REGION)
		return -EINVAL;

	switch (region_info->index) {
	case VFIO_PCI_CONFIG_REGION_INDEX:
		region_info->offset = 0;
		region_info->size   = MDPY_CONFIG_SPACE_SIZE;
		region_info->flags  = (VFIO_REGION_INFO_FLAG_READ |
				       VFIO_REGION_INFO_FLAG_WRITE);
		break;
	case VFIO_PCI_BAR0_REGION_INDEX:
	case MDPY_DISPLAY_REGION:
		region_info->offset = MDPY_MEMORY_BAR_OFFSET;
		region_info->size   = mdev_state->memsize;
		region_info->flags  = (VFIO_REGION_INFO_FLAG_READ  |
				       VFIO_REGION_INFO_FLAG_WRITE |
				       VFIO_REGION_INFO_FLAG_MMAP);
		break;
	default:
		region_info->size   = 0;
		region_info->offset = 0;
		region_info->flags  = 0;
	}

	return 0;
}

static int mdpy_get_irq_info(struct mdev_device *mdev,
			     struct vfio_irq_info *irq_info)
{
	irq_info->count = 0;
	return 0;
}

static int mdpy_get_device_info(struct mdev_device *mdev,
				struct vfio_device_info *dev_info)
{
	dev_info->flags = VFIO_DEVICE_FLAGS_PCI;
	dev_info->num_regions = VFIO_PCI_NUM_REGIONS;
	dev_info->num_irqs = VFIO_PCI_NUM_IRQS;
	return 0;
}
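/*
 * Report the guest framebuffer as a region-type plane: the display is not
 * exported as a dma-buf; instead userspace (for example QEMU) mmap()s the
 * region whose index is returned in region_index (MDPY_DISPLAY_REGION) and
 * reads the pixels from there.
 */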
static int mdpy_query_gfx_plane(struct mdev_device *mdev,
				struct vfio_device_gfx_plane_info *plane)
{
	struct mdev_state *mdev_state = mdev_get_drvdata(mdev);

	if (plane->flags & VFIO_GFX_PLANE_TYPE_PROBE) {
		if (plane->flags == (VFIO_GFX_PLANE_TYPE_PROBE |
				     VFIO_GFX_PLANE_TYPE_REGION))
			return 0;
		return -EINVAL;
	}

	if (plane->flags != VFIO_GFX_PLANE_TYPE_REGION)
		return -EINVAL;

	plane->drm_format = mdev_state->type->format;
	plane->width	  = mdev_state->type->width;
	plane->height	  = mdev_state->type->height;
	plane->stride	  = (mdev_state->type->width *
			     mdev_state->type->bytepp);
	plane->size	  = mdev_state->memsize;
	plane->region_index = MDPY_DISPLAY_REGION;

	/* unused */
	plane->drm_format_mod = 0;
	plane->x_pos	      = 0;
	plane->y_pos	      = 0;
	plane->x_hot	      = 0;
	plane->y_hot	      = 0;

	return 0;
}

static long mdpy_ioctl(struct mdev_device *mdev, unsigned int cmd,
		       unsigned long arg)
{
	int ret = 0;
	unsigned long minsz;
	struct mdev_state *mdev_state;

	mdev_state = mdev_get_drvdata(mdev);

	switch (cmd) {
	case VFIO_DEVICE_GET_INFO:
	{
		struct vfio_device_info info;

		minsz = offsetofend(struct vfio_device_info, num_irqs);

		if (copy_from_user(&info, (void __user *)arg, minsz))
			return -EFAULT;

		if (info.argsz < minsz)
			return -EINVAL;

		ret = mdpy_get_device_info(mdev, &info);
		if (ret)
			return ret;

		memcpy(&mdev_state->dev_info, &info, sizeof(info));

		if (copy_to_user((void __user *)arg, &info, minsz))
			return -EFAULT;

		return 0;
	}
	case VFIO_DEVICE_GET_REGION_INFO:
	{
		struct vfio_region_info info;
		u16 cap_type_id = 0;
		void *cap_type = NULL;

		minsz = offsetofend(struct vfio_region_info, offset);

		if (copy_from_user(&info, (void __user *)arg, minsz))
			return -EFAULT;

		if (info.argsz < minsz)
			return -EINVAL;

		ret = mdpy_get_region_info(mdev, &info, &cap_type_id,
					   &cap_type);
		if (ret)
			return ret;

		if (copy_to_user((void __user *)arg, &info, minsz))
			return -EFAULT;

		return 0;
	}

	case VFIO_DEVICE_GET_IRQ_INFO:
	{
		struct vfio_irq_info info;

		minsz = offsetofend(struct vfio_irq_info, count);

		if (copy_from_user(&info, (void __user *)arg, minsz))
			return -EFAULT;

		if ((info.argsz < minsz) ||
		    (info.index >= mdev_state->dev_info.num_irqs))
			return -EINVAL;

		ret = mdpy_get_irq_info(mdev, &info);
		if (ret)
			return ret;

		if (copy_to_user((void __user *)arg, &info, minsz))
			return -EFAULT;

		return 0;
	}

	case VFIO_DEVICE_QUERY_GFX_PLANE:
	{
		struct vfio_device_gfx_plane_info plane;

		minsz = offsetofend(struct vfio_device_gfx_plane_info,
				    region_index);

		if (copy_from_user(&plane, (void __user *)arg, minsz))
			return -EFAULT;

		if (plane.argsz < minsz)
			return -EINVAL;

		ret = mdpy_query_gfx_plane(mdev, &plane);
		if (ret)
			return ret;

		if (copy_to_user((void __user *)arg, &plane, minsz))
			return -EFAULT;

		return 0;
	}

	case VFIO_DEVICE_SET_IRQS:
		return -EINVAL;

	case VFIO_DEVICE_RESET:
		return mdpy_reset(mdev);
	}
	return -ENOTTY;
}

static int mdpy_open(struct mdev_device *mdev)
{
	if (!try_module_get(THIS_MODULE))
		return -ENODEV;

	return 0;
}

static void mdpy_close(struct mdev_device *mdev)
{
	module_put(THIS_MODULE);
}

static ssize_t
resolution_show(struct device *dev, struct device_attribute *attr,
		char *buf)
{
	struct mdev_device *mdev = mdev_from_dev(dev);
	struct mdev_state *mdev_state = mdev_get_drvdata(mdev);

	return sprintf(buf, "%dx%d\n",
		       mdev_state->type->width,
		       mdev_state->type->height);
}
static DEVICE_ATTR_RO(resolution);

static struct attribute *mdev_dev_attrs[] = {
	&dev_attr_resolution.attr,
	NULL,
};

/* per-device attributes, shown in a "vendor" subdirectory of each mdev */
static const struct attribute_group mdev_dev_group = {
	.name  = "vendor",
	.attrs = mdev_dev_attrs,
};

static const struct attribute_group *mdev_dev_groups[] = {
	&mdev_dev_group,
	NULL,
};

static ssize_t name_show(struct mdev_type *mtype,
			 struct mdev_type_attribute *attr, char *buf)
{
	const struct mdpy_type *type =
		&mdpy_types[mtype_get_type_group_id(mtype)];

	return sprintf(buf, "%s\n", type->name);
}
static MDEV_TYPE_ATTR_RO(name);

static ssize_t description_show(struct mdev_type *mtype,
				struct mdev_type_attribute *attr, char *buf)
{
	const struct mdpy_type *type =
		&mdpy_types[mtype_get_type_group_id(mtype)];

	return sprintf(buf, "virtual display, %dx%d framebuffer\n",
		       type->width, type->height);
}
static MDEV_TYPE_ATTR_RO(description);

static ssize_t available_instances_show(struct mdev_type *mtype,
					struct mdev_type_attribute *attr,
					char *buf)
{
	return sprintf(buf, "%d\n", max_devices - mdpy_count);
}
static MDEV_TYPE_ATTR_RO(available_instances);

static ssize_t device_api_show(struct mdev_type *mtype,
			       struct mdev_type_attribute *attr, char *buf)
{
	return sprintf(buf, "%s\n", VFIO_DEVICE_API_PCI_STRING);
}
static MDEV_TYPE_ATTR_RO(device_api);

static struct attribute *mdev_types_attrs[] = {
	&mdev_type_attr_name.attr,
	&mdev_type_attr_description.attr,
	&mdev_type_attr_device_api.attr,
	&mdev_type_attr_available_instances.attr,
	NULL,
};

static struct attribute_group mdev_type_group1 = {
	.name  = MDPY_TYPE_1,
	.attrs = mdev_types_attrs,
};

static struct attribute_group mdev_type_group2 = {
	.name  = MDPY_TYPE_2,
	.attrs = mdev_types_attrs,
};

static struct attribute_group mdev_type_group3 = {
	.name  = MDPY_TYPE_3,
	.attrs = mdev_types_attrs,
};

static struct attribute_group *mdev_type_groups[] = {
	&mdev_type_group1,
	&mdev_type_group2,
	&mdev_type_group3,
	NULL,
};

static const struct mdev_parent_ops mdev_fops = {
	.owner			= THIS_MODULE,
	.mdev_attr_groups	= mdev_dev_groups,
	.supported_type_groups	= mdev_type_groups,
	.create			= mdpy_create,
	.remove			= mdpy_remove,
	.open			= mdpy_open,
	.release		= mdpy_close,
	.read			= mdpy_read,
	.write			= mdpy_write,
	.ioctl			= mdpy_ioctl,
	.mmap			= mdpy_mmap,
};

static const struct file_operations vd_fops = {
	.owner		= THIS_MODULE,
};

static void mdpy_device_release(struct device *dev)
{
	/* nothing */
}
/*
 * Module init: allocate a chrdev region and a placeholder cdev, create the
 * "mdpy" class and the parent device, then register the parent device with
 * the mdev core.  The error labels unwind these steps in reverse order.
 */
static int __init mdpy_dev_init(void)
{
	int ret = 0;

	ret = alloc_chrdev_region(&mdpy_devt, 0, MINORMASK + 1, MDPY_NAME);
	if (ret < 0) {
		pr_err("Error: failed to register mdpy_dev, err: %d\n", ret);
		return ret;
	}
	cdev_init(&mdpy_cdev, &vd_fops);
	cdev_add(&mdpy_cdev, mdpy_devt, MINORMASK + 1);
	pr_info("%s: major %d\n", __func__, MAJOR(mdpy_devt));

	mdpy_class = class_create(THIS_MODULE, MDPY_CLASS_NAME);
	if (IS_ERR(mdpy_class)) {
		pr_err("Error: failed to register mdpy_dev class\n");
		ret = PTR_ERR(mdpy_class);
		goto failed1;
	}
	mdpy_dev.class = mdpy_class;
	mdpy_dev.release = mdpy_device_release;
	dev_set_name(&mdpy_dev, "%s", MDPY_NAME);

	ret = device_register(&mdpy_dev);
	if (ret)
		goto failed2;

	ret = mdev_register_device(&mdpy_dev, &mdev_fops);
	if (ret)
		goto failed3;

	return 0;

failed3:
	device_unregister(&mdpy_dev);
failed2:
	class_destroy(mdpy_class);
failed1:
	cdev_del(&mdpy_cdev);
	unregister_chrdev_region(mdpy_devt, MINORMASK + 1);
	return ret;
}

static void __exit mdpy_dev_exit(void)
{
	mdpy_dev.bus = NULL;
	mdev_unregister_device(&mdpy_dev);

	device_unregister(&mdpy_dev);
	cdev_del(&mdpy_cdev);
	unregister_chrdev_region(mdpy_devt, MINORMASK + 1);
	class_destroy(mdpy_class);
	mdpy_class = NULL;
}

module_init(mdpy_dev_init)
module_exit(mdpy_dev_exit)
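/*
 * Usage sketch (illustrative, not part of the driver): once the module is
 * loaded, an instance is created through the mdev sysfs interface of the
 * parent device registered above.  The exact paths and the QEMU invocation
 * below are assumptions based on the "mdpy" class/device names used in this
 * file:
 *
 *   UUID=$(uuidgen)
 *   echo "$UUID" > \
 *     /sys/devices/virtual/mdpy/mdpy/mdev_supported_types/mdpy-vga/create
 *
 *   qemu-system-x86_64 ... \
 *     -device vfio-pci,sysfsdev=/sys/bus/mdev/devices/$UUID,display=on
 */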