// SPDX-License-Identifier: GPL-2.0
/*
 * Mediated virtual PCI display host device driver
 *
 * Emulate enough of qemu stdvga to make bochs-drm.ko happy. That is
 * basically the vram memory bar and the bochs dispi interface vbe
 * registers in the mmio register bar. Specifically it does *not*
 * include any legacy vga stuff. Device looks a lot like "qemu -device
 * secondary-vga".
 *
 *   (c) Gerd Hoffmann <kraxel@redhat.com>
 *
 * based on mtty driver which is:
 *   Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
 *       Author: Neo Jia <cjia@nvidia.com>
 *               Kirti Wankhede <kwankhede@nvidia.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/init.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/cdev.h>
#include <linux/vfio.h>
#include <linux/iommu.h>
#include <linux/sysfs.h>
#include <linux/mdev.h>
#include <linux/pci.h>
#include <linux/dma-buf.h>
#include <linux/highmem.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_rect.h>
#include <drm/drm_modeset_lock.h>
#include <drm/drm_property.h>
#include <drm/drm_plane.h>


#define VBE_DISPI_INDEX_ID               0x0
#define VBE_DISPI_INDEX_XRES             0x1
#define VBE_DISPI_INDEX_YRES             0x2
#define VBE_DISPI_INDEX_BPP              0x3
#define VBE_DISPI_INDEX_ENABLE           0x4
#define VBE_DISPI_INDEX_BANK             0x5
#define VBE_DISPI_INDEX_VIRT_WIDTH       0x6
#define VBE_DISPI_INDEX_VIRT_HEIGHT      0x7
#define VBE_DISPI_INDEX_X_OFFSET         0x8
#define VBE_DISPI_INDEX_Y_OFFSET         0x9
#define VBE_DISPI_INDEX_VIDEO_MEMORY_64K 0xa
#define VBE_DISPI_INDEX_COUNT            0xb

#define VBE_DISPI_ID0   0xB0C0
#define VBE_DISPI_ID1   0xB0C1
#define VBE_DISPI_ID2   0xB0C2
#define VBE_DISPI_ID3   0xB0C3
#define VBE_DISPI_ID4   0xB0C4
#define VBE_DISPI_ID5   0xB0C5

#define VBE_DISPI_DISABLED      0x00
#define VBE_DISPI_ENABLED       0x01
#define VBE_DISPI_GETCAPS       0x02
#define VBE_DISPI_8BIT_DAC      0x20
#define VBE_DISPI_LFB_ENABLED   0x40
#define VBE_DISPI_NOCLEARMEM    0x80


#define MBOCHS_NAME               "mbochs"
#define MBOCHS_CLASS_NAME         "mbochs"

#define MBOCHS_EDID_REGION_INDEX  VFIO_PCI_NUM_REGIONS
#define MBOCHS_NUM_REGIONS        (MBOCHS_EDID_REGION_INDEX+1)

#define MBOCHS_CONFIG_SPACE_SIZE  0xff
#define MBOCHS_MMIO_BAR_OFFSET    PAGE_SIZE
#define MBOCHS_MMIO_BAR_SIZE      PAGE_SIZE
#define MBOCHS_EDID_OFFSET        (MBOCHS_MMIO_BAR_OFFSET + \
                                   MBOCHS_MMIO_BAR_SIZE)
#define MBOCHS_EDID_SIZE          PAGE_SIZE
#define MBOCHS_MEMORY_BAR_OFFSET  (MBOCHS_EDID_OFFSET + \
                                   MBOCHS_EDID_SIZE)

#define MBOCHS_EDID_BLOB_OFFSET   (MBOCHS_EDID_SIZE/2)

#define STORE_LE16(addr, val)     (*(u16 *)addr = val)
#define STORE_LE32(addr, val)     (*(u32 *)addr = val)


MODULE_LICENSE("GPL v2");

static int max_mbytes = 256;
module_param_named(count, max_mbytes, int, 0444);
MODULE_PARM_DESC(count, "megabytes available to " MBOCHS_NAME " devices");


#define MBOCHS_TYPE_1 "small"
#define MBOCHS_TYPE_2 "medium"
#define MBOCHS_TYPE_3 "large"
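/*
 * Three device types are exported. They differ only in how much video
 * memory they take from the module-wide pool (the "count" module
 * parameter) and in the maximum resolution advertised through the EDID
 * registers; max_x/max_y of 0 means "no fixed limit".
 */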
static const struct mbochs_type {
        const char *name;
        u32 mbytes;
        u32 max_x;
        u32 max_y;
} mbochs_types[] = {
        {
                .name   = MBOCHS_CLASS_NAME "-" MBOCHS_TYPE_1,
                .mbytes = 4,
                .max_x  = 800,
                .max_y  = 600,
        }, {
                .name   = MBOCHS_CLASS_NAME "-" MBOCHS_TYPE_2,
                .mbytes = 16,
                .max_x  = 1920,
                .max_y  = 1440,
        }, {
                .name   = MBOCHS_CLASS_NAME "-" MBOCHS_TYPE_3,
                .mbytes = 64,
                .max_x  = 0,
                .max_y  = 0,
        },
};


static dev_t            mbochs_devt;
static struct class     *mbochs_class;
static struct cdev      mbochs_cdev;
static struct device    mbochs_dev;
static atomic_t         mbochs_avail_mbytes;
static const struct vfio_device_ops mbochs_dev_ops;

struct vfio_region_info_ext {
        struct vfio_region_info          base;
        struct vfio_region_info_cap_type type;
};

struct mbochs_mode {
        u32 drm_format;
        u32 bytepp;
        u32 width;
        u32 height;
        u32 stride;
        u32 __pad;
        u64 offset;
        u64 size;
};

struct mbochs_dmabuf {
        struct mbochs_mode mode;
        u32 id;
        struct page **pages;
        pgoff_t pagecount;
        struct dma_buf *buf;
        struct mdev_state *mdev_state;
        struct list_head next;
        bool unlinked;
};

/* State of each mdev device */
struct mdev_state {
        struct vfio_device vdev;
        u8 *vconfig;
        u64 bar_mask[3];
        u32 memory_bar_mask;
        struct mutex ops_lock;
        struct mdev_device *mdev;

        const struct mbochs_type *type;
        u16 vbe[VBE_DISPI_INDEX_COUNT];
        u64 memsize;
        struct page **pages;
        pgoff_t pagecount;
        struct vfio_region_gfx_edid edid_regs;
        u8 edid_blob[0x400];

        struct list_head dmabufs;
        u32 active_id;
        u32 next_id;
};

static const char *vbe_name_list[VBE_DISPI_INDEX_COUNT] = {
        [VBE_DISPI_INDEX_ID]               = "id",
        [VBE_DISPI_INDEX_XRES]             = "xres",
        [VBE_DISPI_INDEX_YRES]             = "yres",
        [VBE_DISPI_INDEX_BPP]              = "bpp",
        [VBE_DISPI_INDEX_ENABLE]           = "enable",
        [VBE_DISPI_INDEX_BANK]             = "bank",
        [VBE_DISPI_INDEX_VIRT_WIDTH]       = "virt-width",
        [VBE_DISPI_INDEX_VIRT_HEIGHT]      = "virt-height",
        [VBE_DISPI_INDEX_X_OFFSET]         = "x-offset",
        [VBE_DISPI_INDEX_Y_OFFSET]         = "y-offset",
        [VBE_DISPI_INDEX_VIDEO_MEMORY_64K] = "video-mem",
};

static const char *vbe_name(u32 index)
{
        if (index < ARRAY_SIZE(vbe_name_list))
                return vbe_name_list[index];
        return "(invalid)";
}

static struct page *__mbochs_get_page(struct mdev_state *mdev_state,
                                      pgoff_t pgoff);
static struct page *mbochs_get_page(struct mdev_state *mdev_state,
                                    pgoff_t pgoff);

static void mbochs_create_config_space(struct mdev_state *mdev_state)
{
        STORE_LE16((u16 *) &mdev_state->vconfig[PCI_VENDOR_ID],
                   0x1234);
        STORE_LE16((u16 *) &mdev_state->vconfig[PCI_DEVICE_ID],
                   0x1111);
        STORE_LE16((u16 *) &mdev_state->vconfig[PCI_SUBSYSTEM_VENDOR_ID],
                   PCI_SUBVENDOR_ID_REDHAT_QUMRANET);
        STORE_LE16((u16 *) &mdev_state->vconfig[PCI_SUBSYSTEM_ID],
                   PCI_SUBDEVICE_ID_QEMU);

        STORE_LE16((u16 *) &mdev_state->vconfig[PCI_COMMAND],
                   PCI_COMMAND_IO | PCI_COMMAND_MEMORY);
        STORE_LE16((u16 *) &mdev_state->vconfig[PCI_CLASS_DEVICE],
                   PCI_CLASS_DISPLAY_OTHER);
        mdev_state->vconfig[PCI_CLASS_REVISION] = 0x01;

        STORE_LE32((u32 *) &mdev_state->vconfig[PCI_BASE_ADDRESS_0],
                   PCI_BASE_ADDRESS_SPACE_MEMORY |
                   PCI_BASE_ADDRESS_MEM_TYPE_32  |
                   PCI_BASE_ADDRESS_MEM_PREFETCH);
        mdev_state->bar_mask[0] = ~(mdev_state->memsize) + 1;

        STORE_LE32((u32 *) &mdev_state->vconfig[PCI_BASE_ADDRESS_2],
                   PCI_BASE_ADDRESS_SPACE_MEMORY |
                   PCI_BASE_ADDRESS_MEM_TYPE_32);
        mdev_state->bar_mask[2] = ~(MBOCHS_MMIO_BAR_SIZE) + 1;
}
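/*
 * Note on the bar_mask[] values above: BAR sizing follows the usual PCI
 * probe protocol, where the guest writes 0xffffffff to a BAR and reads
 * back a size mask. Storing ~(size) + 1 here lets handle_pci_cfg_write()
 * reproduce that behaviour for the emulated config space.
 */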
static int mbochs_check_framebuffer(struct mdev_state *mdev_state,
                                    struct mbochs_mode *mode)
{
        struct device *dev = mdev_dev(mdev_state->mdev);
        u16 *vbe = mdev_state->vbe;
        u32 virt_width;

        WARN_ON(!mutex_is_locked(&mdev_state->ops_lock));

        if (!(vbe[VBE_DISPI_INDEX_ENABLE] & VBE_DISPI_ENABLED))
                goto nofb;

        memset(mode, 0, sizeof(*mode));
        switch (vbe[VBE_DISPI_INDEX_BPP]) {
        case 32:
                mode->drm_format = DRM_FORMAT_XRGB8888;
                mode->bytepp = 4;
                break;
        default:
                dev_info_ratelimited(dev, "%s: bpp %d not supported\n",
                                     __func__, vbe[VBE_DISPI_INDEX_BPP]);
                goto nofb;
        }

        mode->width  = vbe[VBE_DISPI_INDEX_XRES];
        mode->height = vbe[VBE_DISPI_INDEX_YRES];
        virt_width = vbe[VBE_DISPI_INDEX_VIRT_WIDTH];
        if (virt_width < mode->width)
                virt_width = mode->width;
        mode->stride = virt_width * mode->bytepp;
        mode->size   = (u64)mode->stride * mode->height;
        mode->offset = ((u64)vbe[VBE_DISPI_INDEX_X_OFFSET] * mode->bytepp +
                        (u64)vbe[VBE_DISPI_INDEX_Y_OFFSET] * mode->stride);

        if (mode->width < 64 || mode->height < 64) {
                dev_info_ratelimited(dev, "%s: invalid resolution %dx%d\n",
                                     __func__, mode->width, mode->height);
                goto nofb;
        }
        if (mode->offset + mode->size > mdev_state->memsize) {
                dev_info_ratelimited(dev, "%s: framebuffer memory overflow\n",
                                     __func__);
                goto nofb;
        }

        return 0;

nofb:
        memset(mode, 0, sizeof(*mode));
        return -EINVAL;
}

static bool mbochs_modes_equal(struct mbochs_mode *mode1,
                               struct mbochs_mode *mode2)
{
        return memcmp(mode1, mode2, sizeof(struct mbochs_mode)) == 0;
}

static void handle_pci_cfg_write(struct mdev_state *mdev_state, u16 offset,
                                 char *buf, u32 count)
{
        struct device *dev = mdev_dev(mdev_state->mdev);
        int index = (offset - PCI_BASE_ADDRESS_0) / 0x04;
        u32 cfg_addr;

        switch (offset) {
        case PCI_BASE_ADDRESS_0:
        case PCI_BASE_ADDRESS_2:
                cfg_addr = *(u32 *)buf;

                if (cfg_addr == 0xffffffff) {
                        cfg_addr = (cfg_addr & mdev_state->bar_mask[index]);
                } else {
                        cfg_addr &= PCI_BASE_ADDRESS_MEM_MASK;
                        if (cfg_addr)
                                dev_info(dev, "BAR #%d @ 0x%x\n",
                                         index, cfg_addr);
                }

                cfg_addr |= (mdev_state->vconfig[offset] &
                             ~PCI_BASE_ADDRESS_MEM_MASK);
                STORE_LE32(&mdev_state->vconfig[offset], cfg_addr);
                break;
        }
}

static void handle_mmio_write(struct mdev_state *mdev_state, u16 offset,
                              char *buf, u32 count)
{
        struct device *dev = mdev_dev(mdev_state->mdev);
        int index;
        u16 reg16;

        switch (offset) {
        case 0x400 ... 0x41f: /* vga ioports remapped */
                goto unhandled;
        case 0x500 ... 0x515: /* bochs dispi interface */
                if (count != 2)
                        goto unhandled;
                index = (offset - 0x500) / 2;
                reg16 = *(u16 *)buf;
                if (index < ARRAY_SIZE(mdev_state->vbe))
                        mdev_state->vbe[index] = reg16;
                dev_dbg(dev, "%s: vbe write %d = %d (%s)\n",
                        __func__, index, reg16, vbe_name(index));
                break;
        case 0x600 ... 0x607: /* qemu extended regs */
                goto unhandled;
        default:
unhandled:
                dev_dbg(dev, "%s: @0x%03x, count %d (unhandled)\n",
                        __func__, offset, count);
                break;
        }
}
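/*
 * MMIO BAR layout, as handled by the read/write helpers:
 *   0x000 ... 0x3ff  EDID data blob (read-only through this window)
 *   0x400 ... 0x41f  remapped vga ioports (not implemented)
 *   0x500 ... 0x515  bochs dispi interface registers, 16-bit access
 *   0x600 ... 0x607  qemu extended registers (not implemented)
 */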
static void handle_mmio_read(struct mdev_state *mdev_state, u16 offset,
                             char *buf, u32 count)
{
        struct device *dev = mdev_dev(mdev_state->mdev);
        struct vfio_region_gfx_edid *edid;
        u16 reg16 = 0;
        int index;

        switch (offset) {
        case 0x000 ... 0x3ff: /* edid block */
                edid = &mdev_state->edid_regs;
                if (edid->link_state != VFIO_DEVICE_GFX_LINK_STATE_UP ||
                    offset >= edid->edid_size) {
                        memset(buf, 0, count);
                        break;
                }
                memcpy(buf, mdev_state->edid_blob + offset, count);
                break;
        case 0x500 ... 0x515: /* bochs dispi interface */
                if (count != 2)
                        goto unhandled;
                index = (offset - 0x500) / 2;
                if (index < ARRAY_SIZE(mdev_state->vbe))
                        reg16 = mdev_state->vbe[index];
                dev_dbg(dev, "%s: vbe read %d = %d (%s)\n",
                        __func__, index, reg16, vbe_name(index));
                *(u16 *)buf = reg16;
                break;
        default:
unhandled:
                dev_dbg(dev, "%s: @0x%03x, count %d (unhandled)\n",
                        __func__, offset, count);
                memset(buf, 0, count);
                break;
        }
}

static void handle_edid_regs(struct mdev_state *mdev_state, u16 offset,
                             char *buf, u32 count, bool is_write)
{
        char *regs = (void *)&mdev_state->edid_regs;

        /* only aligned 32-bit accesses within the register block */
        if (offset + count > sizeof(mdev_state->edid_regs))
                return;
        if (count != 4)
                return;
        if (offset % 4)
                return;

        if (is_write) {
                switch (offset) {
                case offsetof(struct vfio_region_gfx_edid, link_state):
                case offsetof(struct vfio_region_gfx_edid, edid_size):
                        memcpy(regs + offset, buf, count);
                        break;
                default:
                        /* read-only regs */
                        break;
                }
        } else {
                memcpy(buf, regs + offset, count);
        }
}

static void handle_edid_blob(struct mdev_state *mdev_state, u16 offset,
                             char *buf, u32 count, bool is_write)
{
        if (offset + count > mdev_state->edid_regs.edid_max_size)
                return;
        if (is_write)
                memcpy(mdev_state->edid_blob + offset, buf, count);
        else
                memcpy(buf, mdev_state->edid_blob + offset, count);
}

/*
 * All regions are linearized into a single file offset space: config
 * space at offset 0, then the mmio bar, then the edid region (registers
 * in the first half page, blob in the second), and finally the memory
 * bar. mdev_access() demultiplexes accesses by offset.
 */
static ssize_t mdev_access(struct mdev_state *mdev_state, char *buf,
                           size_t count, loff_t pos, bool is_write)
{
        struct page *pg;
        loff_t poff;
        char *map;
        int ret = 0;

        mutex_lock(&mdev_state->ops_lock);

        if (pos < MBOCHS_CONFIG_SPACE_SIZE) {
                if (is_write)
                        handle_pci_cfg_write(mdev_state, pos, buf, count);
                else
                        memcpy(buf, (mdev_state->vconfig + pos), count);

        } else if (pos >= MBOCHS_MMIO_BAR_OFFSET &&
                   pos + count <= (MBOCHS_MMIO_BAR_OFFSET +
                                   MBOCHS_MMIO_BAR_SIZE)) {
                pos -= MBOCHS_MMIO_BAR_OFFSET;
                if (is_write)
                        handle_mmio_write(mdev_state, pos, buf, count);
                else
                        handle_mmio_read(mdev_state, pos, buf, count);

        } else if (pos >= MBOCHS_EDID_OFFSET &&
                   pos + count <= (MBOCHS_EDID_OFFSET +
                                   MBOCHS_EDID_SIZE)) {
                pos -= MBOCHS_EDID_OFFSET;
                if (pos < MBOCHS_EDID_BLOB_OFFSET) {
                        handle_edid_regs(mdev_state, pos, buf, count, is_write);
                } else {
                        pos -= MBOCHS_EDID_BLOB_OFFSET;
                        handle_edid_blob(mdev_state, pos, buf, count, is_write);
                }

        } else if (pos >= MBOCHS_MEMORY_BAR_OFFSET &&
                   pos + count <=
                   MBOCHS_MEMORY_BAR_OFFSET + mdev_state->memsize) {
                pos -= MBOCHS_MEMORY_BAR_OFFSET;
                poff = pos & ~PAGE_MASK;
                pg = __mbochs_get_page(mdev_state, pos >> PAGE_SHIFT);
                map = kmap(pg);
                if (is_write)
                        memcpy(map + poff, buf, count);
                else
                        memcpy(buf, map + poff, count);
                kunmap(pg);
                put_page(pg);

        } else {
                dev_dbg(mdev_state->vdev.dev, "%s: %s @0x%llx (unhandled)\n",
                        __func__, is_write ? "WR" : "RD", pos);
                ret = -1;
                goto accessfailed;
        }

        ret = count;

accessfailed:
        mutex_unlock(&mdev_state->ops_lock);

        return ret;
}

/*
 * Reset programs the power-on defaults: the highest dispi interface
 * revision implemented here (ID5) and the video memory size in 64k
 * units; all other registers start out as zero.
 */
static int mbochs_reset(struct mdev_state *mdev_state)
{
        u32 size64k = mdev_state->memsize / (64 * 1024);
        int i;

        for (i = 0; i < ARRAY_SIZE(mdev_state->vbe); i++)
                mdev_state->vbe[i] = 0;
        mdev_state->vbe[VBE_DISPI_INDEX_ID] = VBE_DISPI_ID5;
        mdev_state->vbe[VBE_DISPI_INDEX_VIDEO_MEMORY_64K] = size64k;
        return 0;
}
static int mbochs_probe(struct mdev_device *mdev)
{
        int avail_mbytes = atomic_read(&mbochs_avail_mbytes);
        const struct mbochs_type *type =
                &mbochs_types[mdev_get_type_group_id(mdev)];
        struct device *dev = mdev_dev(mdev);
        struct mdev_state *mdev_state;
        int ret = -ENOMEM;

        do {
                if (avail_mbytes < type->mbytes)
                        return -ENOSPC;
        } while (!atomic_try_cmpxchg(&mbochs_avail_mbytes, &avail_mbytes,
                                     avail_mbytes - type->mbytes));

        mdev_state = kzalloc(sizeof(struct mdev_state), GFP_KERNEL);
        if (mdev_state == NULL)
                goto err_avail;
        vfio_init_group_dev(&mdev_state->vdev, &mdev->dev, &mbochs_dev_ops);

        mdev_state->vconfig = kzalloc(MBOCHS_CONFIG_SPACE_SIZE, GFP_KERNEL);
        if (mdev_state->vconfig == NULL)
                goto err_mem;

        mdev_state->memsize = type->mbytes * 1024 * 1024;
        mdev_state->pagecount = mdev_state->memsize >> PAGE_SHIFT;
        mdev_state->pages = kcalloc(mdev_state->pagecount,
                                    sizeof(struct page *),
                                    GFP_KERNEL);
        if (!mdev_state->pages)
                goto err_mem;

        dev_info(dev, "%s: %s, %d MB, %ld pages\n", __func__,
                 type->name, type->mbytes, mdev_state->pagecount);

        mutex_init(&mdev_state->ops_lock);
        mdev_state->mdev = mdev;
        INIT_LIST_HEAD(&mdev_state->dmabufs);
        mdev_state->next_id = 1;

        mdev_state->type = type;
        mdev_state->edid_regs.max_xres = type->max_x;
        mdev_state->edid_regs.max_yres = type->max_y;
        mdev_state->edid_regs.edid_offset = MBOCHS_EDID_BLOB_OFFSET;
        mdev_state->edid_regs.edid_max_size = sizeof(mdev_state->edid_blob);
        mbochs_create_config_space(mdev_state);
        mbochs_reset(mdev_state);

        ret = vfio_register_emulated_iommu_dev(&mdev_state->vdev);
        if (ret)
                goto err_mem;
        dev_set_drvdata(&mdev->dev, mdev_state);
        return 0;
err_mem:
        vfio_uninit_group_dev(&mdev_state->vdev);
        kfree(mdev_state->pages);
        kfree(mdev_state->vconfig);
        kfree(mdev_state);
err_avail:
        atomic_add(type->mbytes, &mbochs_avail_mbytes);
        return ret;
}

static void mbochs_remove(struct mdev_device *mdev)
{
        struct mdev_state *mdev_state = dev_get_drvdata(&mdev->dev);

        vfio_unregister_group_dev(&mdev_state->vdev);
        vfio_uninit_group_dev(&mdev_state->vdev);
        atomic_add(mdev_state->type->mbytes, &mbochs_avail_mbytes);
        kfree(mdev_state->pages);
        kfree(mdev_state->vconfig);
        kfree(mdev_state);
}

static ssize_t mbochs_read(struct vfio_device *vdev, char __user *buf,
                           size_t count, loff_t *ppos)
{
        struct mdev_state *mdev_state =
                container_of(vdev, struct mdev_state, vdev);
        unsigned int done = 0;
        int ret;

        while (count) {
                size_t filled;

                if (count >= 4 && !(*ppos % 4)) {
                        u32 val;

                        ret = mdev_access(mdev_state, (char *)&val, sizeof(val),
                                          *ppos, false);
                        if (ret <= 0)
                                goto read_err;

                        if (copy_to_user(buf, &val, sizeof(val)))
                                goto read_err;

                        filled = 4;
                } else if (count >= 2 && !(*ppos % 2)) {
                        u16 val;

                        ret = mdev_access(mdev_state, (char *)&val, sizeof(val),
                                          *ppos, false);
                        if (ret <= 0)
                                goto read_err;

                        if (copy_to_user(buf, &val, sizeof(val)))
                                goto read_err;

                        filled = 2;
                } else {
                        u8 val;

                        ret = mdev_access(mdev_state, (char *)&val, sizeof(val),
                                          *ppos, false);
                        if (ret <= 0)
                                goto read_err;

                        if (copy_to_user(buf, &val, sizeof(val)))
                                goto read_err;

                        filled = 1;
                }

                count -= filled;
                done += filled;
                *ppos += filled;
                buf += filled;
        }

        return done;

read_err:
        return -EFAULT;
}

static ssize_t mbochs_write(struct vfio_device *vdev, const char __user *buf,
                            size_t count, loff_t *ppos)
{
        struct mdev_state *mdev_state =
                container_of(vdev, struct mdev_state, vdev);
        unsigned int done = 0;
        int ret;

        while (count) {
                size_t filled;

                if (count >= 4 && !(*ppos % 4)) {
                        u32 val;

                        if (copy_from_user(&val, buf, sizeof(val)))
                                goto write_err;

                        ret = mdev_access(mdev_state, (char *)&val, sizeof(val),
                                          *ppos, true);
                        if (ret <= 0)
                                goto write_err;

                        filled = 4;
                } else if (count >= 2 && !(*ppos % 2)) {
                        u16 val;

                        if (copy_from_user(&val, buf, sizeof(val)))
                                goto write_err;

                        ret = mdev_access(mdev_state, (char *)&val, sizeof(val),
                                          *ppos, true);
                        if (ret <= 0)
                                goto write_err;

                        filled = 2;
                } else {
                        u8 val;

                        if (copy_from_user(&val, buf, sizeof(val)))
                                goto write_err;

                        ret = mdev_access(mdev_state, (char *)&val, sizeof(val),
                                          *ppos, true);
                        if (ret <= 0)
                                goto write_err;

                        filled = 1;
                }
                count -= filled;
                done += filled;
                *ppos += filled;
                buf += filled;
        }

        return done;
write_err:
        return -EFAULT;
}
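/*
 * Framebuffer pages are allocated lazily: __mbochs_get_page() populates
 * the pages[] array on first use and always returns an extra reference,
 * so every caller (mdev_access, the vm fault handler, dmabuf export)
 * has to balance it with put_page().
 */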
static struct page *__mbochs_get_page(struct mdev_state *mdev_state,
                                      pgoff_t pgoff)
{
        WARN_ON(!mutex_is_locked(&mdev_state->ops_lock));

        if (!mdev_state->pages[pgoff]) {
                mdev_state->pages[pgoff] =
                        alloc_pages(GFP_HIGHUSER | __GFP_ZERO, 0);
                if (!mdev_state->pages[pgoff])
                        return NULL;
        }

        get_page(mdev_state->pages[pgoff]);
        return mdev_state->pages[pgoff];
}

static struct page *mbochs_get_page(struct mdev_state *mdev_state,
                                    pgoff_t pgoff)
{
        struct page *page;

        if (WARN_ON(pgoff >= mdev_state->pagecount))
                return NULL;

        mutex_lock(&mdev_state->ops_lock);
        page = __mbochs_get_page(mdev_state, pgoff);
        mutex_unlock(&mdev_state->ops_lock);

        return page;
}

static void mbochs_put_pages(struct mdev_state *mdev_state)
{
        struct device *dev = mdev_dev(mdev_state->mdev);
        int i, count = 0;

        WARN_ON(!mutex_is_locked(&mdev_state->ops_lock));

        for (i = 0; i < mdev_state->pagecount; i++) {
                if (!mdev_state->pages[i])
                        continue;
                put_page(mdev_state->pages[i]);
                mdev_state->pages[i] = NULL;
                count++;
        }
        dev_dbg(dev, "%s: %d pages released\n", __func__, count);
}

static vm_fault_t mbochs_region_vm_fault(struct vm_fault *vmf)
{
        struct vm_area_struct *vma = vmf->vma;
        struct mdev_state *mdev_state = vma->vm_private_data;
        pgoff_t page_offset = (vmf->address - vma->vm_start) >> PAGE_SHIFT;

        if (page_offset >= mdev_state->pagecount)
                return VM_FAULT_SIGBUS;

        vmf->page = mbochs_get_page(mdev_state, page_offset);
        if (!vmf->page)
                return VM_FAULT_SIGBUS;

        return 0;
}

static const struct vm_operations_struct mbochs_region_vm_ops = {
        .fault = mbochs_region_vm_fault,
};

static int mbochs_mmap(struct vfio_device *vdev, struct vm_area_struct *vma)
{
        struct mdev_state *mdev_state =
                container_of(vdev, struct mdev_state, vdev);

        if (vma->vm_pgoff != MBOCHS_MEMORY_BAR_OFFSET >> PAGE_SHIFT)
                return -EINVAL;
        if (vma->vm_end < vma->vm_start)
                return -EINVAL;
        if (vma->vm_end - vma->vm_start > mdev_state->memsize)
                return -EINVAL;
        if ((vma->vm_flags & VM_SHARED) == 0)
                return -EINVAL;

        vma->vm_ops = &mbochs_region_vm_ops;
        vma->vm_private_data = mdev_state;
        return 0;
}
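/*
 * Unlike the region fault handler above, dma-buf faults never allocate:
 * the pages were grabbed at mbochs_dmabuf_alloc() time, so a fault only
 * takes an extra reference on the already existing page.
 */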
static vm_fault_t mbochs_dmabuf_vm_fault(struct vm_fault *vmf)
{
        struct vm_area_struct *vma = vmf->vma;
        struct mbochs_dmabuf *dmabuf = vma->vm_private_data;

        if (WARN_ON(vmf->pgoff >= dmabuf->pagecount))
                return VM_FAULT_SIGBUS;

        vmf->page = dmabuf->pages[vmf->pgoff];
        get_page(vmf->page);
        return 0;
}

static const struct vm_operations_struct mbochs_dmabuf_vm_ops = {
        .fault = mbochs_dmabuf_vm_fault,
};

static int mbochs_mmap_dmabuf(struct dma_buf *buf, struct vm_area_struct *vma)
{
        struct mbochs_dmabuf *dmabuf = buf->priv;
        struct device *dev = mdev_dev(dmabuf->mdev_state->mdev);

        dev_dbg(dev, "%s: %d\n", __func__, dmabuf->id);

        if ((vma->vm_flags & VM_SHARED) == 0)
                return -EINVAL;

        vma->vm_ops = &mbochs_dmabuf_vm_ops;
        vma->vm_private_data = dmabuf;
        return 0;
}

static void mbochs_print_dmabuf(struct mbochs_dmabuf *dmabuf,
                                const char *prefix)
{
        struct device *dev = mdev_dev(dmabuf->mdev_state->mdev);
        u32 fourcc = dmabuf->mode.drm_format;

        dev_dbg(dev, "%s/%d: %c%c%c%c, %dx%d, stride %d, off 0x%llx, size 0x%llx, pages %ld\n",
                prefix, dmabuf->id,
                fourcc ? ((fourcc >>  0) & 0xff) : '-',
                fourcc ? ((fourcc >>  8) & 0xff) : '-',
                fourcc ? ((fourcc >> 16) & 0xff) : '-',
                fourcc ? ((fourcc >> 24) & 0xff) : '-',
                dmabuf->mode.width, dmabuf->mode.height, dmabuf->mode.stride,
                dmabuf->mode.offset, dmabuf->mode.size, dmabuf->pagecount);
}

static struct sg_table *mbochs_map_dmabuf(struct dma_buf_attachment *at,
                                          enum dma_data_direction direction)
{
        struct mbochs_dmabuf *dmabuf = at->dmabuf->priv;
        struct device *dev = mdev_dev(dmabuf->mdev_state->mdev);
        struct sg_table *sg;

        dev_dbg(dev, "%s: %d\n", __func__, dmabuf->id);

        sg = kzalloc(sizeof(*sg), GFP_KERNEL);
        if (!sg)
                goto err1;
        if (sg_alloc_table_from_pages(sg, dmabuf->pages, dmabuf->pagecount,
                                      0, dmabuf->mode.size, GFP_KERNEL) < 0)
                goto err2;
        if (dma_map_sgtable(at->dev, sg, direction, 0))
                goto err3;

        return sg;

err3:
        sg_free_table(sg);
err2:
        kfree(sg);
err1:
        return ERR_PTR(-ENOMEM);
}

static void mbochs_unmap_dmabuf(struct dma_buf_attachment *at,
                                struct sg_table *sg,
                                enum dma_data_direction direction)
{
        struct mbochs_dmabuf *dmabuf = at->dmabuf->priv;
        struct device *dev = mdev_dev(dmabuf->mdev_state->mdev);

        dev_dbg(dev, "%s: %d\n", __func__, dmabuf->id);

        dma_unmap_sgtable(at->dev, sg, direction, 0);
        sg_free_table(sg);
        kfree(sg);
}

static void mbochs_release_dmabuf(struct dma_buf *buf)
{
        struct mbochs_dmabuf *dmabuf = buf->priv;
        struct mdev_state *mdev_state = dmabuf->mdev_state;
        struct device *dev = mdev_dev(mdev_state->mdev);
        pgoff_t pg;

        dev_dbg(dev, "%s: %d\n", __func__, dmabuf->id);

        for (pg = 0; pg < dmabuf->pagecount; pg++)
                put_page(dmabuf->pages[pg]);

        mutex_lock(&mdev_state->ops_lock);
        dmabuf->buf = NULL;
        if (dmabuf->unlinked)
                kfree(dmabuf);
        mutex_unlock(&mdev_state->ops_lock);
}

static struct dma_buf_ops mbochs_dmabuf_ops = {
        .map_dma_buf   = mbochs_map_dmabuf,
        .unmap_dma_buf = mbochs_unmap_dmabuf,
        .release       = mbochs_release_dmabuf,
        .mmap          = mbochs_mmap_dmabuf,
};

static struct mbochs_dmabuf *mbochs_dmabuf_alloc(struct mdev_state *mdev_state,
                                                 struct mbochs_mode *mode)
{
        struct mbochs_dmabuf *dmabuf;
        pgoff_t page_offset, pg;

        WARN_ON(!mutex_is_locked(&mdev_state->ops_lock));

        dmabuf = kzalloc(sizeof(struct mbochs_dmabuf), GFP_KERNEL);
        if (!dmabuf)
                return NULL;

        dmabuf->mode = *mode;
        dmabuf->id = mdev_state->next_id++;
        dmabuf->pagecount = DIV_ROUND_UP(mode->size, PAGE_SIZE);
        dmabuf->pages = kcalloc(dmabuf->pagecount, sizeof(struct page *),
                                GFP_KERNEL);
        if (!dmabuf->pages)
                goto err_free_dmabuf;

        page_offset = dmabuf->mode.offset >> PAGE_SHIFT;
        for (pg = 0; pg < dmabuf->pagecount; pg++) {
                dmabuf->pages[pg] = __mbochs_get_page(mdev_state,
                                                      page_offset + pg);
                if (!dmabuf->pages[pg])
                        goto err_free_pages;
        }

        dmabuf->mdev_state = mdev_state;
        list_add(&dmabuf->next, &mdev_state->dmabufs);

        mbochs_print_dmabuf(dmabuf, __func__);
        return dmabuf;

err_free_pages:
        while (pg > 0)
                put_page(dmabuf->pages[--pg]);
        kfree(dmabuf->pages);
err_free_dmabuf:
        kfree(dmabuf);
        return NULL;
}
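/*
 * dma-buf lifetime: a mbochs_dmabuf stays on mdev_state->dmabufs until
 * the device is closed. If the device goes away while an exported
 * dma-buf is still held somewhere, mbochs_close_device() marks it
 * unlinked and the final mbochs_release_dmabuf() frees it.
 */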
static struct mbochs_dmabuf *
mbochs_dmabuf_find_by_mode(struct mdev_state *mdev_state,
                           struct mbochs_mode *mode)
{
        struct mbochs_dmabuf *dmabuf;

        WARN_ON(!mutex_is_locked(&mdev_state->ops_lock));

        list_for_each_entry(dmabuf, &mdev_state->dmabufs, next)
                if (mbochs_modes_equal(&dmabuf->mode, mode))
                        return dmabuf;

        return NULL;
}

static struct mbochs_dmabuf *
mbochs_dmabuf_find_by_id(struct mdev_state *mdev_state, u32 id)
{
        struct mbochs_dmabuf *dmabuf;

        WARN_ON(!mutex_is_locked(&mdev_state->ops_lock));

        list_for_each_entry(dmabuf, &mdev_state->dmabufs, next)
                if (dmabuf->id == id)
                        return dmabuf;

        return NULL;
}

static int mbochs_dmabuf_export(struct mbochs_dmabuf *dmabuf)
{
        struct mdev_state *mdev_state = dmabuf->mdev_state;
        struct device *dev = mdev_state->vdev.dev;
        DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
        struct dma_buf *buf;

        WARN_ON(!mutex_is_locked(&mdev_state->ops_lock));

        if (!IS_ALIGNED(dmabuf->mode.offset, PAGE_SIZE)) {
                dev_info_ratelimited(dev, "%s: framebuffer not page-aligned\n",
                                     __func__);
                return -EINVAL;
        }

        exp_info.ops = &mbochs_dmabuf_ops;
        exp_info.size = dmabuf->mode.size;
        exp_info.priv = dmabuf;

        buf = dma_buf_export(&exp_info);
        if (IS_ERR(buf)) {
                dev_info_ratelimited(dev, "%s: dma_buf_export failed: %ld\n",
                                     __func__, PTR_ERR(buf));
                return PTR_ERR(buf);
        }

        dmabuf->buf = buf;
        dev_dbg(dev, "%s: %d\n", __func__, dmabuf->id);
        return 0;
}

static int mbochs_get_region_info(struct mdev_state *mdev_state,
                                  struct vfio_region_info_ext *ext)
{
        struct vfio_region_info *region_info = &ext->base;

        if (region_info->index >= MBOCHS_NUM_REGIONS)
                return -EINVAL;

        switch (region_info->index) {
        case VFIO_PCI_CONFIG_REGION_INDEX:
                region_info->offset = 0;
                region_info->size   = MBOCHS_CONFIG_SPACE_SIZE;
                region_info->flags  = (VFIO_REGION_INFO_FLAG_READ |
                                       VFIO_REGION_INFO_FLAG_WRITE);
                break;
        case VFIO_PCI_BAR0_REGION_INDEX:
                region_info->offset = MBOCHS_MEMORY_BAR_OFFSET;
                region_info->size   = mdev_state->memsize;
                region_info->flags  = (VFIO_REGION_INFO_FLAG_READ  |
                                       VFIO_REGION_INFO_FLAG_WRITE |
                                       VFIO_REGION_INFO_FLAG_MMAP);
                break;
        case VFIO_PCI_BAR2_REGION_INDEX:
                region_info->offset = MBOCHS_MMIO_BAR_OFFSET;
                region_info->size   = MBOCHS_MMIO_BAR_SIZE;
                region_info->flags  = (VFIO_REGION_INFO_FLAG_READ |
                                       VFIO_REGION_INFO_FLAG_WRITE);
                break;
        case MBOCHS_EDID_REGION_INDEX:
                ext->base.argsz = sizeof(*ext);
                ext->base.offset = MBOCHS_EDID_OFFSET;
                ext->base.size = MBOCHS_EDID_SIZE;
                ext->base.flags = (VFIO_REGION_INFO_FLAG_READ  |
                                   VFIO_REGION_INFO_FLAG_WRITE |
                                   VFIO_REGION_INFO_FLAG_CAPS);
                ext->base.cap_offset = offsetof(typeof(*ext), type);
                ext->type.header.id = VFIO_REGION_INFO_CAP_TYPE;
                ext->type.header.version = 1;
                ext->type.header.next = 0;
                ext->type.type = VFIO_REGION_TYPE_GFX;
                ext->type.subtype = VFIO_REGION_SUBTYPE_GFX_EDID;
                break;
        default:
                region_info->size   = 0;
                region_info->offset = 0;
                region_info->flags  = 0;
        }

        return 0;
}

static int mbochs_get_irq_info(struct vfio_irq_info *irq_info)
{
        irq_info->count = 0;
        return 0;
}

static int mbochs_get_device_info(struct vfio_device_info *dev_info)
{
        dev_info->flags = VFIO_DEVICE_FLAGS_PCI;
        dev_info->num_regions = MBOCHS_NUM_REGIONS;
        dev_info->num_irqs = VFIO_PCI_NUM_IRQS;
        return 0;
}
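/*
 * VFIO_DEVICE_QUERY_GFX_PLANE protocol: userspace first probes with
 * VFIO_GFX_PLANE_TYPE_PROBE to learn that only dmabuf planes are
 * supported, then queries the primary plane and fetches a dma-buf fd
 * for the returned dmabuf_id via VFIO_DEVICE_GET_GFX_DMABUF.
 */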
static int mbochs_query_gfx_plane(struct mdev_state *mdev_state,
                                  struct vfio_device_gfx_plane_info *plane)
{
        struct mbochs_dmabuf *dmabuf;
        struct mbochs_mode mode;
        int ret;

        if (plane->flags & VFIO_GFX_PLANE_TYPE_PROBE) {
                if (plane->flags == (VFIO_GFX_PLANE_TYPE_PROBE |
                                     VFIO_GFX_PLANE_TYPE_DMABUF))
                        return 0;
                return -EINVAL;
        }

        if (plane->flags != VFIO_GFX_PLANE_TYPE_DMABUF)
                return -EINVAL;

        plane->drm_format_mod = 0;
        plane->x_pos = 0;
        plane->y_pos = 0;
        plane->x_hot = 0;
        plane->y_hot = 0;

        mutex_lock(&mdev_state->ops_lock);

        ret = -EINVAL;
        if (plane->drm_plane_type == DRM_PLANE_TYPE_PRIMARY)
                ret = mbochs_check_framebuffer(mdev_state, &mode);
        if (ret < 0) {
                plane->drm_format = 0;
                plane->width  = 0;
                plane->height = 0;
                plane->stride = 0;
                plane->size   = 0;
                plane->dmabuf_id = 0;
                goto done;
        }

        dmabuf = mbochs_dmabuf_find_by_mode(mdev_state, &mode);
        if (!dmabuf)
                dmabuf = mbochs_dmabuf_alloc(mdev_state, &mode);
        if (!dmabuf) {
                mutex_unlock(&mdev_state->ops_lock);
                return -ENOMEM;
        }

        plane->drm_format = dmabuf->mode.drm_format;
        plane->width  = dmabuf->mode.width;
        plane->height = dmabuf->mode.height;
        plane->stride = dmabuf->mode.stride;
        plane->size   = dmabuf->mode.size;
        plane->dmabuf_id = dmabuf->id;

done:
        if (plane->drm_plane_type == DRM_PLANE_TYPE_PRIMARY &&
            mdev_state->active_id != plane->dmabuf_id) {
                dev_dbg(mdev_state->vdev.dev, "%s: primary: %d => %d\n",
                        __func__, mdev_state->active_id, plane->dmabuf_id);
                mdev_state->active_id = plane->dmabuf_id;
        }
        mutex_unlock(&mdev_state->ops_lock);
        return 0;
}

static int mbochs_get_gfx_dmabuf(struct mdev_state *mdev_state, u32 id)
{
        struct mbochs_dmabuf *dmabuf;

        mutex_lock(&mdev_state->ops_lock);

        dmabuf = mbochs_dmabuf_find_by_id(mdev_state, id);
        if (!dmabuf) {
                mutex_unlock(&mdev_state->ops_lock);
                return -ENOENT;
        }

        if (!dmabuf->buf)
                mbochs_dmabuf_export(dmabuf);

        mutex_unlock(&mdev_state->ops_lock);

        if (!dmabuf->buf)
                return -EINVAL;

        return dma_buf_fd(dmabuf->buf, 0);
}
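/*
 * The ioctls below follow the usual vfio pattern: copy in the
 * fixed-size prefix of the argument struct, validate argsz, then copy
 * back no more than the caller provided room for.
 */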
static long mbochs_ioctl(struct vfio_device *vdev, unsigned int cmd,
                         unsigned long arg)
{
        struct mdev_state *mdev_state =
                container_of(vdev, struct mdev_state, vdev);
        int ret = 0;
        unsigned long minsz, outsz;

        switch (cmd) {
        case VFIO_DEVICE_GET_INFO:
        {
                struct vfio_device_info info;

                minsz = offsetofend(struct vfio_device_info, num_irqs);

                if (copy_from_user(&info, (void __user *)arg, minsz))
                        return -EFAULT;

                if (info.argsz < minsz)
                        return -EINVAL;

                ret = mbochs_get_device_info(&info);
                if (ret)
                        return ret;

                if (copy_to_user((void __user *)arg, &info, minsz))
                        return -EFAULT;

                return 0;
        }
        case VFIO_DEVICE_GET_REGION_INFO:
        {
                struct vfio_region_info_ext info;

                minsz = offsetofend(typeof(info), base.offset);

                if (copy_from_user(&info, (void __user *)arg, minsz))
                        return -EFAULT;

                outsz = info.base.argsz;
                if (outsz < minsz)
                        return -EINVAL;
                if (outsz > sizeof(info))
                        return -EINVAL;

                ret = mbochs_get_region_info(mdev_state, &info);
                if (ret)
                        return ret;

                if (copy_to_user((void __user *)arg, &info, outsz))
                        return -EFAULT;

                return 0;
        }

        case VFIO_DEVICE_GET_IRQ_INFO:
        {
                struct vfio_irq_info info;

                minsz = offsetofend(struct vfio_irq_info, count);

                if (copy_from_user(&info, (void __user *)arg, minsz))
                        return -EFAULT;

                if ((info.argsz < minsz) ||
                    (info.index >= VFIO_PCI_NUM_IRQS))
                        return -EINVAL;

                ret = mbochs_get_irq_info(&info);
                if (ret)
                        return ret;

                if (copy_to_user((void __user *)arg, &info, minsz))
                        return -EFAULT;

                return 0;
        }

        case VFIO_DEVICE_QUERY_GFX_PLANE:
        {
                struct vfio_device_gfx_plane_info plane;

                minsz = offsetofend(struct vfio_device_gfx_plane_info,
                                    region_index);

                if (copy_from_user(&plane, (void __user *)arg, minsz))
                        return -EFAULT;

                if (plane.argsz < minsz)
                        return -EINVAL;

                ret = mbochs_query_gfx_plane(mdev_state, &plane);
                if (ret)
                        return ret;

                if (copy_to_user((void __user *)arg, &plane, minsz))
                        return -EFAULT;

                return 0;
        }

        case VFIO_DEVICE_GET_GFX_DMABUF:
        {
                u32 dmabuf_id;

                if (get_user(dmabuf_id, (__u32 __user *)arg))
                        return -EFAULT;

                return mbochs_get_gfx_dmabuf(mdev_state, dmabuf_id);
        }

        case VFIO_DEVICE_SET_IRQS:
                return -EINVAL;

        case VFIO_DEVICE_RESET:
                return mbochs_reset(mdev_state);
        }
        return -ENOTTY;
}

static void mbochs_close_device(struct vfio_device *vdev)
{
        struct mdev_state *mdev_state =
                container_of(vdev, struct mdev_state, vdev);
        struct mbochs_dmabuf *dmabuf, *tmp;

        mutex_lock(&mdev_state->ops_lock);

        list_for_each_entry_safe(dmabuf, tmp, &mdev_state->dmabufs, next) {
                list_del(&dmabuf->next);
                if (dmabuf->buf) {
                        /* free in mbochs_release_dmabuf() */
                        dmabuf->unlinked = true;
                } else {
                        kfree(dmabuf);
                }
        }
        mbochs_put_pages(mdev_state);

        mutex_unlock(&mdev_state->ops_lock);
}
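/*
 * sysfs: the per-device "memory" attribute shows up in a "vendor" group
 * below the mdev device, the per-type attributes below
 * mdev_supported_types/<type-name>/ of the parent device.
 */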
static ssize_t
memory_show(struct device *dev, struct device_attribute *attr,
            char *buf)
{
        struct mdev_state *mdev_state = dev_get_drvdata(dev);

        return sprintf(buf, "%d MB\n", mdev_state->type->mbytes);
}
static DEVICE_ATTR_RO(memory);

static struct attribute *mdev_dev_attrs[] = {
        &dev_attr_memory.attr,
        NULL,
};

static const struct attribute_group mdev_dev_group = {
        .name  = "vendor",
        .attrs = mdev_dev_attrs,
};

static const struct attribute_group *mdev_dev_groups[] = {
        &mdev_dev_group,
        NULL,
};

static ssize_t name_show(struct mdev_type *mtype,
                         struct mdev_type_attribute *attr, char *buf)
{
        const struct mbochs_type *type =
                &mbochs_types[mtype_get_type_group_id(mtype)];

        return sprintf(buf, "%s\n", type->name);
}
static MDEV_TYPE_ATTR_RO(name);

static ssize_t description_show(struct mdev_type *mtype,
                                struct mdev_type_attribute *attr, char *buf)
{
        const struct mbochs_type *type =
                &mbochs_types[mtype_get_type_group_id(mtype)];

        return sprintf(buf, "virtual display, %d MB video memory\n",
                       type ? type->mbytes : 0);
}
static MDEV_TYPE_ATTR_RO(description);

static ssize_t available_instances_show(struct mdev_type *mtype,
                                        struct mdev_type_attribute *attr,
                                        char *buf)
{
        const struct mbochs_type *type =
                &mbochs_types[mtype_get_type_group_id(mtype)];
        int count = atomic_read(&mbochs_avail_mbytes) / type->mbytes;

        return sprintf(buf, "%d\n", count);
}
static MDEV_TYPE_ATTR_RO(available_instances);

static ssize_t device_api_show(struct mdev_type *mtype,
                               struct mdev_type_attribute *attr, char *buf)
{
        return sprintf(buf, "%s\n", VFIO_DEVICE_API_PCI_STRING);
}
static MDEV_TYPE_ATTR_RO(device_api);

static struct attribute *mdev_types_attrs[] = {
        &mdev_type_attr_name.attr,
        &mdev_type_attr_description.attr,
        &mdev_type_attr_device_api.attr,
        &mdev_type_attr_available_instances.attr,
        NULL,
};

static struct attribute_group mdev_type_group1 = {
        .name  = MBOCHS_TYPE_1,
        .attrs = mdev_types_attrs,
};

static struct attribute_group mdev_type_group2 = {
        .name  = MBOCHS_TYPE_2,
        .attrs = mdev_types_attrs,
};

static struct attribute_group mdev_type_group3 = {
        .name  = MBOCHS_TYPE_3,
        .attrs = mdev_types_attrs,
};

static struct attribute_group *mdev_type_groups[] = {
        &mdev_type_group1,
        &mdev_type_group2,
        &mdev_type_group3,
        NULL,
};
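/*
 * Example host-side usage (a sketch; the exact parent device path
 * depends on how this sample lands in your sysfs tree, typically
 * /sys/devices/virtual/mbochs/mbochs - see
 * Documentation/driver-api/vfio-mediated-device.rst):
 *
 *   UUID=$(uuidgen)
 *   echo $UUID > \
 *     /sys/devices/virtual/mbochs/mbochs/mdev_supported_types/mbochs-medium/create
 *   qemu-system-x86_64 ... \
 *     -device vfio-pci,sysfsdev=/sys/bus/mdev/devices/$UUID,display=on
 */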
static const struct vfio_device_ops mbochs_dev_ops = {
        .close_device = mbochs_close_device,
        .read = mbochs_read,
        .write = mbochs_write,
        .ioctl = mbochs_ioctl,
        .mmap = mbochs_mmap,
};

static struct mdev_driver mbochs_driver = {
        .driver = {
                .name = "mbochs",
                .owner = THIS_MODULE,
                .mod_name = KBUILD_MODNAME,
                .dev_groups = mdev_dev_groups,
        },
        .probe = mbochs_probe,
        .remove = mbochs_remove,
};

static const struct mdev_parent_ops mdev_fops = {
        .owner                  = THIS_MODULE,
        .device_driver          = &mbochs_driver,
        .supported_type_groups  = mdev_type_groups,
};

static const struct file_operations vd_fops = {
        .owner = THIS_MODULE,
};

static void mbochs_device_release(struct device *dev)
{
        /* nothing */
}

static int __init mbochs_dev_init(void)
{
        int ret = 0;

        atomic_set(&mbochs_avail_mbytes, max_mbytes);

        ret = alloc_chrdev_region(&mbochs_devt, 0, MINORMASK + 1, MBOCHS_NAME);
        if (ret < 0) {
                pr_err("Error: failed to register mbochs_dev, err: %d\n", ret);
                return ret;
        }
        cdev_init(&mbochs_cdev, &vd_fops);
        cdev_add(&mbochs_cdev, mbochs_devt, MINORMASK + 1);
        pr_info("%s: major %d\n", __func__, MAJOR(mbochs_devt));

        ret = mdev_register_driver(&mbochs_driver);
        if (ret)
                goto err_cdev;

        mbochs_class = class_create(THIS_MODULE, MBOCHS_CLASS_NAME);
        if (IS_ERR(mbochs_class)) {
                pr_err("Error: failed to register mbochs_dev class\n");
                ret = PTR_ERR(mbochs_class);
                goto err_driver;
        }
        mbochs_dev.class = mbochs_class;
        mbochs_dev.release = mbochs_device_release;
        dev_set_name(&mbochs_dev, "%s", MBOCHS_NAME);

        ret = device_register(&mbochs_dev);
        if (ret)
                goto err_class;

        ret = mdev_register_device(&mbochs_dev, &mdev_fops);
        if (ret)
                goto err_device;

        return 0;

err_device:
        device_unregister(&mbochs_dev);
err_class:
        class_destroy(mbochs_class);
err_driver:
        mdev_unregister_driver(&mbochs_driver);
err_cdev:
        cdev_del(&mbochs_cdev);
        unregister_chrdev_region(mbochs_devt, MINORMASK + 1);
        return ret;
}

static void __exit mbochs_dev_exit(void)
{
        mbochs_dev.bus = NULL;
        mdev_unregister_device(&mbochs_dev);

        device_unregister(&mbochs_dev);
        mdev_unregister_driver(&mbochs_driver);
        cdev_del(&mbochs_cdev);
        unregister_chrdev_region(mbochs_devt, MINORMASK + 1);
        class_destroy(mbochs_class);
        mbochs_class = NULL;
}

MODULE_IMPORT_NS(DMA_BUF);
module_init(mbochs_dev_init)
module_exit(mbochs_dev_exit)