/*
 * Copyright 2011 Red Hat, Inc.
 * Copyright © 2014 The Chromium OS Authors
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES, OR OTHER LIABILITY, WHETHER
 * IN AN ACTION OF CONTRACT, TORT, OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *	Adam Jackson <ajax@redhat.com>
 *	Ben Widawsky <ben@bwidawsk.net>
 */

/**
 * This is vgem, a (non-hardware-backed) GEM service, used by Mesa's
 * software renderer and the X server for efficient buffer sharing.
 * (An illustrative userspace usage sketch appears at the end of this file.)
 */

#include <linux/dma-buf.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/shmem_fs.h>
#include <linux/vmalloc.h>

#include <drm/drm_drv.h>
#include <drm/drm_file.h>
#include <drm/drm_ioctl.h>
#include <drm/drm_prime.h>

#include "vgem_drv.h"

#define DRIVER_NAME	"vgem"
#define DRIVER_DESC	"Virtual GEM provider"
#define DRIVER_DATE	"20120112"
#define DRIVER_MAJOR	1
#define DRIVER_MINOR	0

static struct vgem_device {
	struct drm_device drm;
	struct platform_device *platform;
} *vgem_device;

static void vgem_gem_free_object(struct drm_gem_object *obj)
{
	struct drm_vgem_gem_object *vgem_obj = to_vgem_bo(obj);

	kvfree(vgem_obj->pages);
	mutex_destroy(&vgem_obj->pages_lock);

	if (obj->import_attach)
		drm_prime_gem_destroy(obj, vgem_obj->table);

	drm_gem_object_release(obj);
	kfree(vgem_obj);
}

static vm_fault_t vgem_gem_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct drm_vgem_gem_object *obj = vma->vm_private_data;
	/* We don't use vmf->pgoff since that has the fake offset */
	unsigned long vaddr = vmf->address;
	vm_fault_t ret = VM_FAULT_SIGBUS;
	loff_t num_pages;
	pgoff_t page_offset;

	page_offset = (vaddr - vma->vm_start) >> PAGE_SHIFT;
	num_pages = DIV_ROUND_UP(obj->base.size, PAGE_SIZE);

	if (page_offset >= num_pages)
		return VM_FAULT_SIGBUS;

	mutex_lock(&obj->pages_lock);
	if (obj->pages) {
		get_page(obj->pages[page_offset]);
		vmf->page = obj->pages[page_offset];
		ret = 0;
	}
	mutex_unlock(&obj->pages_lock);
	if (ret) {
		struct page *page;

		page = shmem_read_mapping_page(
					file_inode(obj->base.filp)->i_mapping,
					page_offset);
		if (!IS_ERR(page)) {
			vmf->page = page;
			ret = 0;
		} else switch (PTR_ERR(page)) {
			case -ENOSPC:
			case -ENOMEM:
				ret = VM_FAULT_OOM;
				break;
			case -EBUSY:
				ret = VM_FAULT_RETRY;
				break;
			case -EFAULT:
			case -EINVAL:
				ret = VM_FAULT_SIGBUS;
				break;
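			/* Any other shmem error is unexpected here; warn
			 * and report SIGBUS to the faulting process.
			 */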
			default:
				WARN_ON(PTR_ERR(page));
				ret = VM_FAULT_SIGBUS;
				break;
		}

	}
	return ret;
}

static const struct vm_operations_struct vgem_gem_vm_ops = {
	.fault = vgem_gem_fault,
	.open = drm_gem_vm_open,
	.close = drm_gem_vm_close,
};

static int vgem_open(struct drm_device *dev, struct drm_file *file)
{
	struct vgem_file *vfile;
	int ret;

	vfile = kzalloc(sizeof(*vfile), GFP_KERNEL);
	if (!vfile)
		return -ENOMEM;

	file->driver_priv = vfile;

	ret = vgem_fence_open(vfile);
	if (ret) {
		kfree(vfile);
		return ret;
	}

	return 0;
}

static void vgem_postclose(struct drm_device *dev, struct drm_file *file)
{
	struct vgem_file *vfile = file->driver_priv;

	vgem_fence_close(vfile);
	kfree(vfile);
}

static struct drm_vgem_gem_object *__vgem_gem_create(struct drm_device *dev,
						     unsigned long size)
{
	struct drm_vgem_gem_object *obj;
	int ret;

	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
	if (!obj)
		return ERR_PTR(-ENOMEM);

	ret = drm_gem_object_init(dev, &obj->base, roundup(size, PAGE_SIZE));
	if (ret) {
		kfree(obj);
		return ERR_PTR(ret);
	}

	mutex_init(&obj->pages_lock);

	return obj;
}

static void __vgem_gem_destroy(struct drm_vgem_gem_object *obj)
{
	drm_gem_object_release(&obj->base);
	kfree(obj);
}

static struct drm_gem_object *vgem_gem_create(struct drm_device *dev,
					      struct drm_file *file,
					      unsigned int *handle,
					      unsigned long size)
{
	struct drm_vgem_gem_object *obj;
	int ret;

	obj = __vgem_gem_create(dev, size);
	if (IS_ERR(obj))
		return ERR_CAST(obj);

	ret = drm_gem_handle_create(file, &obj->base, handle);
	if (ret) {
		drm_gem_object_put_unlocked(&obj->base);
		return ERR_PTR(ret);
	}

	return &obj->base;
}

static int vgem_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
				struct drm_mode_create_dumb *args)
{
	struct drm_gem_object *gem_object;
	u64 pitch, size;

	pitch = args->width * DIV_ROUND_UP(args->bpp, 8);
	size = args->height * pitch;
	if (size == 0)
		return -EINVAL;

	gem_object = vgem_gem_create(dev, file, &args->handle, size);
	if (IS_ERR(gem_object))
		return PTR_ERR(gem_object);

	args->size = gem_object->size;
	args->pitch = pitch;

	drm_gem_object_put_unlocked(gem_object);

	DRM_DEBUG("Created object of size %llu\n", args->size);

	return 0;
}

static int vgem_gem_dumb_map(struct drm_file *file, struct drm_device *dev,
			     uint32_t handle, uint64_t *offset)
{
	struct drm_gem_object *obj;
	int ret;

	obj = drm_gem_object_lookup(file, handle);
	if (!obj)
		return -ENOENT;

	if (!obj->filp) {
		ret = -EINVAL;
		goto unref;
	}

	ret = drm_gem_create_mmap_offset(obj);
	if (ret)
		goto unref;

	*offset = drm_vma_node_offset_addr(&obj->vma_node);
unref:
	drm_gem_object_put_unlocked(obj);

	return ret;
}

static struct drm_ioctl_desc vgem_ioctls[] = {
	DRM_IOCTL_DEF_DRV(VGEM_FENCE_ATTACH, vgem_fence_attach_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(VGEM_FENCE_SIGNAL, vgem_fence_signal_ioctl, DRM_RENDER_ALLOW),
};

static int vgem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	unsigned long flags = vma->vm_flags;
	int ret;

	ret = drm_gem_mmap(filp, vma);
	if (ret)
		return ret;

	/* Keep the WC mapping set up by drm_gem_mmap(); our pages are
	 * ordinary and not special.
	 */
	vma->vm_flags = flags | VM_DONTEXPAND | VM_DONTDUMP;
	return 0;
}

static const struct file_operations vgem_driver_fops = {
	.owner = THIS_MODULE,
	.open = drm_open,
	.mmap = vgem_mmap,
	.poll = drm_poll,
	.read = drm_read,
	.unlocked_ioctl = drm_ioctl,
	.compat_ioctl = drm_compat_ioctl,
	.release = drm_release,
};

static struct page **vgem_pin_pages(struct drm_vgem_gem_object *bo)
{
	mutex_lock(&bo->pages_lock);
	if (bo->pages_pin_count++ == 0) {
		struct page **pages;

		pages = drm_gem_get_pages(&bo->base);
		if (IS_ERR(pages)) {
			bo->pages_pin_count--;
			mutex_unlock(&bo->pages_lock);
			return pages;
		}

		bo->pages = pages;
	}
	mutex_unlock(&bo->pages_lock);

	return bo->pages;
}

static void vgem_unpin_pages(struct drm_vgem_gem_object *bo)
{
	mutex_lock(&bo->pages_lock);
	if (--bo->pages_pin_count == 0) {
		drm_gem_put_pages(&bo->base, bo->pages, true, true);
		bo->pages = NULL;
	}
	mutex_unlock(&bo->pages_lock);
}

static int vgem_prime_pin(struct drm_gem_object *obj)
{
	struct drm_vgem_gem_object *bo = to_vgem_bo(obj);
	long n_pages = obj->size >> PAGE_SHIFT;
	struct page **pages;

	pages = vgem_pin_pages(bo);
	if (IS_ERR(pages))
		return PTR_ERR(pages);

	/* Flush the object from the CPU cache so that importers can rely
	 * on coherent indirect access via the exported dma-address.
	 */
	drm_clflush_pages(pages, n_pages);

	return 0;
}

static void vgem_prime_unpin(struct drm_gem_object *obj)
{
	struct drm_vgem_gem_object *bo = to_vgem_bo(obj);

	vgem_unpin_pages(bo);
}

static struct sg_table *vgem_prime_get_sg_table(struct drm_gem_object *obj)
{
	struct drm_vgem_gem_object *bo = to_vgem_bo(obj);

	return drm_prime_pages_to_sg(bo->pages, bo->base.size >> PAGE_SHIFT);
}

static struct drm_gem_object *vgem_prime_import(struct drm_device *dev,
						struct dma_buf *dma_buf)
{
	struct vgem_device *vgem = container_of(dev, typeof(*vgem), drm);

	return drm_gem_prime_import_dev(dev, dma_buf, &vgem->platform->dev);
}

static struct drm_gem_object *vgem_prime_import_sg_table(struct drm_device *dev,
			struct dma_buf_attachment *attach, struct sg_table *sg)
{
	struct drm_vgem_gem_object *obj;
	int npages;

	obj = __vgem_gem_create(dev, attach->dmabuf->size);
	if (IS_ERR(obj))
		return ERR_CAST(obj);

	npages = PAGE_ALIGN(attach->dmabuf->size) / PAGE_SIZE;

	obj->table = sg;
	obj->pages = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
	if (!obj->pages) {
		__vgem_gem_destroy(obj);
		return ERR_PTR(-ENOMEM);
	}

	obj->pages_pin_count++; /* perma-pinned */
	drm_prime_sg_to_page_addr_arrays(obj->table, obj->pages, NULL,
					 npages);
	return &obj->base;
}

static void *vgem_prime_vmap(struct drm_gem_object *obj)
{
	struct drm_vgem_gem_object *bo = to_vgem_bo(obj);
	long n_pages = obj->size >> PAGE_SHIFT;
	struct page **pages;

	pages = vgem_pin_pages(bo);
	if (IS_ERR(pages))
		return NULL;

	return vmap(pages, n_pages, 0, pgprot_writecombine(PAGE_KERNEL));
}

static void vgem_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
{
	struct drm_vgem_gem_object *bo = to_vgem_bo(obj);

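	/* Drop the kernel mapping created by vgem_prime_vmap(), then
	 * release the corresponding page pin.
	 */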
	vunmap(vaddr);
	vgem_unpin_pages(bo);
}

static int vgem_prime_mmap(struct drm_gem_object *obj,
			   struct vm_area_struct *vma)
{
	int ret;

	if (obj->size < vma->vm_end - vma->vm_start)
		return -EINVAL;

	if (!obj->filp)
		return -ENODEV;

	ret = call_mmap(obj->filp, vma);
	if (ret)
		return ret;

	fput(vma->vm_file);
	vma->vm_file = get_file(obj->filp);
	vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
	vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));

	return 0;
}

static void vgem_release(struct drm_device *dev)
{
	struct vgem_device *vgem = container_of(dev, typeof(*vgem), drm);

	platform_device_unregister(vgem->platform);
	drm_dev_fini(&vgem->drm);

	kfree(vgem);
}

static struct drm_driver vgem_driver = {
	.driver_features = DRIVER_GEM | DRIVER_RENDER,
	.release = vgem_release,
	.open = vgem_open,
	.postclose = vgem_postclose,
	.gem_free_object_unlocked = vgem_gem_free_object,
	.gem_vm_ops = &vgem_gem_vm_ops,
	.ioctls = vgem_ioctls,
	.num_ioctls = ARRAY_SIZE(vgem_ioctls),
	.fops = &vgem_driver_fops,

	.dumb_create = vgem_gem_dumb_create,
	.dumb_map_offset = vgem_gem_dumb_map,

	.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
	.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
	.gem_prime_pin = vgem_prime_pin,
	.gem_prime_unpin = vgem_prime_unpin,
	.gem_prime_import = vgem_prime_import,
	.gem_prime_import_sg_table = vgem_prime_import_sg_table,
	.gem_prime_get_sg_table = vgem_prime_get_sg_table,
	.gem_prime_vmap = vgem_prime_vmap,
	.gem_prime_vunmap = vgem_prime_vunmap,
	.gem_prime_mmap = vgem_prime_mmap,

	.name = DRIVER_NAME,
	.desc = DRIVER_DESC,
	.date = DRIVER_DATE,
	.major = DRIVER_MAJOR,
	.minor = DRIVER_MINOR,
};

static int __init vgem_init(void)
{
	int ret;

	vgem_device = kzalloc(sizeof(*vgem_device), GFP_KERNEL);
	if (!vgem_device)
		return -ENOMEM;

	vgem_device->platform =
		platform_device_register_simple("vgem", -1, NULL, 0);
	if (IS_ERR(vgem_device->platform)) {
		ret = PTR_ERR(vgem_device->platform);
		goto out_free;
	}

	dma_coerce_mask_and_coherent(&vgem_device->platform->dev,
				     DMA_BIT_MASK(64));

	ret = drm_dev_init(&vgem_device->drm, &vgem_driver,
			   &vgem_device->platform->dev);
	if (ret)
		goto out_unregister;

	/* Final step: expose the device/driver to userspace */
	ret = drm_dev_register(&vgem_device->drm, 0);
	if (ret)
		goto out_fini;

	return 0;

out_fini:
	drm_dev_fini(&vgem_device->drm);
out_unregister:
	platform_device_unregister(vgem_device->platform);
out_free:
	kfree(vgem_device);
	return ret;
}

static void __exit vgem_exit(void)
{
	drm_dev_unregister(&vgem_device->drm);
	drm_dev_put(&vgem_device->drm);
}

module_init(vgem_init);
module_exit(vgem_exit);

MODULE_AUTHOR("Red Hat, Inc.");
MODULE_AUTHOR("Intel Corporation");
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL and additional rights");
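
/*
 * Illustrative userspace usage (a hedged sketch, not part of this driver):
 * the snippet below shows how a client might allocate and map a vgem buffer
 * through the generic DRM dumb-buffer ioctls that this driver wires up via
 * vgem_gem_dumb_create() and vgem_gem_dumb_map(). The device node path and
 * the buffer dimensions are assumptions; the actual card index depends on
 * the system, and error checking is omitted for brevity. Compile against
 * the libdrm UAPI headers.
 *
 *	#include <fcntl.h>
 *	#include <sys/ioctl.h>
 *	#include <sys/mman.h>
 *	#include <drm/drm.h>
 *	#include <drm/drm_mode.h>
 *
 *	int fd = open("/dev/dri/card0", O_RDWR);	// assumed node path
 *
 *	// Allocate a dumb buffer; the driver rounds the size up to a page.
 *	struct drm_mode_create_dumb create = {
 *		.width = 1024, .height = 768, .bpp = 32,
 *	};
 *	ioctl(fd, DRM_IOCTL_MODE_CREATE_DUMB, &create);
 *
 *	// Ask for the fake mmap offset, then map the pages write-combined.
 *	struct drm_mode_map_dumb map = { .handle = create.handle };
 *	ioctl(fd, DRM_IOCTL_MODE_MAP_DUMB, &map);
 *	void *ptr = mmap(NULL, create.size, PROT_READ | PROT_WRITE,
 *			 MAP_SHARED, fd, map.offset);
 */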