/*
 * Copyright 2011 Red Hat, Inc.
 * Copyright © 2014 The Chromium OS Authors
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software")
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
 * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *	Adam Jackson <ajax@redhat.com>
 *	Ben Widawsky <ben@bwidawsk.net>
 */

/**
 * This is vgem, a (non-hardware-backed) GEM service. This is used by Mesa's
 * software renderer and the X server for efficient buffer sharing.
 */

#include <linux/module.h>
#include <linux/ramfs.h>
#include <linux/shmem_fs.h>
#include <linux/dma-buf.h>
#include "vgem_drv.h"

#define DRIVER_NAME	"vgem"
#define DRIVER_DESC	"Virtual GEM provider"
#define DRIVER_DATE	"20120112"
#define DRIVER_MAJOR	1
#define DRIVER_MINOR	0

static struct vgem_device {
	struct drm_device drm;
	struct platform_device *platform;
} *vgem_device;

static void vgem_gem_free_object(struct drm_gem_object *obj)
{
	struct drm_vgem_gem_object *vgem_obj = to_vgem_bo(obj);

	kvfree(vgem_obj->pages);
	mutex_destroy(&vgem_obj->pages_lock);

	if (obj->import_attach)
		drm_prime_gem_destroy(obj, vgem_obj->table);

	drm_gem_object_release(obj);
	kfree(vgem_obj);
}

static vm_fault_t vgem_gem_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct drm_vgem_gem_object *obj = vma->vm_private_data;
	/* We don't use vmf->pgoff since that has the fake offset */
	unsigned long vaddr = vmf->address;
	vm_fault_t ret = VM_FAULT_SIGBUS;
	loff_t num_pages;
	pgoff_t page_offset;

	page_offset = (vaddr - vma->vm_start) >> PAGE_SHIFT;
	num_pages = DIV_ROUND_UP(obj->base.size, PAGE_SIZE);

	if (page_offset >= num_pages)
		return VM_FAULT_SIGBUS;

	mutex_lock(&obj->pages_lock);
	if (obj->pages) {
		get_page(obj->pages[page_offset]);
		vmf->page = obj->pages[page_offset];
		ret = 0;
	}
	mutex_unlock(&obj->pages_lock);
	if (ret) {
		struct page *page;

		page = shmem_read_mapping_page(
					file_inode(obj->base.filp)->i_mapping,
					page_offset);
		if (!IS_ERR(page)) {
			vmf->page = page;
			ret = 0;
		} else switch (PTR_ERR(page)) {
			case -ENOSPC:
			case -ENOMEM:
				ret = VM_FAULT_OOM;
				break;
			case -EBUSY:
				ret = VM_FAULT_RETRY;
				break;
			case -EFAULT:
			case -EINVAL:
				ret = VM_FAULT_SIGBUS;
				break;
			default:
				WARN_ON(PTR_ERR(page));
				ret = VM_FAULT_SIGBUS;
				break;
		}
	}
	return ret;
}
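
/*
 * Illustrative sketch (not part of the driver): the fault handler above
 * runs when userspace creates a dumb buffer, mmaps it through the fake
 * offset, and touches a page before it has been pinned. Roughly, with
 * libdrm and error handling omitted ("fd" is assumed to be an open vgem
 * DRM node):
 *
 *	struct drm_mode_create_dumb create = {
 *		.width = 1024, .height = 768, .bpp = 32,
 *	};
 *	uint32_t *ptr;
 *
 *	drmIoctl(fd, DRM_IOCTL_MODE_CREATE_DUMB, &create);
 *
 *	struct drm_mode_map_dumb map = { .handle = create.handle };
 *
 *	drmIoctl(fd, DRM_IOCTL_MODE_MAP_DUMB, &map);
 *	ptr = mmap(NULL, create.size, PROT_READ | PROT_WRITE, MAP_SHARED,
 *		   fd, map.offset);
 *	ptr[0] = 0;	<- first touch faults in a page from shmem
 */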

static const struct vm_operations_struct vgem_gem_vm_ops = {
	.fault = vgem_gem_fault,
	.open = drm_gem_vm_open,
	.close = drm_gem_vm_close,
};

static int vgem_open(struct drm_device *dev, struct drm_file *file)
{
	struct vgem_file *vfile;
	int ret;

	vfile = kzalloc(sizeof(*vfile), GFP_KERNEL);
	if (!vfile)
		return -ENOMEM;

	file->driver_priv = vfile;

	ret = vgem_fence_open(vfile);
	if (ret) {
		kfree(vfile);
		return ret;
	}

	return 0;
}

static void vgem_postclose(struct drm_device *dev, struct drm_file *file)
{
	struct vgem_file *vfile = file->driver_priv;

	vgem_fence_close(vfile);
	kfree(vfile);
}

static struct drm_vgem_gem_object *__vgem_gem_create(struct drm_device *dev,
						     unsigned long size)
{
	struct drm_vgem_gem_object *obj;
	int ret;

	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
	if (!obj)
		return ERR_PTR(-ENOMEM);

	ret = drm_gem_object_init(dev, &obj->base, roundup(size, PAGE_SIZE));
	if (ret) {
		kfree(obj);
		return ERR_PTR(ret);
	}

	mutex_init(&obj->pages_lock);

	return obj;
}

static void __vgem_gem_destroy(struct drm_vgem_gem_object *obj)
{
	drm_gem_object_release(&obj->base);
	kfree(obj);
}

static struct drm_gem_object *vgem_gem_create(struct drm_device *dev,
					      struct drm_file *file,
					      unsigned int *handle,
					      unsigned long size)
{
	struct drm_vgem_gem_object *obj;
	int ret;

	obj = __vgem_gem_create(dev, size);
	if (IS_ERR(obj))
		return ERR_CAST(obj);

	ret = drm_gem_handle_create(file, &obj->base, handle);
	if (ret) {
		/* This put drops the final reference and frees obj, so
		 * don't touch it again afterwards.
		 */
		drm_gem_object_put_unlocked(&obj->base);
		return ERR_PTR(ret);
	}

	/* Drop our initial reference; the handle now keeps obj alive */
	drm_gem_object_put_unlocked(&obj->base);

	return &obj->base;
}

static int vgem_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
				struct drm_mode_create_dumb *args)
{
	struct drm_gem_object *gem_object;
	u64 pitch, size;

	pitch = args->width * DIV_ROUND_UP(args->bpp, 8);
	size = args->height * pitch;
	if (size == 0)
		return -EINVAL;

	gem_object = vgem_gem_create(dev, file, &args->handle, size);
	if (IS_ERR(gem_object))
		return PTR_ERR(gem_object);

	args->size = gem_object->size;
	args->pitch = pitch;

	DRM_DEBUG_DRIVER("Created object of size %lld\n", size);

	return 0;
}

static int vgem_gem_dumb_map(struct drm_file *file, struct drm_device *dev,
			     uint32_t handle, uint64_t *offset)
{
	struct drm_gem_object *obj;
	int ret;

	obj = drm_gem_object_lookup(file, handle);
	if (!obj)
		return -ENOENT;

	if (!obj->filp) {
		ret = -EINVAL;
		goto unref;
	}

	ret = drm_gem_create_mmap_offset(obj);
	if (ret)
		goto unref;

	*offset = drm_vma_node_offset_addr(&obj->vma_node);
unref:
	drm_gem_object_put_unlocked(obj);

	return ret;
}

static struct drm_ioctl_desc vgem_ioctls[] = {
	DRM_IOCTL_DEF_DRV(VGEM_FENCE_ATTACH, vgem_fence_attach_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(VGEM_FENCE_SIGNAL, vgem_fence_signal_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
};
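
/*
 * Illustrative sketch (not part of the driver): the two driver-private
 * ioctls above let test code attach a fence to a vgem buffer and signal
 * it later, emulating asynchronous hardware access. Roughly, with error
 * handling omitted ("fd" is assumed to be an open vgem node, "handle" a
 * GEM handle on it):
 *
 *	struct drm_vgem_fence_attach attach = {
 *		.handle = handle,
 *		.flags = VGEM_FENCE_WRITE,
 *	};
 *	struct drm_vgem_fence_signal signal = { 0 };
 *
 *	drmIoctl(fd, DRM_IOCTL_VGEM_FENCE_ATTACH, &attach);
 *	... importers waiting on the exported dma-buf now block ...
 *	signal.fence = attach.out_fence;
 *	drmIoctl(fd, DRM_IOCTL_VGEM_FENCE_SIGNAL, &signal);
 *
 * See include/uapi/drm/vgem_drm.h for the ioctl and structure definitions.
 */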

static int vgem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	unsigned long flags = vma->vm_flags;
	int ret;

	ret = drm_gem_mmap(filp, vma);
	if (ret)
		return ret;

	/* Keep the WC mmapping set by drm_gem_mmap() but our pages
	 * are ordinary and not special.
	 */
	vma->vm_flags = flags | VM_DONTEXPAND | VM_DONTDUMP;
	return 0;
}

static const struct file_operations vgem_driver_fops = {
	.owner = THIS_MODULE,
	.open = drm_open,
	.mmap = vgem_mmap,
	.poll = drm_poll,
	.read = drm_read,
	.unlocked_ioctl = drm_ioctl,
	.compat_ioctl = drm_compat_ioctl,
	.release = drm_release,
};

static struct page **vgem_pin_pages(struct drm_vgem_gem_object *bo)
{
	mutex_lock(&bo->pages_lock);
	if (bo->pages_pin_count++ == 0) {
		struct page **pages;

		pages = drm_gem_get_pages(&bo->base);
		if (IS_ERR(pages)) {
			bo->pages_pin_count--;
			mutex_unlock(&bo->pages_lock);
			return pages;
		}

		bo->pages = pages;
	}
	mutex_unlock(&bo->pages_lock);

	return bo->pages;
}

static void vgem_unpin_pages(struct drm_vgem_gem_object *bo)
{
	mutex_lock(&bo->pages_lock);
	if (--bo->pages_pin_count == 0) {
		drm_gem_put_pages(&bo->base, bo->pages, true, true);
		bo->pages = NULL;
	}
	mutex_unlock(&bo->pages_lock);
}

static int vgem_prime_pin(struct drm_gem_object *obj)
{
	struct drm_vgem_gem_object *bo = to_vgem_bo(obj);
	long n_pages = obj->size >> PAGE_SHIFT;
	struct page **pages;

	pages = vgem_pin_pages(bo);
	if (IS_ERR(pages))
		return PTR_ERR(pages);

	/* Flush the object from the CPU cache so that importers can rely
	 * on coherent indirect access via the exported dma-address.
	 */
	drm_clflush_pages(pages, n_pages);

	return 0;
}

static void vgem_prime_unpin(struct drm_gem_object *obj)
{
	struct drm_vgem_gem_object *bo = to_vgem_bo(obj);

	vgem_unpin_pages(bo);
}

static struct sg_table *vgem_prime_get_sg_table(struct drm_gem_object *obj)
{
	struct drm_vgem_gem_object *bo = to_vgem_bo(obj);

	return drm_prime_pages_to_sg(bo->pages, bo->base.size >> PAGE_SHIFT);
}

static struct drm_gem_object *vgem_prime_import(struct drm_device *dev,
						struct dma_buf *dma_buf)
{
	struct vgem_device *vgem = container_of(dev, typeof(*vgem), drm);

	return drm_gem_prime_import_dev(dev, dma_buf, &vgem->platform->dev);
}

static struct drm_gem_object *vgem_prime_import_sg_table(struct drm_device *dev,
			struct dma_buf_attachment *attach, struct sg_table *sg)
{
	struct drm_vgem_gem_object *obj;
	int npages;

	obj = __vgem_gem_create(dev, attach->dmabuf->size);
	if (IS_ERR(obj))
		return ERR_CAST(obj);

	npages = PAGE_ALIGN(attach->dmabuf->size) / PAGE_SIZE;

	obj->table = sg;
	obj->pages = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
	if (!obj->pages) {
		__vgem_gem_destroy(obj);
		return ERR_PTR(-ENOMEM);
	}

	obj->pages_pin_count++; /* perma-pinned */
	drm_prime_sg_to_page_addr_arrays(obj->table, obj->pages, NULL,
					 npages);
	return &obj->base;
}

static void *vgem_prime_vmap(struct drm_gem_object *obj)
{
	struct drm_vgem_gem_object *bo = to_vgem_bo(obj);
	long n_pages = obj->size >> PAGE_SHIFT;
	struct page **pages;

	pages = vgem_pin_pages(bo);
	if (IS_ERR(pages))
		return NULL;

	return vmap(pages, n_pages, 0, pgprot_writecombine(PAGE_KERNEL));
}

static void vgem_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
{
	struct drm_vgem_gem_object *bo = to_vgem_bo(obj);

	vunmap(vaddr);
	vgem_unpin_pages(bo);
}
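
/*
 * Illustrative sketch (not part of the driver): the PRIME hooks above are
 * what let vgem act as a pure software dma-buf exporter, e.g. in IGT
 * tests. A round trip from userspace looks roughly like this (error
 * handling omitted; "vgem_fd" and "handle" are assumed):
 *
 *	struct drm_prime_handle prime = {
 *		.handle = handle,
 *		.flags = DRM_CLOEXEC | DRM_RDWR,
 *	};
 *
 *	drmIoctl(vgem_fd, DRM_IOCTL_PRIME_HANDLE_TO_FD, &prime);
 *	... prime.fd may now be mmap()ed directly, or handed to another
 *	    driver via DRM_IOCTL_PRIME_FD_TO_HANDLE ...
 */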

static int vgem_prime_mmap(struct drm_gem_object *obj,
			   struct vm_area_struct *vma)
{
	int ret;

	if (obj->size < vma->vm_end - vma->vm_start)
		return -EINVAL;

	if (!obj->filp)
		return -ENODEV;

	ret = call_mmap(obj->filp, vma);
	if (ret)
		return ret;

	fput(vma->vm_file);
	vma->vm_file = get_file(obj->filp);
	vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
	vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));

	return 0;
}

static void vgem_release(struct drm_device *dev)
{
	struct vgem_device *vgem = container_of(dev, typeof(*vgem), drm);

	platform_device_unregister(vgem->platform);
	drm_dev_fini(&vgem->drm);

	kfree(vgem);
}

static struct drm_driver vgem_driver = {
	.driver_features = DRIVER_GEM | DRIVER_PRIME | DRIVER_RENDER,
	.release = vgem_release,
	.open = vgem_open,
	.postclose = vgem_postclose,
	.gem_free_object_unlocked = vgem_gem_free_object,
	.gem_vm_ops = &vgem_gem_vm_ops,
	.ioctls = vgem_ioctls,
	.num_ioctls = ARRAY_SIZE(vgem_ioctls),
	.fops = &vgem_driver_fops,

	.dumb_create = vgem_gem_dumb_create,
	.dumb_map_offset = vgem_gem_dumb_map,

	.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
	.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
	.gem_prime_pin = vgem_prime_pin,
	.gem_prime_unpin = vgem_prime_unpin,
	.gem_prime_import = vgem_prime_import,
	.gem_prime_export = drm_gem_prime_export,
	.gem_prime_import_sg_table = vgem_prime_import_sg_table,
	.gem_prime_get_sg_table = vgem_prime_get_sg_table,
	.gem_prime_vmap = vgem_prime_vmap,
	.gem_prime_vunmap = vgem_prime_vunmap,
	.gem_prime_mmap = vgem_prime_mmap,

	.name = DRIVER_NAME,
	.desc = DRIVER_DESC,
	.date = DRIVER_DATE,
	.major = DRIVER_MAJOR,
	.minor = DRIVER_MINOR,
};

static int __init vgem_init(void)
{
	int ret;

	vgem_device = kzalloc(sizeof(*vgem_device), GFP_KERNEL);
	if (!vgem_device)
		return -ENOMEM;

	vgem_device->platform =
		platform_device_register_simple("vgem", -1, NULL, 0);
	if (IS_ERR(vgem_device->platform)) {
		ret = PTR_ERR(vgem_device->platform);
		goto out_free;
	}

	dma_coerce_mask_and_coherent(&vgem_device->platform->dev,
				     DMA_BIT_MASK(64));

	ret = drm_dev_init(&vgem_device->drm, &vgem_driver,
			   &vgem_device->platform->dev);
	if (ret)
		goto out_unregister;

	/* Final step: expose the device/driver to userspace */
	ret = drm_dev_register(&vgem_device->drm, 0);
	if (ret)
		goto out_fini;

	return 0;

out_fini:
	drm_dev_fini(&vgem_device->drm);
out_unregister:
	platform_device_unregister(vgem_device->platform);
out_free:
	kfree(vgem_device);
	return ret;
}

static void __exit vgem_exit(void)
{
	drm_dev_unregister(&vgem_device->drm);
	drm_dev_put(&vgem_device->drm);
}

module_init(vgem_init);
module_exit(vgem_exit);

MODULE_AUTHOR("Red Hat, Inc.");
MODULE_AUTHOR("Intel Corporation");
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL and additional rights");
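
/*
 * Illustrative usage (not part of the driver): once the module is loaded
 * ("modprobe vgem"), the fake platform device registered in vgem_init()
 * gives dma-buf imports a struct device to attach to, and the driver is
 * reachable from userspace as a primary node and, because of
 * DRIVER_RENDER, a render node under /dev/dri/, e.g. with libdrm:
 *
 *	int fd = drmOpenWithType("vgem", NULL, DRM_NODE_RENDER);
 */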